#[test]
fn test_map_in_place_zero_drop_count() {
- use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+ use std::sync::atomic::{AtomicUsize, Ordering};
#[derive(Clone, PartialEq, Debug)]
struct Nothing;
}
}
const NUM_ELEMENTS: usize = 2;
- static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+ static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
let v = repeat(Nothing).take(NUM_ELEMENTS).collect::<Vec<_>>();
use intrinsics;
use cell::UnsafeCell;
-use marker::PhantomData;
use default::Default;
}
impl Default for AtomicBool {
- fn default() -> AtomicBool {
- ATOMIC_BOOL_INIT
+ fn default() -> Self {
+ Self::new(Default::default())
}
}
}
impl Default for AtomicIsize {
- fn default() -> AtomicIsize {
- ATOMIC_ISIZE_INIT
+ fn default() -> Self {
+ Self::new(Default::default())
}
}
}
impl Default for AtomicUsize {
- fn default() -> AtomicUsize {
- ATOMIC_USIZE_INIT
+ fn default() -> Self {
+ Self::new(Default::default())
}
}
/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
- p: UnsafeCell<usize>,
- _marker: PhantomData<*mut T>,
+ p: UnsafeCell<*mut T>,
}
impl<T> Default for AtomicPtr<T> {
/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_BOOL_INIT: AtomicBool =
- AtomicBool { v: UnsafeCell { value: 0 } };
+pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_ISIZE_INIT: AtomicIsize =
- AtomicIsize { v: UnsafeCell { value: 0 } };
+pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_USIZE_INIT: AtomicUsize =
- AtomicUsize { v: UnsafeCell { value: 0, } };
+pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
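// Note (editorial sketch, not part of the patch): with `new` as a `const fn`,
// a static atomic can be initialized directly, and the `ATOMIC_*_INIT`
// constants above become thin compatibility wrappers.
//
//     use std::sync::atomic::{AtomicUsize, Ordering};
//
//     static HITS: AtomicUsize = AtomicUsize::new(0);
//
//     fn record_hit() -> usize {
//         HITS.fetch_add(1, Ordering::SeqCst) + 1
//     }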
// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn new(v: bool) -> AtomicBool {
- let val = if v { UINT_TRUE } else { 0 };
- AtomicBool { v: UnsafeCell::new(val) }
+ pub const fn new(v: bool) -> AtomicBool {
+ AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
}
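// Worked check (editorial, assumes two's complement): `-(v as isize) as usize`
// maps `true` to -1 == !0 == UINT_TRUE and `false` to 0, replacing the old
// branch while preserving the all-ones pattern that `fetch_nand` relies on.
//
//     assert_eq!(-(true as isize) as usize, !0);
//     assert_eq!(-(false as isize) as usize, 0);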
/// Loads a value from the bool.
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn new(v: isize) -> AtomicIsize {
+ pub const fn new(v: isize) -> AtomicIsize {
AtomicIsize {v: UnsafeCell::new(v)}
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn new(v: usize) -> AtomicUsize {
+ pub const fn new(v: usize) -> AtomicUsize {
AtomicUsize { v: UnsafeCell::new(v) }
}
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn new(p: *mut T) -> AtomicPtr<T> {
- AtomicPtr { p: UnsafeCell::new(p as usize),
- _marker: PhantomData }
+ pub const fn new(p: *mut T) -> AtomicPtr<T> {
+ AtomicPtr { p: UnsafeCell::new(p) }
}
/// Loads a value from the pointer.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> *mut T {
unsafe {
- atomic_load(self.p.get(), order) as *mut T
+ atomic_load(self.p.get() as *mut usize, order) as *mut T
}
}
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, ptr: *mut T, order: Ordering) {
- unsafe { atomic_store(self.p.get(), ptr as usize, order); }
+ unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
}
/// Stores a value into the pointer, returning the old value.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
- unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T }
+ unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// Stores a value into the pointer if the current value is the same as the expected value.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
unsafe {
- atomic_compare_and_swap(self.p.get(), old as usize,
+ atomic_compare_and_swap(self.p.get() as *mut usize, old as usize,
new as usize, order) as *mut T
}
}
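// Note (editorial sketch, not part of the patch): one-shot publication through
// `compare_and_swap`, with a null pointer as the "empty" state. `0 as *mut _`
// keeps the static initializer constant.
//
//     use std::sync::atomic::{AtomicPtr, Ordering};
//
//     static SLOT: AtomicPtr<u32> = AtomicPtr::new(0 as *mut u32);
//
//     // Returns true if this call installed `raw`, i.e. the slot was empty.
//     fn publish(raw: *mut u32) -> bool {
//         SLOT.compare_and_swap(0 as *mut u32, raw, Ordering::SeqCst).is_null()
//     }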
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- pub fn new(value: T) -> Cell<T> {
+ pub const fn new(value: T) -> Cell<T> {
Cell {
value: UnsafeCell::new(value),
}
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- pub fn new(value: T) -> RefCell<T> {
+ pub const fn new(value: T) -> RefCell<T> {
RefCell {
value: UnsafeCell::new(value),
borrow: Cell::new(UNUSED),
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- pub fn new(value: T) -> UnsafeCell<T> {
+ pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value: value }
}
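// Note (editorial sketch, not part of the patch): a `const` `UnsafeCell::new`
// is what lets the `UnsafeCell { value: ... }` struct literals elsewhere in
// this patch become ordinary calls, e.g.
//
//     use std::cell::UnsafeCell;
//
//     const A: UnsafeCell<usize> = UnsafeCell::new(1);
//
// A `static` additionally needs a `Sync` wrapper type, as the `Dummy` statics
// in the mutex and rwlock code below demonstrate.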
#![feature(concat_idents)]
#![feature(reflect)]
#![feature(custom_attribute)]
+#![feature(const_fn)]
#[macro_use]
mod macros;
assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}
-static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT;
-static S_INT : AtomicIsize = ATOMIC_ISIZE_INIT;
-static S_UINT : AtomicUsize = ATOMIC_USIZE_INIT;
+static S_FALSE: AtomicBool = AtomicBool::new(false);
+static S_TRUE: AtomicBool = AtomicBool::new(true);
+static S_INT: AtomicIsize = AtomicIsize::new(0);
+static S_UINT: AtomicUsize = AtomicUsize::new(0);
#[test]
fn static_init() {
- assert!(!S_BOOL.load(SeqCst));
+ assert!(!S_FALSE.load(SeqCst));
+ assert!(S_TRUE.load(SeqCst));
assert!(S_INT.load(SeqCst) == 0);
assert!(S_UINT.load(SeqCst) == 0);
}
use std::env;
use std::rt;
use std::slice;
-use std::sync::{Once, ONCE_INIT, StaticMutex, MUTEX_INIT};
+use std::sync::{Once, StaticMutex};
use directive::LOG_LEVEL_NAMES;
/// The default logging level of a crate if no other is specified.
const DEFAULT_LOG_LEVEL: u32 = 1;
-static LOCK: StaticMutex = MUTEX_INIT;
+static LOCK: StaticMutex = StaticMutex::new();
/// An unsafe constant that is the maximum logging level of any module
/// specified. This is the first line of defense in determining whether a
/// module's log statement should be emitted or not.
#[doc(hidden)]
pub fn mod_enabled(level: u32, module: &str) -> bool {
- static INIT: Once = ONCE_INIT;
+ static INIT: Once = Once::new();
INIT.call_once(init);
// It's possible for many threads to be in this function, but only one of them
use std::fs::File;
use std::io;
use std::io::prelude::*;
-use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
+use std::sync::atomic::{AtomicBool, Ordering};
use syntax::ast;
fn print_help_message() {
let output_path = {
let output_template = match requested_output {
Ok(ref s) if &**s == "help" => {
- static PRINTED_YET: AtomicBool = ATOMIC_BOOL_INIT;
+ static PRINTED_YET: AtomicBool = AtomicBool::new(false);
if !PRINTED_YET.load(Ordering::SeqCst) {
print_help_message();
PRINTED_YET.store(true, Ordering::SeqCst);
}
unsafe fn configure_llvm(sess: &Session) {
- use std::sync::{Once, ONCE_INIT};
- static INIT: Once = ONCE_INIT;
+ use std::sync::Once;
+ static INIT: Once = Once::new();
// Copy what clang does by turning on loop vectorization at O2 and
// slp vectorization at O3
#![feature(path_ext)]
#![feature(fs)]
#![feature(path_relative_from)]
+#![feature(std_misc)]
#![allow(trivial_casts)]
// Before we touch LLVM, make sure that multithreading is enabled.
unsafe {
- use std::sync::{Once, ONCE_INIT};
- static INIT: Once = ONCE_INIT;
+ use std::sync::Once;
+ static INIT: Once = Once::new();
static mut POISONED: bool = false;
INIT.call_once(|| {
if llvm::LLVMStartMultithreaded() != 1 {
pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
F: FnOnce() -> T,
{
- use sync::{StaticMutex, MUTEX_INIT};
- static LOCK: StaticMutex = MUTEX_INIT;
+ use sync::StaticMutex;
+ static LOCK: StaticMutex = StaticMutex::new();
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence
use fmt;
use io;
use path::{Path, PathBuf};
-use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering};
-use sync::{StaticMutex, MUTEX_INIT};
+use sync::atomic::{AtomicIsize, Ordering};
+use sync::StaticMutex;
use sys::os as os_imp;
/// Returns the current working directory as a `PathBuf`.
os_imp::chdir(p.as_ref())
}
-static ENV_LOCK: StaticMutex = MUTEX_INIT;
+static ENV_LOCK: StaticMutex = StaticMutex::new();
/// An iterator over a snapshot of the environment variables of this process.
///
os_imp::current_exe()
}
-static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT;
+static EXIT_STATUS: AtomicIsize = AtomicIsize::new(0);
/// Sets the process exit code
///
use prelude::v1::*;
use boxed;
-use cell::UnsafeCell;
+use cell::Cell;
use rt;
use sync::{StaticMutex, Arc};
pub struct Lazy<T> {
- pub lock: StaticMutex,
- pub ptr: UnsafeCell<*mut Arc<T>>,
- pub init: fn() -> Arc<T>,
+ lock: StaticMutex,
+ ptr: Cell<*mut Arc<T>>,
+ init: fn() -> Arc<T>,
}
unsafe impl<T> Sync for Lazy<T> {}
-macro_rules! lazy_init {
- ($init:expr) => (::io::lazy::Lazy {
- lock: ::sync::MUTEX_INIT,
- ptr: ::cell::UnsafeCell { value: 0 as *mut _ },
- init: $init,
- })
-}
-
impl<T: Send + Sync + 'static> Lazy<T> {
+ pub const fn new(init: fn() -> Arc<T>) -> Lazy<T> {
+ Lazy {
+ lock: StaticMutex::new(),
+ ptr: Cell::new(0 as *mut _),
+ init: init
+ }
+ }
+
pub fn get(&'static self) -> Option<Arc<T>> {
let _g = self.lock.lock();
+ let ptr = self.ptr.get();
unsafe {
- let ptr = *self.ptr.get();
if ptr.is_null() {
Some(self.init())
} else if ptr as usize == 1 {
// `Arc`.
let registered = rt::at_exit(move || {
let g = self.lock.lock();
- let ptr = *self.ptr.get();
- *self.ptr.get() = 1 as *mut _;
+ let ptr = self.ptr.get();
+ self.ptr.set(1 as *mut _);
drop(g);
drop(Box::from_raw(ptr))
});
let ret = (self.init)();
if registered.is_ok() {
- *self.ptr.get() = boxed::into_raw(Box::new(ret.clone()));
+ self.ptr.set(boxed::into_raw(Box::new(ret.clone())));
}
return ret
}
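// Note (editorial) on the states above: `ptr` is null while the `Lazy` is
// uninitialized, points at a leaked `Box<Arc<T>>` once `init` has run, and is
// set to the sentinel `1` by the `at_exit` handler, so that later calls
// observe shutdown and return `None` instead of touching a freed `Arc`.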
#[doc(no_inline, hidden)]
pub use self::stdio::{set_panic, set_print};
-#[macro_use] mod lazy;
-
pub mod prelude;
mod buffered;
mod cursor;
mod error;
mod impls;
+mod lazy;
mod util;
mod stdio;
/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
- static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = lazy_init!(stdin_init);
+ static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = Lazy::new(stdin_init);
return Stdin {
inner: INSTANCE.get().expect("cannot access stdin during shutdown"),
};
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
- static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = lazy_init!(stdout_init);
+ static INSTANCE: Lazy<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = Lazy::new(stdout_init);
return Stdout {
inner: INSTANCE.get().expect("cannot access stdout during shutdown"),
};
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
- static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = lazy_init!(stderr_init);
+ static INSTANCE: Lazy<ReentrantMutex<RefCell<StderrRaw>>> = Lazy::new(stderr_init);
return Stderr {
inner: INSTANCE.get().expect("cannot access stderr during shutdown"),
};
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
+#![feature(const_fn)]
#![feature(into_cow)]
#![feature(lang_items)]
#![feature(libc)]
use env;
use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs};
-use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use sync::atomic::{AtomicUsize, Ordering};
-static PORT: AtomicUsize = ATOMIC_USIZE_INIT;
+static PORT: AtomicUsize = AtomicUsize::new(0);
pub fn next_test_ip4() -> SocketAddr {
let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
target_arch = "aarch64",
target_arch = "powerpc")))]
fn is_getrandom_available() -> bool {
- use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
- use sync::{Once, ONCE_INIT};
+ use sync::atomic::{AtomicBool, Ordering};
+ use sync::Once;
- static CHECKER: Once = ONCE_INIT;
- static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT;
+ static CHECKER: Once = Once::new();
+ static AVAILABLE: AtomicBool = AtomicBool::new(false);
CHECKER.call_once(|| {
let mut buf: [u8; 0] = [];
use mem;
use ffi::CStr;
- use sync::{StaticMutex, MUTEX_INIT};
+ use sync::StaticMutex;
static mut GLOBAL_ARGS_PTR: usize = 0;
- static LOCK: StaticMutex = MUTEX_INIT;
+ static LOCK: StaticMutex = StaticMutex::new();
pub unsafe fn init(argc: isize, argv: *const *const u8) {
let args = load_argc_and_argv(argc, argv);
use boxed::Box;
use vec::Vec;
use thunk::Thunk;
-use sys_common::mutex::{Mutex, MUTEX_INIT};
+use sys_common::mutex::Mutex;
type Queue = Vec<Thunk<'static>>;
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
-static LOCK: Mutex = MUTEX_INIT;
+static LOCK: Mutex = Mutex::new();
static mut QUEUE: *mut Queue = 0 as *mut Queue;
// The maximum number of times the cleanup routines will be run. While running
// For now logging is turned off by default, and this function checks
// whether the magical environment variable is present to see if it's turned on.
pub fn log_enabled() -> bool {
- static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT;
+ static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0);
match ENABLED.load(Ordering::SeqCst) {
1 => return false,
2 => return true,
use libc::c_void;
use mem;
use sync::atomic::{self, Ordering};
-use sys_common::mutex::{Mutex, MUTEX_INIT};
+use sys_common::mutex::Mutex;
// The actual unwinding implementation is cfg'd here, and we've got two current
// implementations. One goes through SEH on Windows and the other goes through
// For more information, see below.
const MAX_CALLBACKS: usize = 16;
static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] =
- [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT,
- atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT];
-static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+ [atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0),
+ atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0)];
+static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
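// Note (editorial): the sixteen explicit `AtomicUsize::new(0)` entries stay
// because `[AtomicUsize::new(0); MAX_CALLBACKS]` repeat syntax requires the
// element type to be `Copy`, which atomics are not (compare the FIXME on the
// `drop_counts` table in the sort test near the end of this patch).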
thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
// `std::sync` one as accessing TLS can cause weird recursive problems (and
// we don't need poison checking).
unsafe {
- static LOCK: Mutex = MUTEX_INIT;
+ static LOCK: Mutex = Mutex::new();
static mut INIT: bool = false;
LOCK.lock();
if !INIT {
}
pub fn min_stack() -> usize {
- static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+ static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
match MIN.load(Ordering::SeqCst) {
0 => {}
n => return n - 1,
use prelude::v1::*;
-use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use sync::atomic::{AtomicUsize, Ordering};
use sync::{mutex, MutexGuard, PoisonError};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
/// Constant initializer for a statically allocated condition variable.
#[unstable(feature = "static_condvar",
reason = "may be merged with Condvar in the future")]
-pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
- inner: sys::CONDVAR_INIT,
- mutex: ATOMIC_USIZE_INIT,
-};
+pub const CONDVAR_INIT: StaticCondvar = StaticCondvar::new();
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
- inner: unsafe { sys::Condvar::new() },
+ inner: sys::Condvar::new(),
mutex: AtomicUsize::new(0),
}
}
}
impl StaticCondvar {
+ /// Creates a new condition variable
+ #[unstable(feature = "static_condvar",
+ reason = "may be merged with Condvar in the future")]
+ pub const fn new() -> StaticCondvar {
+ StaticCondvar {
+ inner: sys::Condvar::new(),
+ mutex: AtomicUsize::new(0),
+ }
+ }
+
/// Blocks the current thread until this condition variable receives a
/// notification.
///
mod tests {
use prelude::v1::*;
- use super::{StaticCondvar, CONDVAR_INIT};
+ use super::StaticCondvar;
use sync::mpsc::channel;
- use sync::{StaticMutex, MUTEX_INIT, Condvar, Mutex, Arc};
- use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use sync::{StaticMutex, Condvar, Mutex, Arc};
+ use sync::atomic::{AtomicUsize, Ordering};
use thread;
use time::Duration;
use u32;
#[test]
fn static_smoke() {
- static C: StaticCondvar = CONDVAR_INIT;
+ static C: StaticCondvar = StaticCondvar::new();
C.notify_one();
C.notify_all();
unsafe { C.destroy(); }
#[test]
fn notify_one() {
- static C: StaticCondvar = CONDVAR_INIT;
- static M: StaticMutex = MUTEX_INIT;
+ static C: StaticCondvar = StaticCondvar::new();
+ static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let _t = thread::spawn(move|| {
#[test]
fn wait_timeout_ms() {
- static C: StaticCondvar = CONDVAR_INIT;
- static M: StaticMutex = MUTEX_INIT;
+ static C: StaticCondvar = StaticCondvar::new();
+ static M: StaticMutex = StaticMutex::new();
let g = M.lock().unwrap();
let (g, _no_timeout) = C.wait_timeout_ms(g, 1).unwrap();
#[test]
fn wait_timeout_with() {
- static C: StaticCondvar = CONDVAR_INIT;
- static M: StaticMutex = MUTEX_INIT;
- static S: AtomicUsize = ATOMIC_USIZE_INIT;
+ static C: StaticCondvar = StaticCondvar::new();
+ static M: StaticMutex = StaticMutex::new();
+ static S: AtomicUsize = AtomicUsize::new(0);
let g = M.lock().unwrap();
let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| {
#[test]
#[should_panic]
fn two_mutexes() {
- static M1: StaticMutex = MUTEX_INIT;
- static M2: StaticMutex = MUTEX_INIT;
- static C: StaticCondvar = CONDVAR_INIT;
+ static M1: StaticMutex = StaticMutex::new();
+ static M2: StaticMutex = StaticMutex::new();
+ static C: StaticCondvar = StaticCondvar::new();
let mut g = M1.lock().unwrap();
let _t = thread::spawn(move|| {
//! Generic support for building blocking abstractions.
use thread::{self, Thread};
-use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+use sync::atomic::{AtomicBool, Ordering};
use sync::Arc;
use marker::{Sync, Send};
use mem;
pub fn tokens() -> (WaitToken, SignalToken) {
let inner = Arc::new(Inner {
thread: thread::current(),
- woken: ATOMIC_BOOL_INIT,
+ woken: AtomicBool::new(false),
});
let wait_token = WaitToken {
inner: inner.clone(),
/// other mutex constants.
#[unstable(feature = "std_misc",
reason = "may be merged with Mutex in the future")]
-pub const MUTEX_INIT: StaticMutex = StaticMutex {
- lock: sys::MUTEX_INIT,
- poison: poison::FLAG_INIT,
-};
+pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
Mutex {
- inner: box MUTEX_INIT,
+ inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
}
}
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
-static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
+static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticMutex {
+ /// Creates a new mutex in an unlocked state ready for use.
+ #[unstable(feature = "std_misc",
+ reason = "may be merged with Mutex in the future")]
+ pub const fn new() -> StaticMutex {
+ StaticMutex {
+ lock: sys::Mutex::new(),
+ poison: poison::Flag::new(),
+ }
+ }
+
/// Acquires this lock, see `Mutex::lock`
#[inline]
#[unstable(feature = "std_misc",
use prelude::v1::*;
use sync::mpsc::channel;
- use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar};
+ use sync::{Arc, Mutex, StaticMutex, Condvar};
use thread;
struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
#[test]
fn smoke_static() {
- static M: StaticMutex = MUTEX_INIT;
+ static M: StaticMutex = StaticMutex::new();
unsafe {
drop(M.lock().unwrap());
drop(M.lock().unwrap());
#[test]
fn lots_and_lots() {
- static M: StaticMutex = MUTEX_INIT;
+ static M: StaticMutex = StaticMutex::new();
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
use prelude::v1::*;
use isize;
-use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT};
-use sync::{StaticMutex, MUTEX_INIT};
+use sync::atomic::{AtomicIsize, Ordering};
+use sync::StaticMutex;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
/// Initialization value for static `Once` values.
#[stable(feature = "rust1", since = "1.0.0")]
-pub const ONCE_INIT: Once = Once {
- mutex: MUTEX_INIT,
- cnt: ATOMIC_ISIZE_INIT,
- lock_cnt: ATOMIC_ISIZE_INIT,
-};
+pub const ONCE_INIT: Once = Once::new();
impl Once {
+ /// Creates a new `Once` value.
+ #[unstable(feature = "std_misc")]
+ pub const fn new() -> Once {
+ Once {
+ mutex: StaticMutex::new(),
+ cnt: AtomicIsize::new(0),
+ lock_cnt: AtomicIsize::new(0),
+ }
+ }
+
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
use prelude::v1::*;
use thread;
- use super::{ONCE_INIT, Once};
+ use super::Once;
use sync::mpsc::channel;
#[test]
fn smoke_once() {
- static O: Once = ONCE_INIT;
+ static O: Once = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
#[test]
fn stampede_once() {
- static O: Once = ONCE_INIT;
+ static O: Once = Once::new();
static mut run: bool = false;
let (tx, rx) = channel();
/// Constant initializer for a statically allocated rwlock.
#[unstable(feature = "std_misc",
reason = "may be merged with RwLock in the future")]
-pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock {
- lock: sys::RWLOCK_INIT,
- poison: poison::FLAG_INIT,
-};
+pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new();
/// RAII structure used to release the shared read access of a lock when
/// dropped.
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> RwLock<T> {
- RwLock { inner: box RW_LOCK_INIT, data: UnsafeCell::new(t) }
+ RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) }
}
}
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
-static DUMMY: Dummy = Dummy(UnsafeCell { value: () });
+static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
impl StaticRwLock {
+ /// Creates a new rwlock.
+ #[unstable(feature = "std_misc",
+ reason = "may be merged with RwLock in the future")]
+ pub const fn new() -> StaticRwLock {
+ StaticRwLock {
+ lock: sys::RWLock::new(),
+ poison: poison::Flag::new(),
+ }
+ }
+
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
use rand::{self, Rng};
use sync::mpsc::channel;
use thread;
- use sync::{Arc, RwLock, StaticRwLock, TryLockError, RW_LOCK_INIT};
+ use sync::{Arc, RwLock, StaticRwLock, TryLockError};
#[test]
fn smoke() {
#[test]
fn static_smoke() {
- static R: StaticRwLock = RW_LOCK_INIT;
+ static R: StaticRwLock = StaticRwLock::new();
drop(R.read().unwrap());
drop(R.write().unwrap());
drop((R.read().unwrap(), R.read().unwrap()));
#[test]
fn frob() {
- static R: StaticRwLock = RW_LOCK_INIT;
+ static R: StaticRwLock = StaticRwLock::new();
const N: usize = 10;
const M: usize = 1000;
/// this type.
pub struct Condvar(imp::Condvar);
-/// Static initializer for condition variables.
-pub const CONDVAR_INIT: Condvar = Condvar(imp::CONDVAR_INIT);
-
impl Condvar {
/// Creates a new condition variable for use.
///
/// Behavior is undefined if the condition variable is moved after it is
/// first used with any of the functions below.
- #[inline]
- pub unsafe fn new() -> Condvar { Condvar(imp::Condvar::new()) }
+ pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) }
/// Signals one waiter on this condition variable to wake up.
#[inline]
unsafe impl Sync for Mutex {}
-/// Constant initializer for statically allocated mutexes.
-pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT);
-
impl Mutex {
+ /// Creates a new mutex for use.
+ ///
+ /// Behavior is undefined if the mutex is moved after it is
+ /// first used with any of the functions below.
+ pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
+
/// Locks the mutex blocking the current thread until it is available.
///
/// Behavior is undefined if the mutex has been moved between this and any
use prelude::v1::*;
-use marker::Reflect;
-use cell::UnsafeCell;
+use cell::Cell;
use error::{Error};
use fmt;
+use marker::Reflect;
use thread;
-pub struct Flag { failed: UnsafeCell<bool> }
+pub struct Flag { failed: Cell<bool> }
// This flag is only ever accessed with a lock previously held. Note that this
// is a totally private structure.
unsafe impl Send for Flag {}
unsafe impl Sync for Flag {}
-pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
-
impl Flag {
+ pub const fn new() -> Flag {
+ Flag { failed: Cell::new(false) }
+ }
+
#[inline]
pub fn borrow(&self) -> LockResult<Guard> {
let ret = Guard { panicking: thread::panicking() };
- if unsafe { *self.failed.get() } {
+ if self.get() {
Err(PoisonError::new(ret))
} else {
Ok(ret)
#[inline]
pub fn done(&self, guard: &Guard) {
if !guard.panicking && thread::panicking() {
- unsafe { *self.failed.get() = true; }
+ self.failed.set(true);
}
}
#[inline]
pub fn get(&self) -> bool {
- unsafe { *self.failed.get() }
+ self.failed.get()
}
}
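// Note (editorial): moving `failed` from `UnsafeCell<bool>` to `Cell<bool>`
// removes the unsafe dereferences above, since `Cell` provides safe get/set
// for `Copy` types. The manual `Sync` impl remains sound for the reason the
// existing comment gives: the flag is only ever accessed with the lock held.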
unsafe {
let mut mutex = ReentrantMutex {
inner: box sys::ReentrantMutex::uninitialized(),
- poison: poison::FLAG_INIT,
+ poison: poison::Flag::new(),
data: t,
};
mutex.inner.init();
/// safer types at the top level of this crate instead of this type.
pub struct RWLock(imp::RWLock);
-/// Constant initializer for static RWLocks.
-pub const RWLOCK_INIT: RWLock = RWLock(imp::RWLOCK_INIT);
-
impl RWLock {
+ /// Creates a new reader-writer lock for use.
+ ///
+ /// Behavior is undefined if the reader-writer lock is moved after it is
+ /// first used with any of the functions below.
+ pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) }
+
/// Acquires shared access to the underlying lock, blocking the current
/// thread to do so.
///
/// }
/// ```
pub struct StaticKey {
- /// Inner static TLS key (internals), created with by `INIT_INNER` in this
- /// module.
- pub inner: StaticKeyInner,
+ /// Inner static TLS key (internals).
+ key: AtomicUsize,
/// Destructor for the TLS value.
///
/// See `Key::new` for information about when the destructor runs and how
/// it runs.
- pub dtor: Option<unsafe extern fn(*mut u8)>,
-}
-
-/// Inner contents of `StaticKey`, created by the `INIT_INNER` constant.
-pub struct StaticKeyInner {
- key: AtomicUsize,
+ dtor: Option<unsafe extern fn(*mut u8)>,
}
/// A type for a safely managed OS-based TLS slot.
/// Constant initialization value for static TLS keys.
///
/// This value specifies no destructor by default.
-pub const INIT: StaticKey = StaticKey {
- inner: INIT_INNER,
- dtor: None,
-};
-
-/// Constant initialization value for the inner part of static TLS keys.
-///
-/// This value allows specific configuration of the destructor for a TLS key.
-pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
- key: atomic::ATOMIC_USIZE_INIT,
-};
+pub const INIT: StaticKey = StaticKey::new(None);
impl StaticKey {
+ pub const fn new(dtor: Option<unsafe extern fn(*mut u8)>) -> StaticKey {
+ StaticKey {
+ key: atomic::AtomicUsize::new(0),
+ dtor: dtor
+ }
+ }
+
/// Gets the value associated with this TLS key
///
/// This will lazily allocate a TLS key from the OS if one has not already
/// Note that this does *not* run the user-provided destructor if one was
/// specified at definition time. Doing so must be done manually.
pub unsafe fn destroy(&self) {
- match self.inner.key.swap(0, Ordering::SeqCst) {
+ match self.key.swap(0, Ordering::SeqCst) {
0 => {}
n => { imp::destroy(n as imp::Key) }
}
#[inline]
unsafe fn key(&self) -> imp::Key {
- match self.inner.key.load(Ordering::Relaxed) {
+ match self.key.load(Ordering::Relaxed) {
0 => self.lazy_init() as imp::Key,
n => n as imp::Key
}
key2
};
assert!(key != 0);
- match self.inner.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
+ match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
// The CAS succeeded, so we've created the actual key
0 => key as usize,
// If someone beat us to the punch, use their key instead
#[cfg(test)]
mod tests {
use prelude::v1::*;
- use super::{Key, StaticKey, INIT_INNER};
+ use super::{Key, StaticKey};
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
#[test]
fn statik() {
- static K1: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
- static K2: StaticKey = StaticKey { inner: INIT_INNER, dtor: None };
+ static K1: StaticKey = StaticKey::new(None);
+ static K2: StaticKey = StaticKey::new(None);
unsafe {
assert!(K1.get().is_null());
use libc;
use mem;
use str;
-use sync::{StaticMutex, MUTEX_INIT};
+use sync::StaticMutex;
use sys_common::backtrace::*;
// while it doesn't require a lock to work, as everything is
// local, it still displays much nicer backtraces when a
// couple of threads panic simultaneously
- static LOCK: StaticMutex = MUTEX_INIT;
+ static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));
// is semi-reasonable in terms of printing anyway, and we know that all
// I/O done here is blocking I/O, not green I/O, so we don't have to
// worry about this being a native vs green mutex.
- static LOCK: StaticMutex = MUTEX_INIT;
+ static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
try!(writeln!(w, "stack backtrace:"));
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
-pub const CONDVAR_INIT: Condvar = Condvar {
- inner: UnsafeCell { value: ffi::PTHREAD_COND_INITIALIZER },
-};
-
impl Condvar {
- #[inline]
- pub unsafe fn new() -> Condvar {
+ pub const fn new() -> Condvar {
// Might be moved, and since its address would change it is better to avoid
// initializing potentially opaque OS data before it lands at its final address
Condvar { inner: UnsafeCell::new(ffi::PTHREAD_COND_INITIALIZER) }
m.inner.get()
}
-pub const MUTEX_INIT: Mutex = Mutex {
- inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER },
-};
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[allow(dead_code)] // sys isn't exported yet
impl Mutex {
- #[inline]
- pub unsafe fn new() -> Mutex {
+ pub const fn new() -> Mutex {
// Might be moved, and since its address would change it is better to avoid
// initializing potentially opaque OS data before it lands at its final address
- MUTEX_INIT
+ Mutex { inner: UnsafeCell::new(ffi::PTHREAD_MUTEX_INITIALIZER) }
}
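// Note (editorial): both pthread constructors now simply wrap the
// PTHREAD_*_INITIALIZER constants in a `const fn`, so constructing a mutex or
// condvar performs no OS call; the OS first sees the object at first use, by
// which point it has reached its final address.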
#[inline]
pub unsafe fn lock(&self) {
#[cfg(any(target_os = "bitrig", target_os = "openbsd"))]
pub fn current_exe() -> io::Result<PathBuf> {
- use sync::{StaticMutex, MUTEX_INIT};
- static LOCK: StaticMutex = MUTEX_INIT;
+ use sync::StaticMutex;
+ static LOCK: StaticMutex = StaticMutex::new();
extern {
fn rust_current_exe() -> *const c_char;
pub struct RWLock { inner: UnsafeCell<ffi::pthread_rwlock_t> }
-pub const RWLOCK_INIT: RWLock = RWLock {
- inner: UnsafeCell { value: ffi::PTHREAD_RWLOCK_INITIALIZER },
-};
-
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock { inner: UnsafeCell::new(ffi::PTHREAD_RWLOCK_INITIALIZER) }
+ }
#[inline]
pub unsafe fn read(&self) {
let r = ffi::pthread_rwlock_rdlock(self.inner.get());
#[cfg(target_os = "linux")]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
use dynamic_lib::DynamicLibrary;
- use sync::{Once, ONCE_INIT};
+ use sync::Once;
type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t;
- static INIT: Once = ONCE_INIT;
+ static INIT: Once = Once::new();
static mut __pthread_get_minstack: Option<F> = None;
INIT.call_once(|| {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(dead_code)] // sys isn't exported yet
+
use prelude::v1::*;
use libc::c_int;
use libc;
use time::Duration;
use ops::Sub;
- use sync::{Once, ONCE_INIT};
+ use sync::Once;
use super::NSEC_PER_SEC;
pub struct SteadyTime {
numer: 0,
denom: 0,
};
- static ONCE: Once = ONCE_INIT;
+ static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {
use path::Path;
use ptr;
use str;
-use sync::{StaticMutex, MUTEX_INIT};
+use sync::StaticMutex;
use sys_common::backtrace::*;
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
- static LOCK: StaticMutex = MUTEX_INIT;
+ static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll; we don't link to it explicitly because it can't
-> $rettype:ty { $fallback:expr }) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
- use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use sync::atomic::{AtomicUsize, Ordering};
use mem;
- static PTR: AtomicUsize = ATOMIC_USIZE_INIT;
+ static PTR: AtomicUsize = AtomicUsize::new(0);
fn load() -> usize {
::sys::c::compat::store_func(&PTR,
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
-pub const CONDVAR_INIT: Condvar = Condvar {
- inner: UnsafeCell { value: ffi::CONDITION_VARIABLE_INIT }
-};
-
impl Condvar {
- #[inline]
- pub unsafe fn new() -> Condvar { CONDVAR_INIT }
+ pub const fn new() -> Condvar {
+ Condvar { inner: UnsafeCell::new(ffi::CONDITION_VARIABLE_INIT) }
+ }
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
-pub const MUTEX_INIT: Mutex = Mutex {
- inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
-};
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
// is that there are no guarantees of fairness.
impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
+ }
#[inline]
pub unsafe fn lock(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())
use num::One;
use ops::Neg;
use rt;
-use sync::{Once, ONCE_INIT};
+use sync::Once;
use sys::c;
use sys_common::{AsInner, FromInner};
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
- static START: Once = ONCE_INIT;
+ static START: Once = Once::new();
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
use os::windows::ffi::OsStrExt;
use path::Path;
use ptr;
-use sync::{StaticMutex, MUTEX_INIT};
+use sync::StaticMutex;
use sys::c;
use sys::fs::{OpenOptions, File};
use sys::handle::Handle;
try!(unsafe {
// `CreateProcess` is racy!
// http://support.microsoft.com/kb/315939
- static CREATE_PROCESS_LOCK: StaticMutex = MUTEX_INIT;
+ static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
let _lock = CREATE_PROCESS_LOCK.lock();
cvt(CreateProcessW(ptr::null(),
pub struct RWLock { inner: UnsafeCell<ffi::SRWLOCK> }
-pub const RWLOCK_INIT: RWLock = RWLock {
- inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
-};
-
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}
impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) }
+ }
#[inline]
pub unsafe fn read(&self) {
ffi::AcquireSRWLockShared(self.inner.get())
use boxed;
use ptr;
use rt;
-use sys_common::mutex::{MUTEX_INIT, Mutex};
+use sys_common::mutex::Mutex;
pub type Key = DWORD;
pub type Dtor = unsafe extern fn(*mut u8);
// on poisoning and this module needs to operate at a lower level than requiring
// the thread infrastructure to be in place (useful on the borders of
// initialization/destruction).
-static DTOR_LOCK: Mutex = MUTEX_INIT;
+static DTOR_LOCK: Mutex = Mutex::new();
static mut DTORS: *mut Vec<(Key, Dtor)> = 0 as *mut _;
// -------------------------------------------------------------------------
use libc;
use ops::Sub;
use time::Duration;
-use sync::{Once, ONCE_INIT};
+use sync::Once;
const NANOS_PER_SEC: u64 = 1_000_000_000;
fn frequency() -> libc::LARGE_INTEGER {
static mut FREQUENCY: libc::LARGE_INTEGER = 0;
- static ONCE: Once = ONCE_INIT;
+ static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {
// Sure wish we had macro hygiene, no?
#[doc(hidden)]
-pub mod __impl {
- pub use super::imp::Key as KeyInner;
- pub use super::imp::destroy_value;
- pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER;
- pub use sys_common::thread_local::StaticKey as OsStaticKey;
-}
+pub use self::imp::Key as __KeyInner;
/// A thread local storage key which owns its contents.
///
//
// This is trivially devirtualizable by LLVM because we never store anything
// to this field and rustc can declare the `static` as constant as well.
- #[doc(hidden)]
- pub inner: fn() -> &'static __impl::KeyInner<UnsafeCell<Option<T>>>,
+ inner: fn() -> &'static __KeyInner<T>,
// initialization routine to invoke to create a value
- #[doc(hidden)]
- pub init: fn() -> T,
-}
-
-/// Declare a new thread local storage key of type `std::thread::LocalKey`.
-///
-/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
-#[macro_export]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow_internal_unstable]
-macro_rules! thread_local {
- (static $name:ident: $t:ty = $init:expr) => (
- static $name: ::std::thread::LocalKey<$t> = {
- use std::cell::UnsafeCell as __UnsafeCell;
- use std::thread::__local::KeyInner as __KeyInner;
- use std::option::Option as __Option;
- use std::option::Option::None as __None;
-
- __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
- __UnsafeCell { value: __None }
- });
- fn __init() -> $t { $init }
- fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
- &__KEY
- }
- ::std::thread::LocalKey { inner: __getit, init: __init }
- };
- );
- (pub static $name:ident: $t:ty = $init:expr) => (
- pub static $name: ::std::thread::LocalKey<$t> = {
- use std::cell::UnsafeCell as __UnsafeCell;
- use std::thread::__local::KeyInner as __KeyInner;
- use std::option::Option as __Option;
- use std::option::Option::None as __None;
-
- __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = {
- __UnsafeCell { value: __None }
- });
- fn __init() -> $t { $init }
- fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> {
- &__KEY
- }
- ::std::thread::LocalKey { inner: __getit, init: __init }
- };
- );
+ init: fn() -> T,
}
// Macro pain #4586:
// To get around this, we're forced to inject the #[cfg] logic into the macro
// itself. Woohoo.
+/// Declare a new thread local storage key of type `std::thread::LocalKey`.
+///
+/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information.
#[macro_export]
-#[doc(hidden)]
+#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable]
-macro_rules! __thread_local_inner {
+macro_rules! thread_local {
(static $name:ident: $t:ty = $init:expr) => (
- #[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
- not(target_arch = "aarch64")),
- thread_local)]
- static $name: ::std::thread::__local::KeyInner<$t> =
- __thread_local_inner!($init, $t);
+ static $name: ::std::thread::LocalKey<$t> = {
+ #[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
+ not(target_arch = "aarch64")),
+ thread_local)]
+ static __KEY: ::std::thread::__LocalKeyInner<$t> =
+ ::std::thread::__LocalKeyInner::new();
+ fn __init() -> $t { $init }
+ fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
+ ::std::thread::LocalKey::new(__getit, __init)
+ };
);
(pub static $name:ident: $t:ty = $init:expr) => (
- #[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
- not(target_arch = "aarch64")),
- thread_local)]
- pub static $name: ::std::thread::__local::KeyInner<$t> =
- __thread_local_inner!($init, $t);
- );
- ($init:expr, $t:ty) => ({
- #[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))]
- const _INIT: ::std::thread::__local::KeyInner<$t> = {
- ::std::thread::__local::KeyInner {
- inner: ::std::cell::UnsafeCell { value: $init },
- dtor_registered: ::std::cell::UnsafeCell { value: false },
- dtor_running: ::std::cell::UnsafeCell { value: false },
- }
- };
-
- #[allow(trivial_casts)]
- #[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))]
- const _INIT: ::std::thread::__local::KeyInner<$t> = {
- ::std::thread::__local::KeyInner {
- inner: ::std::cell::UnsafeCell { value: $init },
- os: ::std::thread::__local::OsStaticKey {
- inner: ::std::thread::__local::OS_INIT_INNER,
- dtor: ::std::option::Option::Some(
- ::std::thread::__local::destroy_value::<$t>
- ),
- },
- }
+ pub static $name: ::std::thread::LocalKey<$t> = {
+ #[cfg_attr(all(any(target_os = "macos", target_os = "linux"),
+ not(target_arch = "aarch64")),
+ thread_local)]
+ static __KEY: ::std::thread::__LocalKeyInner<$t> =
+ ::std::thread::__LocalKeyInner::new();
+ fn __init() -> $t { $init }
+ fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY }
+ ::std::thread::LocalKey::new(__getit, __init)
};
-
- _INIT
- });
+ );
}
/// Indicator of the state of a thread local storage key.
}
impl<T: 'static> LocalKey<T> {
+ #[doc(hidden)]
+ pub const fn new(inner: fn() -> &'static __KeyInner<T>, init: fn() -> T) -> LocalKey<T> {
+ LocalKey {
+ inner: inner,
+ init: init
+ }
+ }
+
/// Acquires a reference to the value in this TLS key.
///
/// This will lazily initialize the value if this thread has not referenced
mod imp {
use prelude::v1::*;
- use cell::UnsafeCell;
+ use cell::{Cell, UnsafeCell};
use intrinsics;
- use ptr;
pub struct Key<T> {
- // Place the inner bits in an `UnsafeCell` to currently get around the
- // "only Sync statics" restriction. This allows any type to be placed in
- // the cell.
- //
- // Note that all access requires `T: 'static` so it can't be a type with
- // any borrowed pointers still.
- pub inner: UnsafeCell<T>,
+ inner: UnsafeCell<Option<T>>,
// Metadata to keep track of the state of the destructor. Remember that
// these variables are thread-local, not global.
- pub dtor_registered: UnsafeCell<bool>, // should be Cell
- pub dtor_running: UnsafeCell<bool>, // should be Cell
+ dtor_registered: Cell<bool>,
+ dtor_running: Cell<bool>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
impl<T> Key<T> {
- pub unsafe fn get(&'static self) -> Option<&'static T> {
- if intrinsics::needs_drop::<T>() && *self.dtor_running.get() {
+ pub const fn new() -> Key<T> {
+ Key {
+ inner: UnsafeCell::new(None),
+ dtor_registered: Cell::new(false),
+ dtor_running: Cell::new(false)
+ }
+ }
+
+ pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
+ if intrinsics::needs_drop::<T>() && self.dtor_running.get() {
return None
}
self.register_dtor();
- Some(&*self.inner.get())
+ Some(&self.inner)
}
unsafe fn register_dtor(&self) {
- if !intrinsics::needs_drop::<T>() || *self.dtor_registered.get() {
+ if !intrinsics::needs_drop::<T>() || self.dtor_registered.get() {
return
}
register_dtor(self as *const _ as *mut u8,
destroy_value::<T>);
- *self.dtor_registered.get() = true;
+ self.dtor_registered.set(true);
}
}
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
use boxed;
use mem;
+ use ptr;
use libc;
use sys_common::thread_local as os;
// *should* be the case that this loop always terminates because we
// provide the guarantee that a TLS key cannot be set after it is
// flagged for destruction.
- static DTORS: os::StaticKey = os::StaticKey {
- inner: os::INIT_INNER,
- dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)),
- };
+ static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors));
type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
if DTORS.get().is_null() {
let v: Box<List> = box Vec::new();
// Right before we run the user destructor be sure to flag the
// destructor as running for this thread so calls to `get` will return
// `None`.
- *(*ptr).dtor_running.get() = true;
- ptr::read((*ptr).inner.get());
+ (*ptr).dtor_running.set(true);
+ intrinsics::drop_in_place((*ptr).inner.get());
}
}
use prelude::v1::*;
use alloc::boxed;
- use cell::UnsafeCell;
- use mem;
+ use cell::{Cell, UnsafeCell};
+ use marker;
use ptr;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct Key<T> {
- // Statically allocated initialization expression, using an `UnsafeCell`
- // for the same reasons as above.
- pub inner: UnsafeCell<T>,
-
// OS-TLS key that we'll use to key off.
- pub os: OsStaticKey,
+ os: OsStaticKey,
+ marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> ::marker::Sync for Key<T> { }
struct Value<T: 'static> {
key: &'static Key<T>,
- value: T,
+ value: UnsafeCell<Option<T>>,
}
- impl<T> Key<T> {
- pub unsafe fn get(&'static self) -> Option<&'static T> {
- self.ptr().map(|p| &*p)
+ impl<T: 'static> Key<T> {
+ pub const fn new() -> Key<T> {
+ Key {
+ os: OsStaticKey::new(Some(destroy_value::<T>)),
+ marker: marker::PhantomData
+ }
}
- unsafe fn ptr(&'static self) -> Option<*mut T> {
+ pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
let ptr = self.os.get() as *mut Value<T>;
if !ptr.is_null() {
if ptr as usize == 1 {
return None
}
- return Some(&mut (*ptr).value as *mut T);
+ return Some(&(*ptr).value);
}
// If the lookup returned null, we haven't initialized our own local
// copy, so do that now.
- //
- // Also note that this transmute_copy should be ok because the value
- // `inner` is already validated to be a valid `static` value, so we
- // should be able to freely copy the bits.
let ptr: Box<Value<T>> = box Value {
key: self,
- value: mem::transmute_copy(&self.inner),
+ value: UnsafeCell::new(None),
};
let ptr = boxed::into_raw(ptr);
self.os.set(ptr as *mut u8);
- Some(&mut (*ptr).value as *mut T)
+ Some(&(*ptr).value)
}
}
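// Note (editorial): as in `Lazy` above, `1` serves as a sentinel here;
// presumably the destructor path stores it into the OS key so that `get`
// returns `None` while the value is being torn down instead of handing out
// a dangling reference.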
use prelude::v1::*;
use sync::mpsc::{channel, Sender};
- use cell::UnsafeCell;
+ use cell::{Cell, UnsafeCell};
use super::LocalKeyState;
use thread;
#[test]
fn smoke_no_dtor() {
- thread_local!(static FOO: UnsafeCell<i32> = UnsafeCell { value: 1 });
+ thread_local!(static FOO: Cell<i32> = Cell::new(1));
- FOO.with(|f| unsafe {
- assert_eq!(*f.get(), 1);
- *f.get() = 2;
+ FOO.with(|f| {
+ assert_eq!(f.get(), 1);
+ f.set(2);
});
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
- FOO.with(|f| unsafe {
- assert_eq!(*f.get(), 1);
+ FOO.with(|f| {
+ assert_eq!(f.get(), 1);
});
tx.send(()).unwrap();
});
rx.recv().unwrap();
- FOO.with(|f| unsafe {
- assert_eq!(*f.get(), 2);
+ FOO.with(|f| {
+ assert_eq!(f.get(), 2);
});
}
#[test]
fn smoke_dtor() {
- thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell {
- value: None
- });
+ thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
let (tx, rx) = channel();
let _t = thread::spawn(move|| unsafe {
fn circular() {
struct S1;
struct S2;
- thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
- value: None
- });
- thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell {
- value: None
- });
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell::new(None));
static mut HITS: u32 = 0;
impl Drop for S1 {
#[test]
fn self_referential() {
struct S1;
- thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
- value: None
- });
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
#[test]
fn dtors_in_dtors_in_dtors() {
struct S1(Sender<()>);
- thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell {
- value: None
- });
- thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell {
- value: None
- });
+ thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
consider stabilizing its interface")]
pub use self::scoped_tls::ScopedKey;
-#[doc(hidden)] pub use self::local::__impl as __local;
-#[doc(hidden)] pub use self::scoped_tls::__impl as __scoped;
+#[doc(hidden)] pub use self::local::__KeyInner as __LocalKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder
use prelude::v1::*;
-// macro hygiene sure would be nice, wouldn't it?
-#[doc(hidden)]
-pub mod __impl {
- pub use super::imp::KeyInner;
- pub use sys_common::thread_local::INIT as OS_INIT;
-}
-
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
-pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> }
+pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
#[macro_export]
#[allow_internal_unstable]
macro_rules! scoped_thread_local {
- (static $name:ident: $t:ty) => (
- __scoped_thread_local_inner!(static $name: $t);
- );
- (pub static $name:ident: $t:ty) => (
- __scoped_thread_local_inner!(pub static $name: $t);
- );
-}
-
-#[macro_export]
-#[doc(hidden)]
-#[allow_internal_unstable]
-macro_rules! __scoped_thread_local_inner {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
- __scoped_thread_local_inner!($t);
+ ::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
- __scoped_thread_local_inner!($t);
+ ::std::thread::ScopedKey::new();
);
- ($t:ty) => ({
- use std::thread::ScopedKey as __Key;
-
- #[cfg(not(any(windows,
- target_os = "android",
- target_os = "ios",
- target_os = "openbsd",
- target_arch = "aarch64")))]
- const _INIT: __Key<$t> = __Key {
- inner: ::std::thread::__scoped::KeyInner {
- inner: ::std::cell::UnsafeCell { value: 0 as *mut _ },
- }
- };
-
- #[cfg(any(windows,
- target_os = "android",
- target_os = "ios",
- target_os = "openbsd",
- target_arch = "aarch64"))]
- const _INIT: __Key<$t> = __Key {
- inner: ::std::thread::__scoped::KeyInner {
- inner: ::std::thread::__scoped::OS_INIT,
- marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>,
- }
- };
-
- _INIT
- })
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
+ #[doc(hidden)]
+ pub const fn new() -> ScopedKey<T> {
+ ScopedKey { inner: imp::KeyInner::new() }
+ }
+
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
- key: &'a __impl::KeyInner<T>,
+ key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
target_os = "openbsd",
target_arch = "aarch64")))]
mod imp {
- use std::cell::UnsafeCell;
+ use std::cell::Cell;
- #[doc(hidden)]
- pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> }
+ pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
- #[doc(hidden)]
impl<T> KeyInner<T> {
- #[doc(hidden)]
- pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; }
- #[doc(hidden)]
- pub unsafe fn get(&self) -> *mut T { *self.inner.get() }
+ pub const fn new() -> KeyInner<T> {
+ KeyInner { inner: Cell::new(0 as *mut _) }
+ }
+ pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
+ pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
target_os = "openbsd",
target_arch = "aarch64"))]
mod imp {
+ use prelude::v1::*;
+
+ use cell::Cell;
use marker;
- use std::cell::Cell;
use sys_common::thread_local::StaticKey as OsStaticKey;
- #[doc(hidden)]
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
- unsafe impl<T> ::marker::Sync for KeyInner<T> { }
+ unsafe impl<T> marker::Sync for KeyInner<T> { }
- #[doc(hidden)]
impl<T> KeyInner<T> {
- #[doc(hidden)]
+ pub const fn new() -> KeyInner<T> {
+ KeyInner {
+ inner: OsStaticKey::new(None),
+ marker: marker::PhantomData
+ }
+ }
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
- #[doc(hidden)]
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}
use std::sync::atomic;
pub const C1: usize = 1;
-pub const C2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub const C3: fn() = foo;
pub const C4: usize = C1 * C1 + C1 / C1;
pub const C5: &'static usize = &C4;
pub static S1: usize = 3;
-pub static S2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
fn foo() {}
mod s {
#![allow(unstable)]
- use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicUsize, Ordering};
- static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+ static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
mod s {
#![allow(unstable)]
- use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicUsize, Ordering};
- static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+ static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
use id::Id;
mod s {
- use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicUsize, Ordering};
- static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+ static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
mod s {
#![allow(unstable)]
- use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicUsize, Ordering};
- static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+ static S_COUNT: AtomicUsize = AtomicUsize::new(0);
pub fn next_count() -> usize {
S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
mod foo {
use std::cell::{UnsafeCell};
- static mut count : UnsafeCell<u64> = UnsafeCell { value: 1 };
+ static mut count : UnsafeCell<u64> = UnsafeCell::new(1);
pub struct S { pub a: u8, pub b: String, secret_uid: u64 }
use std::cell::UnsafeCell;
-const A: UnsafeCell<usize> = UnsafeCell { value: 1 };
+const A: UnsafeCell<usize> = UnsafeCell::new(1);
const B: &'static UnsafeCell<usize> = &A;
//~^ ERROR: cannot borrow a constant which contains interior mutability
struct C { a: UnsafeCell<usize> }
-const D: C = C { a: UnsafeCell { value: 1 } };
+const D: C = C { a: UnsafeCell::new(1) };
const E: &'static UnsafeCell<usize> = &D.a;
//~^ ERROR: cannot borrow a constant which contains interior mutability
const F: &'static C = &D;
//~^ ERROR allocations are not allowed in statics
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
//~| ERROR the trait `core::marker::Sync` is not implemented for the type
-//~| ERROR E0015
fn main() { }
use std::ptr;
fn main() {
- let x = ATOMIC_BOOL_INIT;
+ let x = AtomicBool::new(false);
let x = *&x; //~ ERROR: cannot move out of borrowed content
- let x = ATOMIC_ISIZE_INIT;
+ let x = AtomicIsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
- let x = ATOMIC_USIZE_INIT;
+ let x = AtomicUsize::new(0);
let x = *&x; //~ ERROR: cannot move out of borrowed content
let x: AtomicPtr<usize> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of borrowed content
mod s {
#![allow(unstable)]
- use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+ use std::sync::atomic::{AtomicUsize, Ordering};
- static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+ static S_COUNT: AtomicUsize = AtomicUsize::new(0);
/// generates a globally unique count (global across the current
/// process, that is)
// This test makes sure that the compiler doesn't crash when trying to assign
// debug locations to const-expressions.
-use std::sync::MUTEX_INIT;
+use std::sync::StaticMutex;
use std::cell::UnsafeCell;
const CONSTANT: u64 = 3 + 4;
const NESTED: (Struct, TupleStruct) = (STRUCT, TUPLE_STRUCT);
-const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell { value: false };
+const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell::new(false);
fn main() {
let mut _constant = CONSTANT;
let mut _string = STRING;
let mut _vec = VEC;
let mut _nested = NESTED;
- let mut _extern = MUTEX_INIT;
+ let mut _extern = StaticMutex::new();
let mut _unsafe_cell = UNSAFE_CELL;
}
C = 2
}
-static FLAG: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+static FLAG: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
impl Drop for E {
fn drop(&mut self) {
// `T`. Issue #20300.
use std::marker::{PhantomData};
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{AtomicUsize};
use std::sync::atomic::Ordering::SeqCst;
-static COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+static COUNTER: AtomicUsize = AtomicUsize::new(0);
// Preamble.
trait Trait { type Item; }
// destructor.
use std::thread;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);
// destructor.
use std::thread;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);
static STATIC1: UnsafeEnum<isize> = UnsafeEnum::VariantSafe;
-static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
-const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
+static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
+const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
static STATIC3: MyUnsafe<isize> = MyUnsafe{value: CONST};
static STATIC4: &'static MyUnsafePack<isize> = &STATIC2;
unsafe impl<T: Send> Sync for Wrap<T> {}
-static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell{value: 2});
+static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(2));
static WRAPPED_UNSAFE: Wrap<&'static MyUnsafePack<isize>> = Wrap { value: &UNSAFE };
fn main() {
extern crate issue_17718 as other;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
const C1: usize = 1;
-const C2: AtomicUsize = ATOMIC_USIZE_INIT;
+const C2: AtomicUsize = AtomicUsize::new(0);
const C3: fn() = foo;
const C4: usize = C1 * C1 + C1 / C1;
const C5: &'static usize = &C4;
};
static S1: usize = 3;
-static S2: AtomicUsize = ATOMIC_USIZE_INIT;
+static S2: AtomicUsize = AtomicUsize::new(0);
mod test {
static A: usize = 4;
// construction.
-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};
#[derive(Debug)]
struct Noisy(u8);
assert_eq!(0x03_04, event_log());
}
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
fn reset_log() {
LOG.store(0, Ordering::SeqCst);
use std::thread;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
struct D(u8);
// even when no Drop-implementations are involved.
-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)
// even when no Drop-implementations are involved.
-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};
struct W { wrapped: u32 }
struct S { f0: W, _f1: i32 }
"expect: 0x{:x} actual: 0x{:x}", expect, actual);
}
-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);
fn event_log() -> usize {
LOG.load(Ordering::SeqCst)
#![feature(rand, core)]
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
use std::__rand::{thread_rng, Rng};
use std::thread;
static drop_counts: [AtomicUsize; MAX_LEN] =
// FIXME #5244: AtomicUsize is not Copy.
[
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
- ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+ AtomicUsize::new(0), AtomicUsize::new(0),
];
-static creation_count: AtomicUsize = ATOMIC_USIZE_INIT;
+static creation_count: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
struct DropCounter { x: u32, creation_id: usize }