}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> { }
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> { }
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
-#[cfg(not(stage0))] // remove cfg after new snapshot
+// remove cfg after new snapshot
+#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> { }
+unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> { }
+unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
-#[cfg(not(stage0))] // remove cfg after new snapshot
+// remove cfg after new snapshot
+#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
pub fn try_unwrap(this: Self) -> Result<T, Self> {
// See `drop` for why all these atomics are like this
if this.inner().strong.compare_and_swap(1, 0, Release) != 1 {
- return Err(this)
+ return Err(this);
}
atomic::fence(Acquire);
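// The Acquire fence above synchronizes with the Release decrements
// performed by other threads as they dropped their `Arc`s, so any writes
// they made through those pointers are visible before we move the data out.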
// check if the weak counter is currently "locked"; if so, spin.
if cur == usize::MAX {
- continue
+ continue;
}
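// (`is_unique` is what "locks" the weak count: it CASes the count to
// usize::MAX while it inspects the strong count, so seeing usize::MAX
// here means another thread holds the lock and we should retry.)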
// NOTE: this code currently ignores the possibility of overflow
// into usize::MAX.

// Unlike with Clone(), we need this to be an Acquire read to
// synchronize with the write coming from `is_unique`, so that the
// events prior to that write happen before this read.
if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur {
- return Weak { _ptr: this._ptr }
+ return Weak { _ptr: this._ptr };
}
}
}
let ptr = *self._ptr;
// if ptr.is_null() { return }
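// `Arc` is marked `#[unsafe_no_drop_flag]`, so this drop glue may run more
// than once; after the first run the pointer is either zeroed or filled
// with `mem::POST_DROP_USIZE`, and both values mean the contents have
// already been dropped.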
if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
- return
+ return;
}
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
if self.inner().strong.fetch_sub(1, Release) != 1 {
- return
+ return;
}
// This fence is needed to prevent reordering of the use of the data and
// the deletion of the data.
let n = inner.strong.load(Relaxed);
if n == 0 {
- return None
+ return None;
}
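// A CAS loop is used instead of `fetch_add` because once the strong
// count has hit zero it must never rise again; otherwise we could
// resurrect a value that has already been dropped.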
// Relaxed is valid for the same reason it is on Arc's Clone impl
let old = inner.strong.compare_and_swap(n, n + 1, Relaxed);
if old == n {
- return Some(Arc { _ptr: self._ptr })
+ return Some(Arc { _ptr: self._ptr });
}
}
}
}
}
- return Weak { _ptr: self._ptr }
+ return Weak { _ptr: self._ptr };
}
}
// see comments above for why this check is here
if ptr as *mut u8 as usize == 0 || ptr as *mut u8 as usize == mem::POST_DROP_USIZE {
- return
+ return;
}
// If we find out that we were the last weak pointer, then it's time to
struct Canary(*mut atomic::AtomicUsize);
- impl Drop for Canary
- {
+ impl Drop for Canary {
fn drop(&mut self) {
unsafe {
match *self {
#[test]
fn manually_share_arc() {
- let v = vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+ let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v);
let (tx, rx) = channel();
#[unstable(feature = "box_heap",
reason = "may be renamed; uncertain about custom allocator design",
issue = "27779")]
-pub const HEAP: ExchangeHeapSingleton =
- ExchangeHeapSingleton { _force_singleton: () };
+pub const HEAP: ExchangeHeapSingleton = ExchangeHeapSingleton { _force_singleton: () };
/// This is the singleton type used solely for `boxed::HEAP`.
#[unstable(feature = "box_heap",
}
}
-impl<T : ?Sized> Box<T> {
+impl<T: ?Sized> Box<T> {
/// Constructs a box from a raw pointer.
///
/// After this function call, the raw pointer is owned by the resulting box.
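///
/// A sketch of the intended round trip (assuming the companion
/// `Box::into_raw` is available):
///
/// ```
/// let b = Box::new(5);
/// let raw = Box::into_raw(b);
/// let b = unsafe { Box::from_raw(raw) };
/// ```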
}
#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
-impl<A,F> FnBox<A> for F
- where F: FnOnce<A>
+impl<A, F> FnBox<A> for F where F: FnOnce<A>
{
type Output = F::Output;
}
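// A usage sketch (nightly only, under `#![feature(fnbox)]`): `FnBox` lets a
// boxed closure be consumed by value, which a plain boxed `FnOnce` does not
// allow:
//
//     let f: Box<FnBox<(), Output = i32>> = Box::new(|| 42);
//     assert_eq!(f.call_box(()), 42);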
#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
-impl<'a,A,R> FnOnce<A> for Box<FnBox<A,Output=R>+'a> {
+impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
}
#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")]
-impl<'a,A,R> FnOnce<A> for Box<FnBox<A,Output=R>+Send+'a> {
+impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + Send + 'a> {
type Output = R;
extern "rust-call" fn call_once(self, args: A) -> R {
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
-impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
#[stable(feature = "box_slice_clone", since = "1.3.0")]
impl<T: Clone> Clone for Box<[T]> {
#[test]
fn deref() {
- fn homura<T: Deref<Target = i32>>(_: T) {
- }
+ fn homura<T: Deref<Target = i32>>(_: T) {}
homura(Box::new(765));
}
use core::{isize, usize};
#[allow(improper_ctypes)]
-extern {
+extern "C" {
#[allocator]
fn __rust_allocate(size: usize, align: usize) -> *mut u8;
fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize);
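// These symbols are not defined here; they are provided at link time by
// whichever allocator crate (`alloc_jemalloc` or `alloc_system`) the
// compiler selects.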
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !marker::Sync for Rc<T> {}
-#[cfg(not(stage0))] // remove cfg after new snapshot
+// remove cfg after new snapshot
+#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
-impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
impl<T> Rc<T> {
/// Constructs a new `Rc<T>`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Clone for Rc<T> {
-
/// Makes a clone of the `Rc<T>`.
///
/// When you clone an `Rc<T>`, it will create another pointer to the data and
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized+Hash> Hash for Rc<T> {
+impl<T: ?Sized + Hash> Hash for Rc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized+fmt::Display> fmt::Display for Rc<T> {
+impl<T: ?Sized + fmt::Display> fmt::Display for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized+fmt::Debug> fmt::Debug for Rc<T> {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !marker::Sync for Weak<T> {}
-#[cfg(not(stage0))] // remove cfg after new snapshot
+// remove cfg after new snapshot
+#[cfg(not(stage0))]
#[unstable(feature = "coerce_unsized", issue = "27732")]
-impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
impl<T: ?Sized> Weak<T> {
/// Upgrades a weak reference to a strong reference.
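///
/// Returns `None` if there were no strong references and the data was
/// dropped. For example:
///
/// ```
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// let weak_five = Rc::downgrade(&five);
/// let strong_five: Option<Rc<_>> = weak_five.upgrade();
/// ```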
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> Clone for Weak<T> {
-
/// Makes a clone of the `Weak<T>`.
///
/// This increases the weak reference count.
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(Weak)")
}
not(target_os = "android"),
not(target_env = "musl")),
link(name = "pthread"))]
-extern {
+extern "C" {
fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
use libc;
use MIN_ALIGN;
- extern {
+ extern "C" {
// Apparently android doesn't have posix_memalign
#[cfg(target_os = "android")]
fn memalign(align: libc::size_t, size: libc::size_t) -> *mut libc::c_void;
} else {
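// `HeapAlloc` has no alignment parameter, so over-aligned requests are
// handled by allocating `size + align` bytes and rounding the pointer up;
// `align_ptr` stashes the original pointer in a header just below the
// address it returns so the allocation can be recovered on free.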
let ptr = HeapAlloc(GetProcessHeap(), 0, (size + align) as SIZE_T) as *mut u8;
if ptr.is_null() {
- return ptr
+ return ptr;
}
align_ptr(ptr, align)
}
header.0 as LPVOID,
(size + align) as SIZE_T) as *mut u8;
if new.is_null() {
- return new
+ return new;
}
align_ptr(new, align)
}
let start = round_up(after_tydesc, align);
- //debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
- // start, size, align, is_done);
+ // debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
+ // start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as isize) as *const i8);
}
align: usize,
}
-trait AllTypes { fn dummy(&self) { } }
-impl<T:?Sized> AllTypes for T { }
+trait AllTypes {
+ fn dummy(&self) {}
+}
+
+impl<T: ?Sized> AllTypes for T {}
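// `AllTypes` is implemented for every type purely so that any `T` can be
// made into a trait object; the vtable of that object begins with the
// drop glue, size, and alignment fields that `TyDesc` describes, which is
// what `get_tydesc` reads back out below.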
unsafe fn get_tydesc<T>() -> *const TyDesc {
use std::raw::TraitObject;
for _ in 0..100000 {
arena.alloc(Noncopy {
string: "hello world".to_string(),
- array: vec!(1, 2, 3, 4, 5),
+ array: vec![1, 2, 3, 4, 5],
});
}
}
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
- array: vec!(1, 2, 3, 4, 5),
+ array: vec![1, 2, 3, 4, 5],
})
})
}
b.iter(|| {
let _: Box<_> = box Noncopy {
string: "hello world".to_string(),
- array: vec!(1, 2, 3, 4, 5),
+ array: vec![1, 2, 3, 4, 5],
};
})
}
arena.alloc(|| {
Noncopy {
string: "hello world".to_string(),
- array: vec!(1, 2, 3, 4, 5),
+ array: vec![1, 2, 3, 4, 5],
}
})
})