weak: atomic::AtomicUint::new(1),
data: data,
};
- Arc { _ptr: NonZero(unsafe { mem::transmute(x) }) }
+ Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } }
}
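For reference, the API shape this patch migrates to can be sketched as below. This is a simplified reconstruction inferred from the `unsafe { NonZero::new(..) }` and `*self._ptr` forms used throughout the diff (the real type carries bounds and derives omitted here):

    use std::ops::Deref;

    pub struct NonZero<T>(T);  // field is now private

    impl<T> NonZero<T> {
        /// Unsafe: the caller must guarantee `inner` is never zero/null.
        pub unsafe fn new(inner: T) -> NonZero<T> { NonZero(inner) }
    }

    impl<T> Deref for NonZero<T> {
        type Target = T;

        fn deref<'a>(&'a self) -> &'a T {
            let NonZero(ref inner) = *self;
            inner
        }
    }
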
/// Downgrades the `Arc<T>` to a `Weak<T>` reference.
// pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync`
// because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer
// to these contents.
- let NonZero(ptr) = self._ptr;
- unsafe { &*ptr }
+ unsafe { &**self._ptr }
}
}
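A quick usage sketch of the strong/weak pair being wired up here, using `downgrade` as documented above (`upgrade` assumed from the era's `Weak` API):

    use std::sync::Arc;

    let strong = Arc::new(5i);
    let weak = strong.downgrade();
    // Upgrading succeeds while a strong reference is still alive.
    assert!(weak.upgrade().is_some());
    drop(strong);
    // Once the last strong reference is gone, upgrade yields None.
    assert!(weak.upgrade().is_none());
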
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the Arc itself to be `mut`, so we're returning the only
// possible reference to the inner data.
- let NonZero(ptr) = self._ptr;
- let inner = unsafe { &mut *ptr };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.data
}
}
fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
// the structure is guaranteed to be zeroed after the first run, which the null check below catches)
- let NonZero(ptr) = self._ptr;
+ let ptr = *self._ptr;
if ptr.is_null() { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
#[inline]
fn inner(&self) -> &ArcInner<T> {
// See comments above for why this is "safe"
- let NonZero(ptr) = self._ptr;
- unsafe { &*ptr }
+ unsafe { &**self._ptr }
}
}
/// } // implicit drop
/// ```
fn drop(&mut self) {
- let NonZero(ptr) = self._ptr;
+ let ptr = *self._ptr;
// see comments above for why this check is here
if ptr.is_null() { return }
// there is an implicit weak pointer owned by all the strong pointers, which
// ensures that the weak destructor never frees the allocation while the strong
// destructor is running, even if the weak pointer is stored inside the strong one.
- _ptr: NonZero(transmute(box RcBox {
+ _ptr: NonZero::new(transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
let val = ptr::read(&*rc); // copy the contained object
// destruct the box and skip our Drop
// we can ignore the refcounts because we know we're unique
- let NonZero(ptr) = rc._ptr;
- deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>());
forget(rc);
Ok(val)
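The lines above are the success path of `try_unwrap`: read the value out, free the box without running the destructor, and skip the refcounts since uniqueness was already established. A usage sketch, in this module's free-function style:

    use std::rc;
    use std::rc::Rc;

    let x = Rc::new(3u);
    assert_eq!(rc::try_unwrap(x), Ok(3u));

    let y = Rc::new(4u);
    let _keep = y.clone();
    // A second strong pointer exists, so the Rc is handed back unchanged.
    assert_eq!(rc::try_unwrap(y), Err(Rc::new(4u)));
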
#[experimental]
pub fn get_mut<'a, T>(rc: &'a mut Rc<T>) -> Option<&'a mut T> {
if is_unique(rc) {
- let inner = unsafe {
- let NonZero(ptr) = rc._ptr;
- &mut *ptr
- };
+ let inner = unsafe { &mut **rc._ptr };
Some(&mut inner.value)
} else {
None
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only
// possible reference to the inner value.
- let inner = unsafe {
- let NonZero(ptr) = self._ptr;
- &mut *ptr
- };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.value
}
}
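Both fragments above hand out `&mut` to the inner value only when the reference count is known to be 1. A usage sketch for `get_mut`:

    use std::rc;
    use std::rc::Rc;

    let mut x = Rc::new(3u);
    *rc::get_mut(&mut x).unwrap() = 4u;
    assert_eq!(*x, 4u);

    let _shared = x.clone();
    // No longer unique, so mutable access is refused.
    assert!(rc::get_mut(&mut x).is_none());
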
/// ```
fn drop(&mut self) {
unsafe {
- let NonZero(ptr) = self._ptr;
+ let ptr = *self._ptr;
if !ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
/// ```
fn drop(&mut self) {
unsafe {
- let NonZero(ptr) = self._ptr;
+ let ptr = *self._ptr;
if !ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all the strong pointers
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> {
- let NonZero(ptr) = self._ptr;
- unsafe { &(*ptr) }
- }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> {
- let NonZero(ptr) = self._ptr;
- unsafe { &(*ptr) }
- }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
#[cfg(test)]
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. This matters because a NULL slice pointer would break the
// null pointer optimization for enums.
- Vec { ptr: NonZero(EMPTY as *mut T), len: 0, cap: 0 }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: 0 }
}
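The comment above is the whole point of storing a `NonZero` pointer: the compiler can pack an enum discriminant into the never-null pointer, so an `Option` around the vector costs nothing extra. Illustration:

    use std::mem;

    // The discriminant of Option<Vec<u8>> lives in the known-non-null
    // pointer, so no separate tag word is needed.
    assert_eq!(mem::size_of::<Option<Vec<u8>>>(), mem::size_of::<Vec<u8>>());
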
/// Constructs a new, empty `Vec<T>` with the specified capacity.
#[stable]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
- Vec { ptr: NonZero(EMPTY as *mut T), len: 0, cap: uint::MAX }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: uint::MAX }
} else if capacity == 0 {
Vec::new()
} else {
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
- Vec { ptr: NonZero(ptr as *mut T), len: 0, cap: capacity }
+ Vec { ptr: unsafe { NonZero::new(ptr as *mut T) }, len: 0, cap: capacity }
}
}
#[unstable = "needs finalization"]
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
capacity: uint) -> Vec<T> {
- Vec { ptr: NonZero(ptr), len: length, cap: capacity }
+ Vec { ptr: NonZero::new(ptr), len: length, cap: capacity }
}
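A round-trip sketch for `from_raw_parts`; the caller is on the hook for the pointer, length, and capacity genuinely describing a live allocation:

    use std::mem;

    let mut v = vec![1i, 2, 3];
    let p = v.as_mut_ptr();
    let len = v.len();
    let cap = v.capacity();
    unsafe {
        // Give up ownership of the buffer, then rebuild the Vec from it.
        mem::forget(v);
        let rebuilt = Vec::from_raw_parts(p, len, cap);
        assert_eq!(rebuilt, vec![1i, 2, 3]);
    }
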
/// Creates a vector by copying the elements from a raw pointer.
pub fn shrink_to_fit(&mut self) {
if mem::size_of::<T>() == 0 { return }
- let NonZero(ptr) = self.ptr;
if self.len == 0 {
if self.cap != 0 {
unsafe {
- dealloc(ptr, self.cap)
+ dealloc(*self.ptr, self.cap)
}
self.cap = 0;
}
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
- let ptr = reallocate(ptr as *mut u8,
+ let ptr = reallocate(*self.ptr as *mut u8,
self.cap * mem::size_of::<T>(),
self.len * mem::size_of::<T>(),
mem::min_align_of::<T>()) as *mut T;
if ptr.is_null() { ::alloc::oom() }
- self.ptr = NonZero(ptr);
+ self.ptr = NonZero::new(ptr);
}
self.cap = self.len;
}
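Since the branch above reallocates the buffer down to `self.len` elements and then sets `cap = len`, shrinking is exact in this implementation:

    let mut v = Vec::with_capacity(10u);
    v.push(1i);
    v.push(2i);
    v.shrink_to_fit();
    assert_eq!(v.len(), 2);
    // Exact by construction here: the buffer was reallocated to len elements.
    assert_eq!(v.capacity(), 2);
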
#[inline]
#[stable]
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
- let NonZero(ptr) = self.ptr;
unsafe {
mem::transmute(RawSlice {
- data: ptr as *const T,
+ data: *self.ptr as *const T,
len: self.len,
})
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
- let NonZero(ptr) = self.ptr;
+ let ptr = *self.ptr;
let cap = self.cap;
let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size { panic!("capacity overflow") }
unsafe {
- let NonZero(ptr) = self.ptr;
- let ptr = alloc_or_realloc(ptr, old_size, size);
+ let ptr = alloc_or_realloc(*self.ptr, old_size, size);
if ptr.is_null() { ::alloc::oom() }
- self.ptr = NonZero(ptr);
+ self.ptr = NonZero::new(ptr);
}
self.cap = max(self.cap, 2) * 2;
}
unsafe {
- let NonZero(ptr) = self.ptr;
- let end = ptr.offset(self.len as int);
+ let end = (*self.ptr).offset(self.len as int);
ptr::write(&mut *end, value);
self.len += 1;
}
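The growth policy in `push` doubles the allocation, with a floor of 4 elements from `max(self.cap, 2) * 2`, which is what makes repeated pushes amortized O(1):

    let mut v = Vec::new();
    for i in range(0u, 10) {
        v.push(i);
    }
    // Capacity grew in doubling steps (4, 8, 16) rather than per element.
    assert_eq!(v.capacity(), 16);
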
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
unsafe {
- let begin = self.ptr.0 as *const T;
+ let begin = *self.ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
- (self.ptr.0 as uint + self.len()) as *const T
+ (*self.ptr as uint + self.len()) as *const T
} else {
- self.ptr.0.offset(self.len() as int) as *const T
+ (*self.ptr).offset(self.len() as int) as *const T
};
self.set_len(0);
Drain {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
- let NonZero(ptr) = self.ptr;
- let ptr = alloc_or_realloc(ptr, self.cap * mem::size_of::<T>(), size);
+ let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size);
if ptr.is_null() { ::alloc::oom() }
- self.ptr = NonZero(ptr);
+ self.ptr = NonZero::new(ptr);
}
self.cap = capacity;
}
#[inline]
#[stable]
fn as_slice<'a>(&'a self) -> &'a [T] {
- let NonZero(ptr) = self.ptr;
unsafe {
mem::transmute(RawSlice {
- data: ptr as *const T,
+ data: *self.ptr as *const T,
len: self.len
})
}
for x in self.iter() {
ptr::read(x);
}
- let NonZero(ptr) = self.ptr;
- dealloc(ptr, self.cap)
+ dealloc(*self.ptr, self.cap)
}
}
}
for _x in self { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
mem::forget(self);
- Vec { ptr: NonZero(allocation), cap: cap, len: 0 }
+ Vec { ptr: NonZero::new(allocation), cap: cap, len: 0 }
}
}
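Finally, the conversion above (`into_inner` on the era's `IntoIter`) drops any elements not yet yielded and reuses the allocation as an empty vector; note that it rebuilds with `len: 0` but keeps the original `cap`:

    let v = vec![1i, 2, 3];
    let mut it = v.into_iter();
    assert_eq!(it.next(), Some(1i));
    let empty = it.into_inner();
    // The two remaining elements were dropped; the buffer itself was kept.
    assert_eq!(empty.len(), 0);
    assert!(empty.capacity() >= 3);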