use core::prelude::*;
use core::cmp::Ordering::{Greater, Less, Equal};
+#[cfg(not(stage0))]
+use core::intrinsics::arith_offset;
use core::iter::Zip;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut, Index, IndexMut};
RawItems::from_parts(slice.as_ptr(), slice.len())
}
+ #[cfg(stage0)]
unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems<T> {
if mem::size_of::<T>() == 0 {
RawItems {
}
}
+ #[cfg(not(stage0))]
+ // Builds a `RawItems` covering `len` items starting at `ptr`.
+ //
+ // For zero-sized T, `ptr.offset(len)` would return `ptr` unchanged (the
+ // element size is 0), so `tail` is computed byte-wise with `arith_offset`;
+ // the `head`/`tail` distance then encodes the remaining count rather than
+ // a dereferenceable address (see `next`/`next_back` below, which compare
+ // `head == tail` to detect exhaustion).
+ // NOTE(review): caller must guarantee `ptr` is valid for `len` reads —
+ // implied by the `unsafe fn` contract, confirm at call sites.
+ unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems<T> {
+ if mem::size_of::<T>() == 0 {
+ RawItems {
+ head: ptr,
+ // byte-wise end marker, not a real element address
+ tail: arith_offset(ptr as *const i8, len as isize) as *const T,
+ }
+ } else {
+ RawItems {
+ head: ptr,
+ tail: ptr.offset(len as isize),
+ }
+ }
+ }
+
+ #[cfg(stage0)]
unsafe fn push(&mut self, val: T) {
ptr::write(self.tail as *mut T, val);
self.tail = self.tail.offset(1);
}
}
+
+ #[cfg(not(stage0))]
+ // Writes `val` at `tail`, then advances `tail` by one element — or by one
+ // *byte* via `arith_offset` when T is zero-sized, since `offset(1)` on a
+ // ZST pointer would not move and the iterator's `head == tail` check
+ // would stop counting.
+ // NOTE(review): assumes the caller has reserved room past `tail` — TODO
+ // confirm against the surrounding RawItems usage.
+ unsafe fn push(&mut self, val: T) {
+ ptr::write(self.tail as *mut T, val);
+
+ if mem::size_of::<T>() == 0 {
+ self.tail = arith_offset(self.tail as *const i8, 1) as *const T;
+ } else {
+ self.tail = self.tail.offset(1);
+ }
+ }
}
impl<T> Iterator for RawItems<T> {
type Item = T;
+ #[cfg(stage0)]
fn next(&mut self) -> Option<T> {
if self.head == self.tail {
None
}
}
}
+
+ #[cfg(not(stage0))]
+ // Pops the front item. `head == tail` marks exhaustion; for zero-sized T
+ // `head` is stepped one byte with `arith_offset` (a plain `offset(1)`
+ // would return the same pointer) so the equality check still counts down.
+ fn next(&mut self) -> Option<T> {
+ if self.head == self.tail {
+ None
+ } else {
+ unsafe {
+ // read out the value before advancing past it
+ let ret = Some(ptr::read(self.head));
+
+ if mem::size_of::<T>() == 0 {
+ self.head = arith_offset(self.head as *const i8, 1) as *const T;
+ } else {
+ self.head = self.head.offset(1);
+ }
+
+ ret
+ }
+ }
+ }
}
impl<T> DoubleEndedIterator for RawItems<T> {
+ #[cfg(stage0)]
fn next_back(&mut self) -> Option<T> {
if self.head == self.tail {
None
}
}
}
+
+ #[cfg(not(stage0))]
+ // Pops the back item: step `tail` back one element first (one *byte* via
+ // `arith_offset` for zero-sized T, mirroring `next`), then read from the
+ // new `tail`, which now points at the last live element.
+ fn next_back(&mut self) -> Option<T> {
+ if self.head == self.tail {
+ None
+ } else {
+ unsafe {
+ if mem::size_of::<T>() == 0 {
+ self.tail = arith_offset(self.tail as *const i8, -1) as *const T;
+ } else {
+ self.tail = self.tail.offset(-1);
+ }
+
+ Some(ptr::read(self.tail))
+ }
+ }
+ }
}
impl<T> Drop for RawItems<T> {
use core::fmt;
use core::hash::{self, Hash};
use core::intrinsics::assume;
+#[cfg(not(stage0))]
+use core::intrinsics::arith_offset;
use core::iter::{repeat, FromIterator};
use core::marker::PhantomData;
use core::mem;
/// }
/// ```
#[inline]
+ #[cfg(stage0)]
fn into_iter(self) -> IntoIter<T> {
unsafe {
let ptr = *self.ptr;
IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end }
}
}
+
+ #[inline]
+ #[cfg(not(stage0))]
+ // Consumes the vector and hands its buffer to `IntoIter` without copying.
+ // `end` is one-past-the-last element; for zero-sized T it is computed
+ // byte-wise with `arith_offset` so that `ptr != end` still encodes the
+ // remaining length. `mem::forget(self)` stops `Vec`'s destructor from
+ // freeing the allocation that the returned iterator now owns.
+ fn into_iter(self) -> IntoIter<T> {
+ unsafe {
+ let ptr = *self.ptr;
+ // the Vec pointer is never null; let the optimizer know
+ assume(!ptr.is_null());
+ let cap = self.cap;
+ let begin = ptr as *const T;
+ let end = if mem::size_of::<T>() == 0 {
+ arith_offset(ptr as *const i8, self.len() as isize) as *const T
+ } else {
+ ptr.offset(self.len() as isize) as *const T
+ };
+ mem::forget(self);
+ IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end }
+ }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
type Item = T;
#[inline]
+ #[cfg(stage0)]
fn next(&mut self) -> Option<T> {
unsafe {
if self.ptr == self.end {
}
}
+ #[inline]
+ #[cfg(not(stage0))]
+ // Front iteration over the owned buffer; `ptr == end` means exhausted.
+ fn next(&mut self) -> Option<T> {
+ unsafe {
+ if self.ptr == self.end {
+ None
+ } else {
+ if mem::size_of::<T>() == 0 {
+ // purposefully don't use 'ptr.offset' because for
+ // vectors with 0-size elements this would return the
+ // same pointer.
+ self.ptr = arith_offset(self.ptr as *const i8, 1) as *const T;
+
+ // Use a non-null pointer value
+ Some(ptr::read(EMPTY as *mut T))
+ } else {
+ // read the current element, then step past it
+ let old = self.ptr;
+ self.ptr = self.ptr.offset(1);
+
+ Some(ptr::read(old))
+ }
+ }
+ }
+ }
+
+
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let diff = (self.end as usize) - (self.ptr as usize);
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
#[inline]
+ #[cfg(stage0)]
fn next_back(&mut self) -> Option<T> {
unsafe {
if self.end == self.ptr {
}
}
}
+
+ #[inline]
+ #[cfg(not(stage0))]
+ // Back iteration: step `end` back one element (one byte for zero-sized T,
+ // mirroring `next`), then read the element it now points at.
+ fn next_back(&mut self) -> Option<T> {
+ unsafe {
+ if self.end == self.ptr {
+ None
+ } else {
+ if mem::size_of::<T>() == 0 {
+ // See above for why 'ptr.offset' isn't used
+ self.end = arith_offset(self.end as *const i8, -1) as *const T;
+
+ // Use a non-null pointer value
+ Some(ptr::read(EMPTY as *mut T))
+ } else {
+ self.end = self.end.offset(-1);
+
+ Some(ptr::read(mem::transmute(self.end)))
+ }
+ }
+ }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
/// returned value will result in undefined behavior.
pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
+ /// Calculates the offset from a pointer, potentially wrapping.
+ ///
+ /// This is implemented as an intrinsic to avoid converting to and from an
+ /// integer, since the conversion inhibits certain optimizations.
+ ///
+ /// # Safety
+ ///
+ /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
+ /// resulting pointer to point into or one byte past the end of an allocated
+ /// object, and it wraps with two's complement arithmetic. The resulting
+ /// value is not necessarily valid to be used to actually access memory.
+ #[cfg(not(stage0))]
+ pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
+
/// Copies `count * size_of<T>` bytes from `src` to `dst`. The source
/// and destination may *not* overlap.
///
fn clone_from_slice(&mut self, &[Self::Item]) -> usize where Self::Item: Clone;
}
+// Use macros to be generic over const/mut
+// Steps `$ptr` by `$by` elements. Zero-sized elements are stepped by
+// integer address arithmetic (transmute of a wrapping_add), because
+// `offset` multiplies by the element size and would not move the pointer.
+#[cfg(stage0)]
+macro_rules! slice_offset {
+ ($ptr:expr, $by:expr) => {{
+ let ptr = $ptr;
+ if size_from_ptr(ptr) == 0 {
+ transmute((ptr as isize).wrapping_add($by))
+ } else {
+ ptr.offset($by)
+ }
+ }};
+}
+
+#[cfg(not(stage0))]
+// Same contract as the stage0 version above, but the zero-sized-element
+// case uses the `arith_offset` intrinsic on a byte pointer instead of the
+// isize transmute round-trip, avoiding the pointer/integer conversion.
+macro_rules! slice_offset {
+ ($ptr:expr, $by:expr) => {{
+ let ptr = $ptr;
+ if size_from_ptr(ptr) == 0 {
+ ::intrinsics::arith_offset(ptr as *mut i8, $by) as *mut _
+ } else {
+ ptr.offset($by)
+ }
+ }};
+}
+
+// Converts `$ptr` into a reference. For zero-sized elements the pointer's
+// address is meaningless, so a fixed non-null placeholder address (1) is
+// used instead; it is never actually dereferenced for a ZST.
+macro_rules! slice_ref {
+ ($ptr:expr) => {{
+ let ptr = $ptr;
+ if size_from_ptr(ptr) == 0 {
+ // Use a non-null pointer value
+ &mut *(1 as *mut _)
+ } else {
+ transmute(ptr)
+ }
+ }};
+}
+
#[unstable(feature = "core")]
impl<T> SliceExt for [T] {
type Item = T;
#[inline]
fn iter<'a>(&'a self) -> Iter<'a, T> {
unsafe {
- let p = self.as_ptr();
- assume(!p.is_null());
- if mem::size_of::<T>() == 0 {
- Iter {ptr: p,
- end: ((p as usize).wrapping_add(self.len())) as *const T,
- _marker: marker::PhantomData}
+ // For zero-sized T use the non-null placeholder address 1 (never
+ // dereferenced — see slice_ref!); otherwise take the real data
+ // pointer and tell the optimizer it is non-null.
+ let p = if mem::size_of::<T>() == 0 {
+ 1 as *const _
} else {
- Iter {ptr: p,
- end: p.offset(self.len() as isize),
- _marker: marker::PhantomData}
+ let p = self.as_ptr();
+ assume(!p.is_null());
+ p
+ };
+
+ // slice_offset! handles both the ZST (byte-wise) and sized cases
+ Iter {
+ ptr: p,
+ end: slice_offset!(p, self.len() as isize),
+ _marker: marker::PhantomData
}
}
}
#[inline]
fn iter_mut<'a>(&'a mut self) -> IterMut<'a, T> {
unsafe {
- let p = self.as_mut_ptr();
- assume(!p.is_null());
- if mem::size_of::<T>() == 0 {
- IterMut {ptr: p,
- end: ((p as usize).wrapping_add(self.len())) as *mut T,
- _marker: marker::PhantomData}
+ // Mirrors `iter` above: non-null placeholder address 1 for
+ // zero-sized T, real (asserted non-null) data pointer otherwise.
+ let p = if mem::size_of::<T>() == 0 {
+ 1 as *mut _
} else {
- IterMut {ptr: p,
- end: p.offset(self.len() as isize),
- _marker: marker::PhantomData}
+ let p = self.as_mut_ptr();
+ assume(!p.is_null());
+ p
+ };
+
+ // slice_offset! handles both the ZST (byte-wise) and sized cases
+ IterMut {
+ ptr: p,
+ end: slice_offset!(p, self.len() as isize),
+ _marker: marker::PhantomData
}
}
}
mem::size_of::<T>()
}
-
-// Use macros to be generic over const/mut
-macro_rules! slice_offset {
- ($ptr:expr, $by:expr) => {{
- let ptr = $ptr;
- if size_from_ptr(ptr) == 0 {
- transmute((ptr as isize).wrapping_add($by))
- } else {
- ptr.offset($by)
- }
- }};
-}
-
-macro_rules! slice_ref {
- ($ptr:expr) => {{
- let ptr = $ptr;
- if size_from_ptr(ptr) == 0 {
- // Use a non-null pointer value
- &mut *(1 as *mut _)
- } else {
- transmute(ptr)
- }
- }};
-}
-
// The shared definition of the `Iter` and `IterMut` iterators
macro_rules! iterator {
(struct $name:ident -> $ptr:ty, $elem:ty) => {
match self.as_slice().get(n) {
Some(elem_ref) => unsafe {
self.ptr = slice_offset!(self.ptr, (n as isize).wrapping_add(1));
- Some(slice_ref!(elem_ref))
+ Some(elem_ref)
},
None => {
self.ptr = self.end;
match make_mut_slice!(self.ptr, self.end).get_mut(n) {
Some(elem_ref) => unsafe {
self.ptr = slice_offset!(self.ptr, (n as isize).wrapping_add(1));
- Some(slice_ref!(elem_ref))
+ Some(elem_ref)
},
None => {
self.ptr = self.end;
let offset = llargs[1];
InBoundsGEP(bcx, ptr, &[offset])
}
+ (_, "arith_offset") => {
+ let ptr = llargs[0];
+ let offset = llargs[1];
+ GEP(bcx, ptr, &[offset])
+ }
(_, "copy_nonoverlapping") => {
copy_intrinsic(bcx,
"type_name" => (1, Vec::new(), ty::mk_str_slice(tcx, tcx.mk_region(ty::ReStatic),
ast::MutImmutable)),
"type_id" => (1, Vec::new(), ccx.tcx.types.u64),
- "offset" => {
+ "offset" | "arith_offset" => {
(1,
vec!(
ty::mk_ptr(tcx, ty::mt {