#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::{self, Ordering};
+use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::{arith_offset, assume};
impl<T> FromIterator<T> for Vec<T> {
#[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
- <Self as SpecFrom<T, I::IntoIter>>::from_iter(iter.into_iter())
+ <Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
}
}
impl<T> Extend<T> for Vec<T> {
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- if self.capacity() > 0 {
- <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
- } else {
- // if self has no allocation then use the more powerful from_iter specializations
- // and overwrite self
- *self = SpecFrom::from_iter(iter.into_iter());
- }
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
}
#[inline]
}
}
-// Specialization trait used for Vec::from_iter
-trait SpecFrom<T, I> {
+/// Specialization trait used for Vec::from_iter
+///
+/// ## The delegation graph:
+///
+/// ```text
+/// +-------------+
+/// |FromIterator |
+/// +-+-----------+
+///   |
+///   v
+/// +-+-------------------------------+  +---------------------+
+/// |SpecFromIter                  +---->+SpecFromIterNested   |
+/// |where I:                      |  |  |where I:             |
+/// |  Iterator (default)----------+  |  |  Iterator (default) |
+/// |  vec::IntoIter               |  |  |  TrustedLen         |
+/// |  SourceIterMarker---fallback-+  |  |                     |
+/// |  slice::Iter                    |  |                     |
+/// |  Iterator<Item = &Clone>        |  +---------------------+
+/// +---------------------------------+
+///
+/// ```
+trait SpecFromIter<T, I> {
fn from_iter(iter: I) -> Self;
}
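// Illustrative sketch (not part of this change): callers only ever see `FromIterator`;
// the graph above just routes an ordinary `collect` to one of the internal impls.
fn _collect_is_routed_through_spec_from_iter() {
    // An opaque adapter chain falls through to the SpecFromIterNested impls.
    let doubled: Vec<i32> = (0..4).map(|x| x * 2).collect();
    assert_eq!(doubled, [0, 2, 4, 6]);
    // Re-collecting a vec::IntoIter takes the dedicated allocation-reusing impl below.
    let same: Vec<i32> = vec![1, 2, 3].into_iter().collect();
    assert_eq!(same, [1, 2, 3]);
}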
-// Another specialization trait for Vec::from_iter
-// necessary to manually prioritize overlapping specializations
-trait SpecFromNested<T, I> {
+/// Another specialization trait for Vec::from_iter,
+/// necessary to manually prioritize overlapping specializations.
+/// See [`SpecFromIter`] for details.
+trait SpecFromIterNested<T, I> {
fn from_iter(iter: I) -> Self;
}
-impl<T, I> SpecFromNested<T, I> for Vec<T>
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
}
}
-impl<T, I> SpecFromNested<T, I> for Vec<T>
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
}
}
-impl<T, I> SpecFrom<T, I> for Vec<T>
+impl<T, I> SpecFromIter<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
default fn from_iter(iterator: I) -> Self {
- SpecFromNested::from_iter(iterator)
+ SpecFromIterNested::from_iter(iterator)
}
}
impl<T> Drop for InPlaceDrop<T> {
#[inline]
fn drop(&mut self) {
- unsafe {
- ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len()));
+ if mem::needs_drop::<T>() {
+ unsafe {
+ ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len()));
+ }
}
}
}
-impl<T> SpecFrom<T, IntoIter<T>> for Vec<T> {
+impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> {
fn from_iter(iterator: IntoIter<T>) -> Self {
// A common case is passing a vector into a function which immediately
// re-collects into a vector. We can short circuit this if the IntoIter
// has not been advanced at all.
- // We can also reuse the memory and move the data to the front if
- // allocating a new vector and moving to it would result in the same capacity
- let non_zero_offset = iterator.buf.as_ptr() as *const _ != iterator.ptr;
- if !non_zero_offset || iterator.len() >= iterator.cap / 2 {
+ // When it has been advanced, we can also reuse the memory and move the data to the front.
+ // But we only do so when the resulting Vec wouldn't have more unused capacity
+ // than creating it through the generic FromIterator implementation would. That limitation
+ // is not strictly necessary as Vec's allocation behavior is intentionally unspecified.
+ // But it is a conservative choice.
+ let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr;
+ if !has_advanced || iterator.len() >= iterator.cap / 2 {
unsafe {
let it = ManuallyDrop::new(iterator);
- if non_zero_offset {
+ if has_advanced {
ptr::copy(it.ptr, it.buf.as_ptr(), it.len());
}
return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap);
}
}
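// A small illustration (not part of this change) of the reuse described above: when the
// IntoIter has not been advanced, the collected Vec keeps the original allocation.
fn _recollect_reuses_the_allocation() {
    let original = vec![1u32, 2, 3];
    let original_ptr = original.as_ptr();
    let recollected: Vec<u32> = original.into_iter().collect();
    assert_eq!(recollected, [1, 2, 3]);
    // Buffer reuse is an implementation detail, not a documented guarantee of Vec.
    debug_assert_eq!(original_ptr, recollected.as_ptr());
}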
-fn write_in_place<T>(src_end: *const T) -> impl FnMut(*mut T, T) -> Result<*mut T, !> {
- move |mut dst, item| {
- unsafe {
- // the InPlaceIterable contract cannot be verified precisely here since
- // try_fold has an exclusive reference to the source pointer
- // all we can do is check if it's still in range
- debug_assert!(dst as *const _ <= src_end, "InPlaceIterable contract violation");
- ptr::write(dst, item);
- dst = dst.add(1);
- }
- Ok(dst)
- }
-}
-
fn write_in_place_with_drop<T>(
src_end: *const T,
) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
move |mut sink, item| {
unsafe {
- // same caveat as above
+ // the InPlaceIterable contract cannot be verified precisely here since
+ // try_fold has an exclusive reference to the source pointer
+ // all we can do is check if it's still in range
debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
ptr::write(sink.dst, item);
sink.dst = sink.dst.add(1);
}
}
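// A minimal sketch (not from this diff) of the pattern used above: try_fold threads an
// accumulator (here a plain index instead of a raw write pointer) through the iterator
// and hands it back once iteration finishes.
fn _thread_state_through_try_fold() {
    let mut dst = [0i32; 4];
    let written = (1..=4)
        .try_fold::<_, _, Result<usize, ()>>(0usize, |i, item| {
            dst[i] = item;
            Ok(i + 1)
        })
        .unwrap();
    assert_eq!(written, 4);
    assert_eq!(dst, [1, 2, 3, 4]);
}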
-// Further specialization potential once
-// https://github.com/rust-lang/rust/issues/62645 has been solved:
-// T can be split into IN and OUT which only need to have the same size and alignment
-impl<T, I> SpecFrom<T, I> for Vec<T>
+/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
+/// source allocation, i.e. executing the pipeline in place.
+///
+/// The SourceIter parent trait is necessary for the specializing function to access the allocation
+/// which is to be reused. But it is not sufficient for the specialization to be valid. See
+/// additional bounds on the impl.
+#[rustc_unsafe_specialization_marker]
+trait SourceIterMarker: SourceIter<Source: AsIntoIter> {}
+
+// The std-internal SourceIter/InPlaceIterable traits are only implemented by chains of
+// Adapter<Adapter<Adapter<IntoIter>>> (all owned by core/std). Additional bounds
+// on the adapter implementations (beyond `impl<I: Trait> Trait for Adapter<I>`) only depend on other
+// traits already marked as specialization traits (Copy, TrustedRandomAccess, FusedIterator).
+// I.e. the marker does not depend on lifetimes of user-supplied types. Modulo the Copy hole, which
+// several other specializations already depend on.
+impl<T> SourceIterMarker for T where T: SourceIter<Source: AsIntoIter> + InPlaceIterable {}
+
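// A rough sketch (not std code; the unstable traits and internal attributes are omitted)
// of the marker-trait pattern used above: an otherwise empty trait with a single blanket
// impl lets a specializing impl name one bound that stands for a conjunction of bounds.
trait _DemoMarker {}
impl<I> _DemoMarker for I where I: Iterator + ExactSizeIterator + DoubleEndedIterator {}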
+impl<T, I> SpecFromIter<T, I> for Vec<T>
where
- I: Iterator<Item = T> + InPlaceIterable + SourceIter<Source: AsIntoIter<T>>,
+ I: Iterator<Item = T> + SourceIterMarker,
{
default fn from_iter(mut iterator: I) -> Self {
- // This specialization only makes sense if we're juggling real allocations.
- // Additionally some of the pointer arithmetic would panic on ZSTs.
- if mem::size_of::<T>() == 0 {
- return SpecFromNested::from_iter(iterator);
- }
-
- let (src_buf, src_end, cap) = {
- let inner = unsafe { iterator.as_inner().as_into_iter() };
- (inner.buf.as_ptr(), inner.end, inner.cap)
+ // Additional requirements which cannot be expressed via trait bounds. We rely on const eval
+ // instead:
+ // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
+ // b) size match as required by Alloc contract
+ // c) alignments match as required by Alloc contract
+ if mem::size_of::<T>() == 0
+ || mem::size_of::<T>()
+ != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+ || mem::align_of::<T>()
+ != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+ {
+ // fallback to more generic implementations
+ return SpecFromIterNested::from_iter(iterator);
+ }
+
+ let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+ let inner = iterator.as_inner().as_into_iter();
+ (
+ inner.buf.as_ptr(),
+ inner.ptr,
+ inner.buf.as_ptr() as *mut T,
+ inner.end as *const T,
+ inner.cap,
+ )
};
- // use try-fold
+ // use try-fold since
// - it vectorizes better for some iterator adapters
// - unlike most internal iteration methods, it only takes a &mut self
- // - lets us thread the write pointer through its innards and get it back in the end
- let dst = if mem::needs_drop::<T>() {
- // special-case drop handling since it forces us to lug that extra field around which
- // can inhibit optimizations
- let sink = InPlaceDrop { inner: src_buf, dst: src_buf };
- let sink = iterator
- .try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(src_end))
- .unwrap();
- // iteration succeeded, don't drop head
- let sink = mem::ManuallyDrop::new(sink);
- sink.dst
- } else {
- iterator.try_fold::<_, _, Result<_, !>>(src_buf, write_in_place(src_end)).unwrap()
- };
+ // - it lets us thread the write pointer through its innards and get it back in the end
+ let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+ let sink = iterator
+ .try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(dst_end))
+ .unwrap();
+ // iteration succeeded, don't drop head
+ let dst = mem::ManuallyDrop::new(sink).dst;
let src = unsafe { iterator.as_inner().as_into_iter() };
- // check if SourceIter and InPlaceIterable contracts were upheld.
+ // check if SourceIter contract was upheld
// caveat: if the contract was violated, we may not even make it to this point
debug_assert_eq!(src_buf, src.buf.as_ptr());
- debug_assert!(dst as *const _ <= src.ptr, "InPlaceIterable contract violation");
+ // check InPlaceIterable contract. This is only possible if the iterator advanced the
+ // source pointer at all. If it uses unchecked access via TrustedRandomAccess
+ // then the source pointer will stay in its initial position and we can't use it as a reference
+ if src.ptr != src_ptr {
+ debug_assert!(
+ dst as *const _ <= src.ptr,
+ "InPlaceIterable contract violation, write pointer advanced beyond read pointer"
+ );
+ }
// drop any remaining values at the tail of the source
- src.drop_in_place();
+ src.drop_remaining();
// but prevent drop of the allocation itself once IntoIter goes out of scope
- src.forget_in_place();
+ src.forget_allocation();
let vec = unsafe {
- let len = dst.offset_from(src_buf) as usize;
- Vec::from_raw_parts(src_buf, len, cap)
+ let len = dst.offset_from(dst_buf) as usize;
+ Vec::from_raw_parts(dst_buf, len, cap)
};
vec
}
}
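// Illustration (not part of this change): an adapter pipeline rooted in vec::IntoIter whose
// item type keeps the same size and alignment can be collected back into the source buffer.
fn _in_place_pipeline_reuses_the_buffer() {
    let src = vec![1u32, 2, 3, 4];
    let src_ptr = src.as_ptr();
    let out: Vec<u32> = src.into_iter().map(|x| x + 1).filter(|x| x % 2 == 0).collect();
    assert_eq!(out, [2, 4]);
    // In-place reuse is an implementation detail, not a documented guarantee of Vec.
    debug_assert_eq!(src_ptr, out.as_ptr());
}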
-impl<'a, T: 'a, I> SpecFrom<&'a T, I> for Vec<T>
+impl<'a, T: 'a, I> SpecFromIter<&'a T, I> for Vec<T>
where
I: Iterator<Item = &'a T>,
T: Clone,
{
default fn from_iter(iterator: I) -> Self {
- SpecFrom::from_iter(iterator.cloned())
+ SpecFromIter::from_iter(iterator.cloned())
}
}
-impl<'a, T: 'a> SpecFrom<&'a T, slice::Iter<'a, T>> for Vec<T>
+impl<'a, T: 'a> SpecFromIter<&'a T, slice::Iter<'a, T>> for Vec<T>
where
T: Copy,
{
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for Vec<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- if self.capacity() > 0 {
- self.spec_extend(iter.into_iter())
- } else {
- // if self has no allocation then use the more powerful from_iter specializations
- // and overwrite self
- *self = SpecFrom::from_iter(iter.into_iter());
- }
+ self.spec_extend(iter.into_iter())
}
#[inline]
}
}
+#[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, const N: usize> TryFrom<Vec<T>> for [T; N] {
+ type Error = Vec<T>;
+
+ /// Gets the entire contents of the `Vec<T>` as an array,
+ /// if its size exactly matches that of the requested array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::convert::TryInto;
+ /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+ /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+ /// ```
+ ///
+ /// If the length doesn't match, the input comes back in `Err`:
+ /// ```
+ /// use std::convert::TryInto;
+ /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+ /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+ /// ```
+ ///
+ /// If you're fine with just getting a prefix of the `Vec<T>`,
+ /// you can call [`.truncate(N)`](Vec::truncate) first.
+ /// ```
+ /// use std::convert::TryInto;
+ /// let mut v = String::from("hello world").into_bytes();
+ /// v.sort();
+ /// v.truncate(2);
+ /// let [a, b]: [_; 2] = v.try_into().unwrap();
+ /// assert_eq!(a, b' ');
+ /// assert_eq!(b, b'd');
+ /// ```
+ fn try_from(mut vec: Vec<T>) -> Result<[T; N], Vec<T>> {
+ if vec.len() != N {
+ return Err(vec);
+ }
+
+ // SAFETY: `.set_len(0)` is always sound.
+ unsafe { vec.set_len(0) };
+
+ // SAFETY: A `Vec`'s pointer is always aligned properly, and
+ // the alignment the array needs is the same as that of its items.
+ // We checked earlier that the length matches, so there are exactly N items.
+ // The items will not double-drop as the `set_len`
+ // tells the `Vec` not to also drop them.
+ let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+ Ok(array)
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
// Clone-on-write
////////////////////////////////////////////////////////////////////////////////
ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
}
- fn drop_in_place(&mut self) {
+ fn drop_remaining(&mut self) {
if mem::needs_drop::<T>() {
unsafe {
ptr::drop_in_place(self.as_mut_slice());
/// Relinquishes the backing allocation, equivalent to
/// `ptr::write(&mut self, Vec::new().into_iter())`
- fn forget_in_place(&mut self) {
+ fn forget_allocation(&mut self) {
self.cap = 0;
self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
self.ptr = self.buf.as_ptr();
fn count(self) -> usize {
self.len()
}
+
+ unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccess,
+ {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::get_unchecked`.
+ unsafe {
+ if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
where
T: Copy,
{
- unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
- unsafe {
- if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
- }
- }
-
fn may_have_side_effect() -> bool {
false
}
}
// internal helper trait for in-place iteration specialization.
-pub(crate) trait AsIntoIter<T> {
- fn as_into_iter(&mut self) -> &mut IntoIter<T>;
+#[rustc_specialization_trait]
+pub(crate) trait AsIntoIter {
+ type Item;
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>;
}
-impl<T> AsIntoIter<T> for IntoIter<T> {
- fn as_into_iter(&mut self) -> &mut IntoIter<T> {
+impl<T> AsIntoIter for IntoIter<T> {
+ type Item = T;
+
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
self
}
}