//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
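//!
//! A short sketch of both points (everything shown here is stable std API):
//!
//! ```
//! use std::slice;
//!
//! // `v.iter()` yields a `slice::Iter` over references to the elements.
//! let v = [1, 2, 3];
//! let mut iter = v.iter();
//! assert_eq!(iter.next(), Some(&1));
//!
//! // `slice::from_ref` builds a one-element slice from a single reference.
//! let x = 10;
//! assert_eq!(slice::from_ref(&x), &[10][..]);
//! ```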
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]
use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;

use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::Global;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;

#[cfg(test)]
mod tests;
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use core::slice::{GroupBy, GroupByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};

////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
pub(crate) mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add inline attribute to this since this is used in
    // `vec!` macro mostly and causes perf regression. See #71204 for
    // discussion and perf results.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }

    #[cfg(not(no_global_oom_handling))]
    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Clone> ConvertVec for T {
        #[inline]
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                #[inline]
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Copy> ConvertVec for T {
        #[inline]
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialized to `s.len()` by
            // `copy_to_nonoverlapping` below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}

#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        merge_sort(self, T::lt);
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
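    ///
    /// As a sketch of that trade-off: a key like `i32::abs` below is cheap enough for
    /// `sort_by_key`, while a key that allocates (e.g. `str::to_lowercase`) is better
    /// suited to `sort_by_cached_key`:
    ///
    /// ```
    /// let mut words = ["Hello", "beta", "world"];
    /// words.sort_by_cached_key(|s| s.to_lowercase());
    /// assert_eq!(words, ["beta", "Hello", "world"]);
    /// ```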
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();
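
        // Illustrative sizes, assuming a typical 64-bit target: for `K = u8`,
        // `(K, u8)` is 2 bytes while `(K, usize)` is 16 due to alignment, so a
        // slice of at most 255 elements keeps its key/index table 8x smaller.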

        let len = self.len();
        if len < 2 {
            return;
        }
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }

    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by copying a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.
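        //
        // A worked instance (illustrative): for `n = 13 = 0b1101`, the leftmost
        // '1' bit gives `2^expn = 8` (`expn = 3`) and `rem = 5`; note that `8 > 5`.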

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(since = "1.3.0", note = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}

#[cfg(not(test))]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
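    ///
    /// # Examples
    ///
    /// A minimal sketch of the expected behavior (stable std API):
    ///
    /// ```
    /// assert_eq!(b"hello, world!".to_ascii_uppercase(), b"HELLO, WORLD!");
    /// ```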
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the uppercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }

    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
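    ///
    /// # Examples
    ///
    /// A minimal sketch of the expected behavior (stable std API):
    ///
    /// ```
    /// assert_eq!(b"HELLO, World!".to_ascii_lowercase(), b"hello, world!");
    /// ```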
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the lowercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }
}

////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////

/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///    --> library/alloc/src/slice.rs:608:6
///     |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}

/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());
        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());
        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;

    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
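///
/// For example (illustrative only): given `[3, 1, 2, 4]`, whose tail `[1, 2, 4]`
/// is already sorted, this routine produces `[1, 2, 3, 4]`.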
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *const T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}

/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.add(1) };
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.sub(1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements.
            unsafe {
                let len = self.end.sub_ptr(self.start);
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}

/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
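///
/// For instance, run lengths `[40, 20, 10]` (stack bottom first) satisfy both
/// invariants, while `[10, 20]` violates the first and would trigger a merge
/// of the top two runs.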
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if T::IS_ZST {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just the top three is not sufficient to ensure that the invariants will
    // still hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    #[inline]
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}