//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]

use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;

use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::Global;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;

#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use core::slice::{GroupBy, GroupByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};
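
// Illustrative sketch (not part of the original source): the iterator types re-exported above
// are obtained from inherent slice methods rather than constructed directly. The values below
// are arbitrary and only assumed for demonstration.
#[allow(dead_code)]
fn slice_iterator_types_sketch() {
    let v = [1, 2, 3, 4, 5];
    let _iter: core::slice::Iter<'_, i32> = v.iter(); // `slice.iter()` yields `Iter`
    let _windows: core::slice::Windows<'_, i32> = v.windows(2); // overlapping pairs
    let _chunks: core::slice::Chunks<'_, i32> = v.chunks(2); // non-overlapping chunks
}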

////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;

// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
pub(crate) mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add inline attribute to this since this is used in
    // `vec!` macro mostly and causes perf regression. See #71204 for
    // discussion and perf results.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }

    #[cfg(not(no_global_oom_handling))]
    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Clone> ConvertVec for T {
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }

    #[cfg(not(no_global_oom_handling))]
    impl<T: Copy> ConvertVec for T {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialized to `s.len()` in
            // ptr::copy_to_nonoverlapping below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}
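
// Illustrative sketch (not part of the original source): the `DropGuard` above exists so that
// if a `clone()` call panics partway through, the destination `Vec` still reports only the
// already-initialized prefix as its length and is freed safely. A minimal standalone version
// of the same pattern, assuming the global allocator:
#[cfg(not(no_global_oom_handling))]
#[allow(dead_code)]
fn clone_into_vec_panic_safe_sketch<T: Clone>(s: &[T]) -> Vec<T> {
    struct SetLenOnDrop<'a, T> {
        vec: &'a mut Vec<T>,
        num_init: usize,
    }
    impl<T> Drop for SetLenOnDrop<'_, T> {
        fn drop(&mut self) {
            // SAFETY: the first `num_init` spare slots were written in the loop below.
            unsafe { self.vec.set_len(self.num_init) };
        }
    }

    let mut vec = Vec::with_capacity(s.len());
    let mut guard = SetLenOnDrop { vec: &mut vec, num_init: 0 };
    for (slot, item) in guard.vec.spare_capacity_mut().iter_mut().zip(s) {
        slot.write(item.clone()); // may panic; `guard` keeps the length consistent
        guard.num_init += 1;
    }
    core::mem::forget(guard);
    // SAFETY: all `s.len()` elements were initialized above, since no panic occurred.
    unsafe { vec.set_len(s.len()) };
    vec
}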

#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        merge_sort(self, T::lt);
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }
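
    // Illustrative sketch (not part of the original source): the index-chasing loop in the
    // macro above applies the permutation produced by sorting `indices` while accounting for
    // swaps already performed. Written out for a plain `indices: Vec<(K, usize)>` (an assumed,
    // non-macro variable name), the idea is roughly:
    //
    //     for i in 0..slice.len() {
    //         let mut index = indices[i].1;
    //         // Positions before `i` already hold their final elements; if the element that
    //         // belongs at `i` was moved by an earlier swap, follow the recorded forwarding
    //         // index until it points at `i` or beyond.
    //         while index < i {
    //             index = indices[index].1;
    //         }
    //         indices[i].1 = index; // remember where slot `i`'s previous element ended up
    //         slice.swap(i, index);
    //     }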

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }

    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by copying a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }
                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }
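
    // Illustrative sketch (not part of the original source): the same doubling strategy in
    // safe code, written as a hypothetical free function over `T: Copy` slices.
    // `Vec::extend_from_within` plays the role of the `ptr::copy_nonoverlapping` calls above:
    //
    //     fn repeat_by_doubling<T: Copy>(s: &[T], n: usize) -> Vec<T> {
    //         if s.is_empty() || n == 0 {
    //             return Vec::new();
    //         }
    //         let capacity = s.len().checked_mul(n).expect("capacity overflow");
    //         let mut buf = Vec::with_capacity(capacity);
    //         buf.extend_from_slice(s);
    //         // Double the buffer while a full doubling still fits (`2^expn` repetitions).
    //         while buf.len() * 2 <= capacity {
    //             buf.extend_from_within(..);
    //         }
    //         // Copy the first `rem` repetitions from `buf` itself.
    //         let rem_len = capacity - buf.len();
    //         buf.extend_from_within(..rem_len);
    //         buf
    //     }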

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(since = "1.3.0", note = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}

#[cfg(not(test))]
impl [u8] {
    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the uppercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    pub fn to_ascii_uppercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_uppercase();
        me
    }

    /// Returns a vector containing a copy of this slice where each byte
    /// is mapped to its ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[must_use = "this returns the lowercase bytes as a new Vec, \
                  without modifying the original"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    pub fn to_ascii_lowercase(&self) -> Vec<u8> {
        let mut me = self.to_vec();
        me.make_ascii_lowercase();
        me
    }
}

////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////

/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```text
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///  --> src/liballoc/slice.rs:608:6
///   |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}

/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}

////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;

    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}
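
// Illustrative sketch (not part of the original source): `clone_into` lets callers reuse an
// existing `Vec`'s allocation instead of building a fresh one with `to_owned`. The helper name
// and the concrete values below are assumptions for demonstration only.
#[cfg(not(no_global_oom_handling))]
#[allow(dead_code)]
fn clone_into_reuses_allocation_sketch() {
    let src: &[u8] = &[1, 2, 3];
    let mut dst: Vec<u8> = Vec::with_capacity(16);
    dst.extend_from_slice(&[9, 9, 9, 9, 9]);

    // Truncates `dst` to three elements and clones over the kept prefix;
    // the original 16-element allocation stays in place.
    src.clone_into(&mut dst);
    assert_eq!(dst, [1, 2, 3]);
    assert!(dst.capacity() >= 16);
}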

////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *const T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}
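
// Illustrative sketch (not part of the original source): what `insert_head` accomplishes,
// restated with safe slice operations. The real implementation above avoids the extra element
// moves that `rotate_left` performs; this version is only meant to show the intended effect.
#[allow(dead_code)]
fn insert_head_sketch<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() < 2 {
        return;
    }
    // Find where `v[0]` belongs within the already-sorted tail `v[1..]`, then rotate it into
    // place; each element it passes over shifts down by one slot.
    let mut pos = 1;
    while pos < v.len() && is_less(&v[pos], &v[0]) {
        pos += 1;
    }
    v[..pos].rotate_left(1);
}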

/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.add(1) };
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.sub(1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements.
            unsafe {
                let len = self.end.sub_ptr(self.start);
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}
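
// Illustrative sketch (not part of the original source): the forward merge above, restated in
// safe code with scratch `Vec`s. The unsafe version copies only the shorter run into `buf` and
// writes directly into `v`; this `T: Clone` version trades that efficiency for clarity.
#[cfg(not(no_global_oom_handling))]
#[allow(dead_code)]
fn merge_sketch<T: Clone, F>(v: &mut [T], mid: usize, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let left: Vec<T> = v[..mid].to_vec();
    let right: Vec<T> = v[mid..].to_vec();
    let (mut i, mut j) = (0, 0);
    for slot in v.iter_mut() {
        // Consume the lesser side; on ties prefer the left run to keep the merge stable.
        if j == right.len() || (i < left.len() && !is_less(&right[j], &left[i])) {
            *slot = left[i].clone();
            i += 1;
        } else {
            *slot = right[j].clone();
            j += 1;
        }
    }
}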

/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if T::IS_ZST {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just top three is not sufficient to ensure that the invariants will still
    // hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}