//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
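//!
//! For instance (a brief illustration, not exhaustive), a one-element slice can be built from a
//! plain reference, and `iter()` produces the [`Iter`] type re-exported below:
//!
//! ```
//! let x = 5;
//! // `from_ref` turns a single reference into a one-element slice.
//! let s = std::slice::from_ref(&x);
//! assert_eq!(s.len(), 1);
//! assert_eq!(s[0], 5);
//!
//! // `iter()` yields the `Iter` iterator type defined in this module.
//! let mut it = s.iter();
//! assert_eq!(it.next(), Some(&5));
//! assert_eq!(it.next(), None);
//! ```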
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]
use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem;
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of;
#[cfg(not(no_global_oom_handling))]
use core::ptr;

use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::Global;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[unstable(feature = "slice_group_by", issue = "80552")]
pub use core::slice::{GroupBy, GroupByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};
////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////

// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;

// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;
// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
use core::alloc::Allocator;

use crate::boxed::Box;

// We shouldn't add an `#[inline]` attribute to this function: it is mostly used
// by the `vec!` macro, and inlining it there causes a perf regression. See #71204
// for discussion and perf results.
pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
    unsafe {
        let len = b.len();
        let (b, alloc) = Box::into_raw_with_allocator(b);
        Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
    }
}
#[cfg(not(no_global_oom_handling))]
pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
    T::to_vec(s, alloc)
}
#[cfg(not(no_global_oom_handling))]
pub trait ConvertVec {
    fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
    where
        Self: Sized;
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone> ConvertVec for T {
    default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
        struct DropGuard<'a, T, A: Allocator> {
            vec: &'a mut Vec<T, A>,
            num_init: usize,
        }
        impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
            fn drop(&mut self) {
                // SAFETY:
                // items were marked initialized in the loop below
                unsafe {
                    self.vec.set_len(self.num_init);
                }
            }
        }
        let mut vec = Vec::with_capacity_in(s.len(), alloc);
        let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
        let slots = guard.vec.spare_capacity_mut();
        // .take(slots.len()) is necessary for LLVM to remove bounds checks
        // and has better codegen than zip.
        for (i, b) in s.iter().enumerate().take(slots.len()) {
            guard.num_init = i;
            slots[i].write(b.clone());
        }
        core::mem::forget(guard);
        // SAFETY:
        // the vec was allocated and initialized above to at least this length.
        unsafe {
            vec.set_len(s.len());
        }
        vec
    }
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy> ConvertVec for T {
    fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
        let mut v = Vec::with_capacity_in(s.len(), alloc);
        // SAFETY:
        // allocated above with the capacity of `s`, and initialized to `s.len()` by the
        // `copy_to_nonoverlapping` and `set_len` calls below.
        unsafe {
            s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
            v.set_len(s.len());
        }
        v
    }
}
/// Sorts the slice.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable`](slice::sort_unstable).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sort(&mut self)
where
    T: Ord,
{
    merge_sort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable_by`](slice::sort_unstable_by).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sort_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    merge_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
/// worst-case, where the key function is *O*(*m*).
///
/// For expensive key functions (e.g. functions that are not simple property accesses or
/// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
/// significantly faster, as it does not recompute element keys.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "slice_sort_by_key", since = "1.7.0")]
pub fn sort_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    merge_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
///
/// During sorting, the key function is called at most once per element, by using
/// temporary storage to remember the results of key evaluation.
/// The order of calls to the key function is unspecified and may change in future versions
/// of the standard library.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
/// worst-case, where the key function is *O*(*m*).
///
/// For simple key functions (e.g., functions that are property accesses or
/// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
/// faster.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
/// length of the slice.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 32, -3, 2];
///
/// v.sort_by_cached_key(|k| k.to_string());
/// assert!(v == [-3, -5, 2, 32, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
pub fn sort_by_cached_key<K, F>(&mut self, f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
    macro_rules! sort_by_key {
        ($t:ty, $slice:ident, $f:ident) => {{
            let mut indices: Vec<_> =
                $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
            // The elements of `indices` are unique, as they are indexed, so any sort will be
            // stable with respect to the original slice. We use `sort_unstable` here because
            // it requires less memory allocation.
            indices.sort_unstable();
            for i in 0..$slice.len() {
                let mut index = indices[i].1;
                while (index as usize) < i {
                    index = indices[index as usize].1;
                }
                indices[i].1 = index;
                $slice.swap(i, index as usize);
            }
        }};
    }
    let sz_u8 = mem::size_of::<(K, u8)>();
    let sz_u16 = mem::size_of::<(K, u16)>();
    let sz_u32 = mem::size_of::<(K, u32)>();
    let sz_usize = mem::size_of::<(K, usize)>();
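
    // Note (illustrative): the size comparisons below matter because alignment padding can erase
    // the benefit of a narrower index type. For example, with `K = u64` on a typical 64-bit
    // target, `(K, u8)`, `(K, u16)`, `(K, u32)` and `(K, usize)` all occupy 16 bytes, so none of
    // the early branches fire and the `usize` fallback is used.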
    let len = self.len();
    if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
        return sort_by_key!(u8, self, f);
    }
    if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
        return sort_by_key!(u16, self, f);
    }
    if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
        return sort_by_key!(u32, self, f);
    }
    sort_by_key!(usize, self, f)
}
/// Copies `self` into a new `Vec`.
///
/// # Examples
///
/// ```
/// let s = [10, 40, 30];
/// let x = s.to_vec();
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[rustc_conversion_suggestion]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_vec(&self) -> Vec<T>
where
    T: Clone,
{
    self.to_vec_in(Global)
}
/// Copies `self` into a new `Vec` with an allocator.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
///
/// let s = [10, 40, 30];
/// let x = s.to_vec_in(System);
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[unstable(feature = "allocator_api", issue = "32838")]
pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
    T: Clone,
{
    // N.B., see the `hack` module in this file for more details.
    hack::to_vec(self, alloc)
}
/// Converts `self` into a vector without clones or allocation.
///
/// The resulting vector can be converted back into a box via
/// `Vec<T>`'s `into_boxed_slice` method.
///
/// # Examples
///
/// ```
/// let s: Box<[i32]> = Box::new([10, 40, 30]);
/// let x = s.into_vec();
/// // `s` cannot be used anymore because it has been converted into `x`.
///
/// assert_eq!(x, vec![10, 40, 30]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
    // N.B., see the `hack` module in this file for more details.
    hack::into_vec(self)
}
/// Creates a vector by repeating a slice `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// ```
/// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// b"0123456789abcdef".repeat(usize::MAX);
/// ```
#[rustc_allow_incoherent_impl]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "repeat_generic_slice", since = "1.40.0")]
pub fn repeat(&self, n: usize) -> Vec<T>
where
    T: Copy,
{
    if n == 0 {
        return Vec::new();
    }

    // If `n` is larger than zero, it can be split as
    // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
    // `2^expn` is the number represented by the leftmost '1' bit of `n`,
    // and `rem` is the remaining part of `n`.
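    //
    // For instance (purely illustrative), `n = 10` splits as `2^3 + 2`: the buffer below is
    // doubled three times to reach 8 repetitions, and the remaining 2 repetitions are then
    // copied from the front of the buffer.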
    // Using `Vec` to access `set_len()`.
    let capacity = self.len().checked_mul(n).expect("capacity overflow");
    let mut buf = Vec::with_capacity(capacity);

    // `2^expn` repetition is done by doubling `buf` `expn`-times.
    buf.extend(self);
    {
        let mut m = n >> 1;
        // If `m > 0`, there are remaining bits up to the leftmost '1'.
        while m > 0 {
            // `buf.extend(buf)`:
            unsafe {
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    buf.len(),
                );
                // `buf` has capacity of `self.len() * n`.
                let buf_len = buf.len();
                buf.set_len(buf_len * 2);
            }
            m >>= 1;
        }
    }

    // `rem` (`= n - 2^expn`) repetition is done by copying the
    // first `rem` repetitions from `buf` itself.
    let rem_len = capacity - buf.len(); // `self.len() * rem`
    if rem_len > 0 {
        // `buf.extend(buf[0 .. rem_len])`:
        unsafe {
            // This is non-overlapping since `2^expn > rem`.
            ptr::copy_nonoverlapping(
                buf.as_ptr(),
                (buf.as_mut_ptr() as *mut T).add(buf.len()),
                rem_len,
            );
            // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
            buf.set_len(capacity);
        }
    }
    buf
}
/// Flattens a slice of `T` into a single value `Self::Output`.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].concat(), "helloworld");
/// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
where
    Self: Concat<Item>,
{
    Concat::concat(self)
}
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].join(" "), "hello world");
/// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
/// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rename_connect_to_join", since = "1.3.0")]
pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
where
    Self: Join<Separator>,
{
    Join::join(self, sep)
}
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// assert_eq!(["hello", "world"].connect(" "), "hello world");
/// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(since = "1.3.0", note = "renamed to join")]
pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
where
    Self: Join<Separator>,
{
    Join::join(self, sep)
}
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII upper case equivalent.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To uppercase the value in-place, use [`make_ascii_uppercase`].
///
/// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
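///
/// # Examples
///
/// A short example of the copying behavior described above:
///
/// ```
/// assert_eq!(b"hello, world!".to_ascii_uppercase(), b"HELLO, WORLD!".to_vec());
/// ```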
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the uppercase bytes as a new Vec, \
              without modifying the original"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
pub fn to_ascii_uppercase(&self) -> Vec<u8> {
    let mut me = self.to_vec();
    me.make_ascii_uppercase();
    me
}
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII lower case equivalent.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To lowercase the value in-place, use [`make_ascii_lowercase`].
///
/// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the lowercase bytes as a new Vec, \
              without modifying the original"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
pub fn to_ascii_lowercase(&self) -> Vec<u8> {
    let mut me = self.to_vec();
    me.make_ascii_lowercase();
    me
}
////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////
/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```text
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///    --> src/liballoc/slice.rs:608:6
///     |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}
/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
    type Output = Vec<T>;

    fn concat(slice: &Self) -> Vec<T> {
        let size = slice.iter().map(|slice| slice.borrow().len()).sum();
        let mut result = Vec::with_capacity(size);
        for v in slice {
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
    type Output = Vec<T>;

    fn join(slice: &Self, sep: &[T]) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        let size =
            slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        for v in iter {
            result.extend_from_slice(sep);
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
    fn borrow(&self) -> &[T] {
        &self[..]
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
    fn borrow_mut(&mut self) -> &mut [T] {
        &mut self[..]
    }
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;

    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}
////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////

/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *const T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. The buffer `buf` must be long
/// enough to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;
    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.
    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.add(1) };
        old
    }

    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.sub(1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, and these are pointers into a slice's elements.
            unsafe {
                let len = self.end.sub_ptr(self.start);
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
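///
/// As a concrete illustration of the invariants (example values only): a stack of run lengths
/// `[10, 6, 3]` satisfies both rules (`10 > 6 > 3` and `10 > 6 + 3`), while pushing a run of
/// length `4` would violate invariant 1 (`3 > 4` fails), so adjacent runs are merged until both
/// invariants hold again.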
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }
    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep shallow
    // copies of the contents of `v` in it without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just the top three is not sufficient to ensure that the invariants will
    // still hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    #[derive(Clone, Copy)]