1 // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! A dynamically-sized view into a contiguous sequence, `[T]`.
13 //! *[See also the slice primitive type](../../std/primitive.slice.html).*
15 //! Slices are a view into a block of memory represented as a pointer and a
20 //! let vec = vec![1, 2, 3];
21 //! let int_slice = &vec[..];
22 //! // coercing an array to a slice
23 //! let str_slice: &[&str] = &["one", "two", "three"];
26 //! Slices are either mutable or shared. The shared slice type is `&[T]`,
27 //! while the mutable slice type is `&mut [T]`, where `T` represents the element
28 //! type. For example, you can mutate the block of memory that a mutable slice
32 //! let x = &mut [1, 2, 3];
34 //! assert_eq!(x, &[1, 7, 3]);
37 //! Here are some of the things this module contains:
41 //! There are several structs that are useful for slices, such as [`Iter`], which
42 //! represents iteration over a slice.
44 //! ## Trait Implementations
46 //! There are several implementations of common traits for slices. Some examples
50 //! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
51 //! * [`Hash`] - for slices whose element type is [`Hash`].
55 //! The slices implement `IntoIterator`. The iterator yields references to the
59 //! let numbers = &[0, 1, 2];
60 //! for n in numbers {
61 //! println!("{} is a number!", n);
65 //! The mutable slice yields mutable references to the elements:
68 //! let mut scores = [7, 8, 9];
69 //! for score in &mut scores[..] {
74 //! This iterator yields mutable references to the slice's elements, so while
75 //! the element type of the slice is `i32`, the element type of the iterator is
78 //! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
80 //! * Further methods that return iterators are [`.split`], [`.splitn`],
81 //! [`.chunks`], [`.windows`] and more.
83 //! [`Clone`]: ../../std/clone/trait.Clone.html
84 //! [`Eq`]: ../../std/cmp/trait.Eq.html
85 //! [`Ord`]: ../../std/cmp/trait.Ord.html
86 //! [`Iter`]: struct.Iter.html
87 //! [`Hash`]: ../../std/hash/trait.Hash.html
88 //! [`.iter`]: ../../std/primitive.slice.html#method.iter
89 //! [`.iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
90 //! [`.split`]: ../../std/primitive.slice.html#method.split
91 //! [`.splitn`]: ../../std/primitive.slice.html#method.splitn
92 //! [`.chunks`]: ../../std/primitive.slice.html#method.chunks
93 //! [`.windows`]: ../../std/primitive.slice.html#method.windows
94 #![stable(feature = "rust1", since = "1.0.0")]
96 // Many of the usings in this module are only used in the test configuration.
97 // It's cleaner to just turn off the unused_imports warning than to fix them.
98 #![cfg_attr(test, allow(unused_imports, dead_code))]
100 use core::cmp::Ordering::{self, Less};
101 use core::mem::size_of;
104 use core::{u8, u16, u32};
106 use borrow::{Borrow, BorrowMut, ToOwned};
110 #[stable(feature = "rust1", since = "1.0.0")]
111 pub use core::slice::{Chunks, Windows};
112 #[stable(feature = "rust1", since = "1.0.0")]
113 pub use core::slice::{Iter, IterMut};
114 #[stable(feature = "rust1", since = "1.0.0")]
115 pub use core::slice::{SplitMut, ChunksMut, Split};
116 #[stable(feature = "rust1", since = "1.0.0")]
117 pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut};
118 #[stable(feature = "slice_rsplit", since = "1.27.0")]
119 pub use core::slice::{RSplit, RSplitMut};
120 #[stable(feature = "rust1", since = "1.0.0")]
121 pub use core::slice::{from_raw_parts, from_raw_parts_mut};
122 #[stable(feature = "from_ref", since = "1.28.0")]
123 pub use core::slice::{from_ref, from_mut};
124 #[stable(feature = "slice_get_slice", since = "1.28.0")]
125 pub use core::slice::SliceIndex;
126 #[unstable(feature = "chunks_exact", issue = "47115")]
127 pub use core::slice::{ChunksExact, ChunksExactMut};
128 #[unstable(feature = "rchunks", issue = "55177")]
129 pub use core::slice::{RChunks, RChunksMut, RChunksExact, RChunksExactMut};
131 ////////////////////////////////////////////////////////////////////////////////
132 // Basic slice extension methods
133 ////////////////////////////////////////////////////////////////////////////////
135 // HACK(japaric) needed for the implementation of `vec!` macro during testing
136 // NB see the hack module in this file for more details
138 pub use self::hack::into_vec;
140 // HACK(japaric) needed for the implementation of `Vec::clone` during testing
141 // NB see the hack module in this file for more details
143 pub use self::hack::to_vec;
145 // HACK(japaric): With cfg(test) `impl [T]` is not available, these three
146 // functions are actually methods that are in `impl [T]` but not in
147 // `core::slice::SliceExt` - we need to supply these functions for the
148 // `test_permutations` test
154 use string::ToString;
// Test-configuration helper: turns a boxed slice into a `Vec` by reusing the
// Box's allocation (`from_raw_parts` with ptr/len/len — no copy, no realloc).
// NOTE(review): this excerpt elides lines here (the `unsafe` block, the
// `mem::forget(b)` that prevents a double-free, and the return) — confirm
// against the full file before editing.
157 pub fn into_vec<T>(mut b: Box<[T]>) -> Vec<T> {
159 let xs = Vec::from_raw_parts(b.as_mut_ptr(), b.len(), b.len());
// Test-configuration helper: clones a slice into a new `Vec`, pre-sizing the
// allocation with `with_capacity` so `extend_from_slice` never regrows.
166 pub fn to_vec<T>(s: &[T]) -> Vec<T>
169 let mut vector = Vec::with_capacity(s.len());
170 vector.extend_from_slice(s);
// Lang-item attribute for the inherent `impl` block whose header line is
// elided from this excerpt.
175 #[lang = "slice_alloc"]
180 /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
182 /// When applicable, unstable sorting is preferred because it is generally faster than stable
183 /// sorting and it doesn't allocate auxiliary memory.
184 /// See [`sort_unstable`](#method.sort_unstable).
186 /// # Current implementation
188 /// The current algorithm is an adaptive, iterative merge sort inspired by
189 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
190 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
191 /// two or more sorted sequences concatenated one after another.
193 /// Also, it allocates temporary storage half the size of `self`, but for short slices a
194 /// non-allocating insertion sort is used instead.
199 /// let mut v = [-5, 4, 1, -3, 2];
202 /// assert!(v == [-5, -3, 1, 2, 4]);
204 #[stable(feature = "rust1", since = "1.0.0")]
206 pub fn sort(&mut self)
// `a.lt(b)` supplies the strict "is less" predicate that the file-local
// `merge_sort` expects; equal elements compare false, preserving stability.
209 merge_sort(self, |a, b| a.lt(b));
212 /// Sorts the slice with a comparator function.
214 /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case.
216 /// When applicable, unstable sorting is preferred because it is generally faster than stable
217 /// sorting and it doesn't allocate auxiliary memory.
218 /// See [`sort_unstable_by`](#method.sort_unstable_by).
220 /// # Current implementation
222 /// The current algorithm is an adaptive, iterative merge sort inspired by
223 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
224 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
225 /// two or more sorted sequences concatenated one after another.
227 /// Also, it allocates temporary storage half the size of `self`, but for short slices a
228 /// non-allocating insertion sort is used instead.
233 /// let mut v = [5, 4, 1, 3, 2];
234 /// v.sort_by(|a, b| a.cmp(b));
235 /// assert!(v == [1, 2, 3, 4, 5]);
237 /// // reverse sorting
238 /// v.sort_by(|a, b| b.cmp(a));
239 /// assert!(v == [5, 4, 3, 2, 1]);
241 #[stable(feature = "rust1", since = "1.0.0")]
243 pub fn sort_by<F>(&mut self, mut compare: F)
244 where F: FnMut(&T, &T) -> Ordering
// Adapts the `Ordering`-returning comparator into the boolean "is less"
// predicate `merge_sort` consumes: only `Less` counts as strictly less.
246 merge_sort(self, |a, b| compare(a, b) == Less);
249 /// Sorts the slice with a key extraction function.
251 /// This sort is stable (i.e. does not reorder equal elements) and `O(m n log(m n))`
252 /// worst-case, where the key function is `O(m)`.
254 /// When applicable, unstable sorting is preferred because it is generally faster than stable
255 /// sorting and it doesn't allocate auxiliary memory.
256 /// See [`sort_unstable_by_key`](#method.sort_unstable_by_key).
258 /// # Current implementation
260 /// The current algorithm is an adaptive, iterative merge sort inspired by
261 /// [timsort](https://en.wikipedia.org/wiki/Timsort).
262 /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
263 /// two or more sorted sequences concatenated one after another.
265 /// Also, it allocates temporary storage half the size of `self`, but for short slices a
266 /// non-allocating insertion sort is used instead.
271 /// let mut v = [-5i32, 4, 1, -3, 2];
273 /// v.sort_by_key(|k| k.abs());
274 /// assert!(v == [1, 2, -3, 4, -5]);
276 #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
278 pub fn sort_by_key<K, F>(&mut self, mut f: F)
279 where F: FnMut(&T) -> K, K: Ord
// `f` is invoked on BOTH operands of every comparison, i.e. O(n log n) times
// in total — hence the O(m n log(m n)) bound above. `sort_by_cached_key`
// below calls the key function only once per element instead.
281 merge_sort(self, |a, b| f(a).lt(&f(b)));
284 /// Sorts the slice with a key extraction function.
286 /// During sorting, the key function is called only once per element.
288 /// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)`
289 /// worst-case, where the key function is `O(m)`.
291 /// For simple key functions (e.g. functions that are property accesses or
292 /// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be
295 /// # Current implementation
297 /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
298 /// which combines the fast average case of randomized quicksort with the fast worst case of
299 /// heapsort, while achieving linear time on slices with certain patterns. It uses some
300 /// randomization to avoid degenerate cases, but with a fixed seed to always provide
301 /// deterministic behavior.
303 /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
304 /// length of the slice.
309 /// #![feature(slice_sort_by_cached_key)]
310 /// let mut v = [-5i32, 4, 32, -3, 2];
312 /// v.sort_by_cached_key(|k| k.to_string());
313 /// assert!(v == [-3, -5, 2, 32, 4]);
316 /// [pdqsort]: https://github.com/orlp/pdqsort
317 #[unstable(feature = "slice_sort_by_cached_key", issue = "34447")]
319 pub fn sort_by_cached_key<K, F>(&mut self, f: F)
320 where F: FnMut(&T) -> K, K: Ord
322 // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
323 macro_rules! sort_by_key {
324 ($t:ty, $slice:ident, $f:ident) => ({
// Compute every key exactly once, pairing it with the element's original
// index (narrowed to $t to keep the (key, index) tuples small).
325 let mut indices: Vec<_> =
326 $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
327 // The elements of `indices` are unique, as they are indexed, so any sort will be
328 // stable with respect to the original slice. We use `sort_unstable` here because
329 // it requires less memory allocation.
330 indices.sort_unstable();
// Apply the sorted permutation to the slice in place. If the target slot
// was already swapped away in an earlier step (index < i), chase the
// cycle until we find where that element currently lives.
331 for i in 0..$slice.len() {
332 let mut index = indices[i].1;
333 while (index as usize) < i {
334 index = indices[index as usize].1;
336 indices[i].1 = index;
337 $slice.swap(i, index as usize);
// Pick the narrowest index type whose (K, index) tuple is actually smaller
// (alignment can make e.g. (K, u8) the same size as (K, u16)) and whose
// range covers `len`.
342 let sz_u8 = mem::size_of::<(K, u8)>();
343 let sz_u16 = mem::size_of::<(K, u16)>();
344 let sz_u32 = mem::size_of::<(K, u32)>();
345 let sz_usize = mem::size_of::<(K, usize)>();
347 let len = self.len();
// Slices of length 0 or 1 are already sorted; skip key computation entirely.
348 if len < 2 { return }
349 if sz_u8 < sz_u16 && len <= ( u8::MAX as usize) { return sort_by_key!( u8, self, f) }
350 if sz_u16 < sz_u32 && len <= (u16::MAX as usize) { return sort_by_key!(u16, self, f) }
351 if sz_u32 < sz_usize && len <= (u32::MAX as usize) { return sort_by_key!(u32, self, f) }
352 sort_by_key!(usize, self, f)
355 /// Copies `self` into a new `Vec`.
360 /// let s = [10, 40, 30];
361 /// let x = s.to_vec();
362 /// // Here, `s` and `x` can be modified independently.
364 #[rustc_conversion_suggestion]
365 #[stable(feature = "rust1", since = "1.0.0")]
367 pub fn to_vec(&self) -> Vec<T>
// Body is elided from this excerpt; per the NB below it presumably delegates
// to `hack::to_vec` so the cfg(test) build works — confirm in the full file.
370 // NB see hack module in this file
374 /// Converts `self` into a vector without clones or allocation.
376 /// The resulting vector can be converted back into a box via
377 /// `Vec<T>`'s `into_boxed_slice` method.
382 /// let s: Box<[i32]> = Box::new([10, 40, 30]);
383 /// let x = s.into_vec();
384 /// // `s` cannot be used anymore because it has been converted into `x`.
386 /// assert_eq!(x, vec![10, 40, 30]);
388 #[stable(feature = "rust1", since = "1.0.0")]
// Takes `self: Box<Self>` by value: the Box's allocation is consumed and
// reused, which is what makes this clone- and allocation-free.
390 pub fn into_vec(self: Box<Self>) -> Vec<T> {
// Body is elided from this excerpt; per the NB below it presumably delegates
// to `hack::into_vec` — confirm in the full file.
391 // NB see hack module in this file
395 /// Creates a vector by repeating a slice `n` times.
399 /// This function will panic if the capacity would overflow.
406 /// #![feature(repeat_generic_slice)]
409 /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
413 /// A panic upon overflow:
416 /// #![feature(repeat_generic_slice)]
418 /// // this will panic at runtime
419 /// b"0123456789abcdef".repeat(usize::max_value());
422 #[unstable(feature = "repeat_generic_slice",
423 reason = "it's on str, why not on slice?",
// `T: Copy` is what makes the raw `copy_nonoverlapping` + `set_len` pattern
// below sound: elements are duplicated bitwise with no Clone/Drop to run.
425 pub fn repeat(&self, n: usize) -> Vec<T> where T: Copy {
430 // If `n` is larger than zero, it can be split as
431 // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
432 // `2^expn` is the number represented by the leftmost '1' bit of `n`,
433 // and `rem` is the remaining part of `n`.
435 // Using `Vec` to access `set_len()`.
// `checked_mul` turns the documented capacity-overflow case into a panic
// with an explicit message rather than a silent wrap.
436 let mut buf = Vec::with_capacity(self.len().checked_mul(n).expect("capacity overflow"));
438 // `2^expn` repetition is done by doubling `buf` `expn`-times.
442 // If `m > 0`, there are remaining bits up to the leftmost '1'.
444 // `buf.extend(buf)`:
// Source and destination are disjoint halves of the same allocation, so
// `copy_nonoverlapping` is valid here; `set_len` then publishes the copy.
446 ptr::copy_nonoverlapping(
448 (buf.as_mut_ptr() as *mut T).add(buf.len()),
451 // `buf` has capacity of `self.len() * n`.
452 let buf_len = buf.len();
453 buf.set_len(buf_len * 2);
460 // `rem` (`= n - 2^expn`) repetition is done by copying
461 // first `rem` repetitions from `buf` itself.
462 let rem_len = self.len() * n - buf.len(); // `self.len() * rem`
464 // `buf.extend(buf[0 .. rem_len])`:
466 // This is non-overlapping since `2^expn > rem`.
467 ptr::copy_nonoverlapping(
469 (buf.as_mut_ptr() as *mut T).add(buf.len()),
472 // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
473 let buf_cap = buf.capacity();
474 buf.set_len(buf_cap);
// Lang-item attribute for the `[u8]` inherent impl whose header line is
// elided from this excerpt.
481 #[lang = "slice_u8_alloc"]
484 /// Returns a vector containing a copy of this slice where each byte
485 /// is mapped to its ASCII upper case equivalent.
487 /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
488 /// but non-ASCII letters are unchanged.
490 /// To uppercase the value in-place, use [`make_ascii_uppercase`].
492 /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
493 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
// Clone-then-mutate: copies the bytes once, then delegates the case mapping
// to the in-place `make_ascii_uppercase` (the `return me` line is elided here).
495 pub fn to_ascii_uppercase(&self) -> Vec<u8> {
496 let mut me = self.to_vec();
497 me.make_ascii_uppercase();
501 /// Returns a vector containing a copy of this slice where each byte
502 /// is mapped to its ASCII lower case equivalent.
504 /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
505 /// but non-ASCII letters are unchanged.
507 /// To lowercase the value in-place, use [`make_ascii_lowercase`].
509 /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
510 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
// Mirror of `to_ascii_uppercase`: copy once, lowercase in place (the
// `return me` line is elided from this excerpt).
512 pub fn to_ascii_lowercase(&self) -> Vec<u8> {
513 let mut me = self.to_vec();
514 me.make_ascii_lowercase();
519 ////////////////////////////////////////////////////////////////////////////////
520 // Extension traits for slices over specific kinds of data
521 ////////////////////////////////////////////////////////////////////////////////
522 #[unstable(feature = "slice_concat_ext",
523 reason = "trait should not have to exist",
525 /// An extension trait for concatenating slices
527 /// While this trait is unstable, the methods are stable. `SliceConcatExt` is
528 /// included in the [standard library prelude], so you can use [`join()`] and
529 /// [`concat()`] as if they existed on `[T]` itself.
531 /// [standard library prelude]: ../../std/prelude/index.html
532 /// [`join()`]: #tymethod.join
533 /// [`concat()`]: #tymethod.concat
534 pub trait SliceConcatExt<T: ?Sized> {
535 #[unstable(feature = "slice_concat_ext",
536 reason = "trait should not have to exist",
// Associated type lets `[&str]` concatenate to `String` while `[[T; N]]`
// and friends concatenate to `Vec<T>`.
538 /// The resulting type after concatenation
541 /// Flattens a slice of `T` into a single value `Self::Output`.
546 /// assert_eq!(["hello", "world"].concat(), "helloworld");
547 /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
549 #[stable(feature = "rust1", since = "1.0.0")]
550 fn concat(&self) -> Self::Output;
552 /// Flattens a slice of `T` into a single value `Self::Output`, placing a
553 /// given separator between each.
558 /// assert_eq!(["hello", "world"].join(" "), "hello world");
559 /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
561 #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
562 fn join(&self, sep: &T) -> Self::Output;
// `connect` is the pre-1.3 name of `join`, kept only for backward
// compatibility (deprecated below).
564 #[stable(feature = "rust1", since = "1.0.0")]
565 #[rustc_deprecated(since = "1.3.0", reason = "renamed to join")]
566 fn connect(&self, sep: &T) -> Self::Output;
569 #[unstable(feature = "slice_concat_ext",
570 reason = "trait should not have to exist",
// Blanket impl: anything that borrows as a `[T]` slice (arrays, Vecs,
// slices) can be concatenated/joined into a `Vec<T>`.
572 impl<T: Clone, V: Borrow<[T]>> SliceConcatExt<T> for [V] {
573 type Output = Vec<T>;
575 fn concat(&self) -> Vec<T> {
// Pre-compute the total length so the result allocates exactly once.
576 let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
577 let mut result = Vec::with_capacity(size);
579 result.extend_from_slice(v.borrow())
584 fn join(&self, sep: &T) -> Vec<T> {
// Peel off the first slice so separators are only pushed *between*
// elements; an empty input short-circuits to an empty Vec.
585 let mut iter = self.iter();
586 let first = match iter.next() {
587 Some(first) => first,
588 None => return vec![],
// Capacity `size + self.len()` covers all elements plus one separator per
// input slice (one more than the len()-1 actually pushed — a deliberate,
// harmless over-reservation).
590 let size = self.iter().fold(0, |acc, v| acc + v.borrow().len());
591 let mut result = Vec::with_capacity(size + self.len());
592 result.extend_from_slice(first.borrow());
595 result.push(sep.clone());
596 result.extend_from_slice(v.borrow())
// Deprecated alias; body (elided from this excerpt) presumably forwards to
// `join` — confirm in the full file.
601 fn connect(&self, sep: &T) -> Vec<T> {
606 ////////////////////////////////////////////////////////////////////////////////
607 // Standard trait implementations for slices
608 ////////////////////////////////////////////////////////////////////////////////
610 #[stable(feature = "rust1", since = "1.0.0")]
// Lets a `Vec<T>` be used anywhere a `&[T]` is expected via the `Borrow`
// abstraction (e.g. as a HashMap key lookup or in `SliceConcatExt` above).
611 impl<T> Borrow<[T]> for Vec<T> {
612 fn borrow(&self) -> &[T] {
617 #[stable(feature = "rust1", since = "1.0.0")]
// Mutable counterpart of the `Borrow` impl: view a `Vec<T>` as `&mut [T]`.
618 impl<T> BorrowMut<[T]> for Vec<T> {
619 fn borrow_mut(&mut self) -> &mut [T] {
624 #[stable(feature = "rust1", since = "1.0.0")]
// `ToOwned` is what makes `Cow<[T]>` and `.to_owned()` on slices work:
// the owned form of `[T]` is `Vec<T>`.
625 impl<T: Clone> ToOwned for [T] {
// NOTE(review): two `to_owned` bodies appear below because the cfg
// attributes selecting between them (test vs. non-test builds) are elided
// from this excerpt — confirm in the full file.
628 fn to_owned(&self) -> Vec<T> {
633 fn to_owned(&self) -> Vec<T> {
// Optimized clone-into: reuses `target`'s existing elements and allocation
// instead of dropping everything and reallocating.
637 fn clone_into(&self, target: &mut Vec<T>) {
638 // drop anything in target that will not be overwritten
639 target.truncate(self.len());
640 let len = target.len();
642 // reuse the contained values' allocations/resources.
643 target.clone_from_slice(&self[..len]);
645 // target.len <= self.len due to the truncate above, so the
646 // slice here is always in-bounds.
647 target.extend_from_slice(&self[len..]);
651 ////////////////////////////////////////////////////////////////////////////////
653 ////////////////////////////////////////////////////////////////////////////////
655 /// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
657 /// This is the integral subroutine of insertion sort.
658 fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
659 where F: FnMut(&T, &T) -> bool
// Fast path: if v[0] is already <= v[1], the slice is sorted and nothing moves.
661 if v.len() >= 2 && is_less(&v[1], &v[0]) {
663 // There are three ways to implement insertion here:
665 // 1. Swap adjacent elements until the first one gets to its final destination.
666 // However, this way we copy data around more than is necessary. If elements are big
667 // structures (costly to copy), this method will be slow.
669 // 2. Iterate until the right place for the first element is found. Then shift the
670 // elements succeeding it to make room for it and finally place it into the
671 // remaining hole. This is a good method.
673 // 3. Copy the first element into a temporary variable. Iterate until the right place
674 // for it is found. As we go along, copy every traversed element into the slot
675 // preceding it. Finally, copy data from the temporary variable into the remaining
676 // hole. This method is very good. Benchmarks demonstrated slightly better
677 // performance than with the 2nd method.
679 // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
// `ManuallyDrop` stops `tmp` being dropped at scope end: after the bitwise
// `ptr::read`, logical ownership of the value belongs to `hole`, whose Drop
// writes it back into the slice exactly once.
680 let mut tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
682 // Intermediate state of the insertion process is always tracked by `hole`, which
683 // serves two purposes:
684 // 1. Protects integrity of `v` from panics in `is_less`.
685 // 2. Fills the remaining hole in `v` in the end.
689 // If `is_less` panics at any point during the process, `hole` will get dropped and
690 // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
691 // initially held exactly once.
692 let mut hole = InsertionHole {
696 ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
// Walk right, shifting each smaller element one slot left; the hole (the
// eventual home of `tmp`) travels with `i`.
698 for i in 2..v.len() {
699 if !is_less(&v[i], &*tmp) {
702 ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
703 hole.dest = &mut v[i];
705 // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
709 // When dropped, copies from `src` into `dest`.
710 struct InsertionHole<T> {
715 impl<T> Drop for InsertionHole<T> {
717 unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
722 /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
723 /// stores the result into `v[..]`.
727 /// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
728 /// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
729 unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
730 where F: FnMut(&T, &T) -> bool
// Raw-pointer bookkeeping: `v` shadows the slice as a base pointer, with
// `v_mid`/`v_end` marking the run boundary and the end.
733 let v = v.as_mut_ptr();
734 let v_mid = v.add(mid);
735 let v_end = v.add(len);
737 // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
738 // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
739 // copying the lesser (or greater) one into `v`.
741 // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
742 // consumed first, then we must copy whatever is left of the shorter run into the remaining
745 // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
746 // 1. Protects integrity of `v` from panics in `is_less`.
747 // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
751 // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
752 // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
753 // object it initially held exactly once.
// Copy whichever run is shorter into `buf`, so at most len/2 elements are
// moved twice — this is why the caller only needs a len/2 scratch buffer.
756 if mid <= len - mid {
757 // The left run is shorter.
758 ptr::copy_nonoverlapping(v, buf, mid);
765 // Initially, these pointers point to the beginnings of their arrays.
766 let left = &mut hole.start;
767 let mut right = v_mid;
768 let out = &mut hole.dest;
// Forward merge: left run lives in `buf`, right run stays in place.
770 while *left < hole.end && right < v_end {
771 // Consume the lesser side.
772 // If equal, prefer the left run to maintain stability.
773 let to_copy = if is_less(&*right, &**left) {
774 get_and_increment(&mut right)
776 get_and_increment(left)
778 ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
781 // The right run is shorter.
782 ptr::copy_nonoverlapping(v_mid, buf, len - mid);
785 end: buf.add(len - mid),
789 // Initially, these pointers point past the ends of their arrays.
790 let left = &mut hole.dest;
791 let right = &mut hole.end;
// Backward merge (largest elements first): right run lives in `buf`.
794 while v < *left && buf < *right {
795 // Consume the greater side.
796 // If equal, prefer the right run to maintain stability.
797 let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
798 decrement_and_get(left)
800 decrement_and_get(right)
802 ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
805 // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
806 // it will now be copied into the hole in `v`.
// Post-increment / pre-decrement helpers for the raw cursor pointers.
808 unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
810 *ptr = ptr.offset(1);
814 unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
815 *ptr = ptr.offset(-1);
819 // When dropped, copies the range `start..end` into `dest..`.
820 struct MergeHole<T> {
826 impl<T> Drop for MergeHole<T> {
828 // `T` is not a zero-sized type, so it's okay to divide by its size.
829 let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
830 unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); }
835 /// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
836 /// [here](http://svn.python.org/projects/python/trunk/Objects/listsort.txt).
838 /// The algorithm identifies strictly descending and non-descending subsequences, which are called
839 /// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
840 /// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
843 /// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
844 /// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
846 /// The invariants ensure that the total running time is `O(n log n)` worst-case.
847 fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
848 where F: FnMut(&T, &T) -> bool
850 // Slices of up to this length get sorted using insertion sort.
851 const MAX_INSERTION: usize = 20;
852 // Very short runs are extended using insertion sort to span at least this many elements.
853 const MIN_RUN: usize = 10;
855 // Sorting has no meaningful behavior on zero-sized types.
856 if size_of::<T>() == 0 {
862 // Short arrays get sorted in-place via insertion sort to avoid allocations.
863 if len <= MAX_INSERTION {
// Build sortedness from the back: each step inserts v[i] into the
// already-sorted tail v[i+1..] (see `insert_head` above).
865 for i in (0..len-1).rev() {
866 insert_head(&mut v[i..], &mut is_less);
872 // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
873 // shallow copies of the contents of `v` without risking the dtors running on copies if
874 // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
875 // which will always have length at most `len / 2`.
876 let mut buf = Vec::with_capacity(len / 2);
878 // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
879 // strange decision, but consider the fact that merges more often go in the opposite direction
880 // (forwards). According to benchmarks, merging forwards is slightly faster than merging
881 // backwards. To conclude, identifying runs by traversing backwards improves performance.
882 let mut runs = vec![];
885 // Find the next natural run, and reverse it if it's strictly descending.
886 let mut start = end - 1;
// SAFETY of get_unchecked here relies on `start`/`end` staying within the
// slice; bounds are maintained by the `start > 0` guards.
890 if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
891 while start > 0 && is_less(v.get_unchecked(start),
892 v.get_unchecked(start - 1)) {
// A strictly descending run is reversed in place, which is a stable
// transformation precisely because the run is *strictly* descending.
895 v[start..end].reverse();
897 while start > 0 && !is_less(v.get_unchecked(start),
898 v.get_unchecked(start - 1)) {
905 // Insert some more elements into the run if it's too short. Insertion sort is faster than
906 // merge sort on short sequences, so this significantly improves performance.
907 while start > 0 && end - start < MIN_RUN {
909 insert_head(&mut v[start..end], &mut is_less);
912 // Push this run onto the stack.
919 // Merge some pairs of adjacent runs to satisfy the invariants.
920 while let Some(r) = collapse(&runs) {
921 let left = runs[r + 1];
// `runs[r+1]` is the left (earlier-starting) run because runs were found
// traversing backwards, so the stack holds them in reverse slice order.
924 merge(&mut v[left.start .. right.start + right.len], left.len, buf.as_mut_ptr(),
929 len: left.len + right.len,
935 // Finally, exactly one run must remain in the stack.
936 debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
938 // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
939 // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
940 // algorithm should continue building a new run instead, `None` is returned.
942 // TimSort is infamous for its buggy implementations, as described here:
943 // http://envisage-project.eu/timsort-specification-and-verification/
945 // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
946 // Enforcing them on just top three is not sufficient to ensure that the invariants will still
947 // hold for *all* runs in the stack.
949 // This function correctly checks invariants for the top four runs. Additionally, if the top
950 // run starts at index 0, it will always demand a merge operation until the stack is fully
951 // collapsed, in order to complete the sort.
953 fn collapse(runs: &[Run]) -> Option<usize> {
955 if n >= 2 && (runs[n - 1].start == 0 ||
956 runs[n - 2].len <= runs[n - 1].len ||
957 (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) ||
958 (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) {
// Merge toward the smaller neighbor to keep merges balanced.
959 if n >= 3 && runs[n - 3].len < runs[n - 1].len {
969 #[derive(Clone, Copy)]