3 //! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
4 //! published at: <https://github.com/orlp/pdqsort>
6 //! Unstable sorting is compatible with core because it doesn't allocate memory, unlike our
7 //! stable sorting implementation.
9 //! In addition, it contains the core logic of the stable sort used by `slice::sort` based on
13 use crate::mem::{self, MaybeUninit, SizedTypeProperties};
16 /// When dropped, copies from `src` into `dest`.
17 struct CopyOnDrop<T> {
22 impl<T> Drop for CopyOnDrop<T> {
24 // SAFETY: This is a helper struct.
25 // Please refer to its usage for correctness.
26 // Namely, one must be sure that `src` and `dest` do not overlap as required by `ptr::copy_nonoverlapping`.
28 ptr::copy_nonoverlapping(self.src, self.dest, 1);
33 /// Shifts the first element to the right until it encounters a greater or equal element.
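/// A safe, purely illustrative model of the same operation (hypothetical helper, not the
/// unsafe implementation below):
///
/// ```
/// fn shift_head_safe<T: Ord>(v: &mut [T]) {
///     let mut i = 0;
///     // Bubble the first element to the right until the next element is not smaller.
///     while i + 1 < v.len() && v[i + 1] < v[i] {
///         v.swap(i, i + 1);
///         i += 1;
///     }
/// }
///
/// let mut v = [4, 1, 2, 3, 5];
/// shift_head_safe(&mut v);
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```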
34 fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
36 F: FnMut(&T, &T) -> bool,
39 // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
40 // pointer) and copying memory (`ptr::copy_nonoverlapping`).
43 // 1. We checked that the length of the slice is >= 2.
44 // 2. All the indexing that we will do is always within the range `0 <= index < len`.
47 // 1. We are obtaining pointers to references which are guaranteed to be valid.
48 // 2. They cannot overlap because we obtain pointers to different indices of the slice.
49 // Namely, `i` and `i-1`.
50 // 3. If the slice is properly aligned, the elements are properly aligned.
51 // It is the caller's responsibility to make sure the slice is properly aligned.
53 // See comments below for further detail.
55 // If the first two elements are out-of-order...
56 if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
57 // Read the first element into a stack-allocated variable. If a following comparison
58 // operation panics, `hole` will get dropped and automatically write the element back
60 let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
61 let v = v.as_mut_ptr();
62 let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) };
63 ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
66 if !is_less(&*v.add(i), &*tmp) {
70 // Move `i`-th element one place to the left, thus shifting the hole to the right.
71 ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1);
74 // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
79 /// Shifts the last element to the left until it encounters a smaller or equal element.
80 fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
82 F: FnMut(&T, &T) -> bool,
85 // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
86 // pointer) and copying memory (`ptr::copy_nonoverlapping`).
89 // 1. We checked that the length of the slice is >= 2.
90 // 2. All the indexing that we will do is always within the range `0 <= index < len-1`.
93 // 1. We are obtaining pointers to references which are guaranteed to be valid.
94 // 2. They cannot overlap because we obtain pointers to different indices of the slice.
95 // Namely, `i` and `i+1`.
96 // 3. If the slice is properly aligned, the elements are properly aligned.
97 // It is the caller's responsibility to make sure the slice is properly aligned.
99 // See comments below for further detail.
101 // If the last two elements are out-of-order...
102 if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
103 // Read the last element into a stack-allocated variable. If a following comparison
104 // operation panics, `hole` will get dropped and automatically write the element back
106 let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
107 let v = v.as_mut_ptr();
108 let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) };
109 ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
111 for i in (0..len - 2).rev() {
112 if !is_less(&*tmp, &*v.add(i)) {
116 // Move `i`-th element one place to the right, thus shifting the hole to the left.
117 ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
118 hole.dest = v.add(i);
120 // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
125 /// Partially sorts a slice by shifting several out-of-order elements around.
127 /// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
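/// A rough, safe model of the idea (hypothetical helper): fix at most a fixed number of
/// adjacent out-of-order pairs, and report whether the slice ended up sorted.
///
/// ```
/// fn partial_sort_budgeted<T: Ord>(v: &mut [T], max_steps: usize) -> bool {
///     for _ in 0..max_steps {
///         let bad = (1..v.len()).find(|&i| v[i] < v[i - 1]);
///         match bad {
///             None => return true,         // already sorted
///             Some(i) => v.swap(i - 1, i), // fix one pair and try again
///         }
///     }
///     false
/// }
///
/// let mut v = [1, 2, 4, 3, 5];
/// assert!(partial_sort_budgeted(&mut v, 5));
/// assert_eq!(v, [1, 2, 3, 4, 5]);
/// ```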
129 fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
131 F: FnMut(&T, &T) -> bool,
133 // Maximum number of adjacent out-of-order pairs that will get shifted.
134 const MAX_STEPS: usize = 5;
135 // If the slice is shorter than this, don't shift any elements.
136 const SHORTEST_SHIFTING: usize = 50;
141 for _ in 0..MAX_STEPS {
142 // SAFETY: We already explicitly did the bounds checking with `i < len`.
143 // All our subsequent indexing is only in the range `0 <= index < len`.
145 // Find the next pair of adjacent out-of-order elements.
146 while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
156 // Don't shift elements on short arrays; that has a performance cost.
157 if len < SHORTEST_SHIFTING {
161 // Swap the found pair of elements. This puts them in the correct order.
164 // Shift the smaller element to the left.
165 shift_tail(&mut v[..i], is_less);
166 // Shift the greater element to the right.
167 shift_head(&mut v[i..], is_less);
170 // Didn't manage to sort the slice in the limited number of steps.
174 /// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
175 fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
177 F: FnMut(&T, &T) -> bool,
179 for i in 1..v.len() {
180 shift_tail(&mut v[..i + 1], is_less);
184 /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
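/// A hypothetical usage sketch (this function is internal and feature-gated, so the
/// example is illustrative only):
///
/// ```ignore (internal API)
/// let mut v = [5, 2, 9, 1, 3];
/// heapsort(&mut v, |a, b| a < b);
/// assert_eq!(v, [1, 2, 3, 5, 9]);
/// ```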
186 #[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
187 pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
189 F: FnMut(&T, &T) -> bool,
191 // This binary heap respects the invariant `parent >= child`.
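// Concretely: `v[node] >= v[2 * node + 1]` and `v[node] >= v[2 * node + 2]` whenever those
// children exist.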
192 let mut sift_down = |v: &mut [T], mut node| {
194 // Children of `node`.
195 let mut child = 2 * node + 1;
196 if child >= v.len() {
200 // Choose the greater child.
201 if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
205 // Stop if the invariant holds at `node`.
206 if !is_less(&v[node], &v[child]) {
210 // Swap `node` with the greater child, move one step down, and continue sifting.
216 // Build the heap in linear time.
217 for i in (0..v.len() / 2).rev() {
221 // Pop maximal elements from the heap.
222 for i in (1..v.len()).rev() {
224 sift_down(&mut v[..i], 0);
228 /// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
231 /// Returns the number of elements smaller than `pivot`.
233 /// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
234 /// This idea is presented in the [BlockQuicksort][pdf] paper.
236 /// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
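/// The central trick, sketched in safe code on a single small block (illustrative only, not
/// the implementation below): record the offsets of elements that belong on the other side
/// without branching on the comparison result, then swap the recorded positions afterwards.
///
/// ```
/// let pivot = 5;
/// let block = [7, 1, 9, 5, 3];
/// let mut offsets = [0u8; 5];
/// let mut end = 0;
/// for i in 0..block.len() {
///     offsets[end] = i as u8;
///     // Branchless: the boolean comparison result is added as 0 or 1.
///     end += (block[i] >= pivot) as usize;
/// }
/// // Offsets of elements that belong on the right side of the pivot.
/// assert_eq!(&offsets[..end], &[0, 2, 3]);
/// ```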
237 fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
239 F: FnMut(&T, &T) -> bool,
241 // Number of elements in a typical block.
242 const BLOCK: usize = 128;
244 // The partitioning algorithm repeats the following steps until completion:
246 // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
247 // 2. Trace a block from the right side to identify elements smaller than the pivot.
248 // 3. Exchange the identified elements between the left and right side.
250 // We keep the following variables for a block of elements:
252 // 1. `block` - Number of elements in the block.
253 // 2. `start` - Start pointer into the `offsets` array.
254 // 3. `end` - End pointer into the `offsets` array.
255 // 4. `offsets` - Indices of out-of-order elements within the block.
257 // The current block on the left side (from `l` to `l.add(block_l)`).
258 let mut l = v.as_mut_ptr();
259 let mut block_l = BLOCK;
260 let mut start_l = ptr::null_mut();
261 let mut end_l = ptr::null_mut();
262 let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
264 // The current block on the right side (from `r.sub(block_r)` to `r`).
265 // SAFETY: The documentation for `.add()` specifically mentions that `vec.as_ptr().add(vec.len())` is always safe.
266 let mut r = unsafe { l.add(v.len()) };
267 let mut block_r = BLOCK;
268 let mut start_r = ptr::null_mut();
269 let mut end_r = ptr::null_mut();
270 let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
272 // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
273 // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
275 // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
276 fn width<T>(l: *mut T, r: *mut T) -> usize {
277 assert!(mem::size_of::<T>() > 0);
278 // FIXME: this should *likely* use `offset_from`, but more
279 // investigation is needed (including running tests in miri).
280 (r.addr() - l.addr()) / mem::size_of::<T>()
284 // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
285 // some patch-up work in order to partition the remaining elements in between.
286 let is_done = width(l, r) <= 2 * BLOCK;
289 // Number of remaining elements (still not compared to the pivot).
290 let mut rem = width(l, r);
291 if start_l < end_l || start_r < end_r {
295 // Adjust block sizes so that the left and right block don't overlap, but get perfectly
296 // aligned to cover the whole remaining gap.
299 } else if start_r < end_r {
302 // There were the same number of elements to switch on both blocks during the last
303 // iteration, so there are no remaining elements on either block. Cover the remaining
304 // items with roughly equally-sized blocks.
306 block_r = rem - block_l;
308 debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
309 debug_assert!(width(l, r) == block_l + block_r);
312 if start_l == end_l {
313 // Trace `block_l` elements from the left side.
314 start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
318 for i in 0..block_l {
319 // SAFETY: The unsafe operations below involve the use of `offset`.
320 // According to the conditions required by the function, we satisfy them because:
321 // 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
322 // 2. The function `is_less` returns a `bool`.
323 // Casting a `bool` will never overflow `isize`.
324 // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
325 // Plus, `end_l` was initially set to the start pointer of `offsets_l`, which was declared on the stack.
326 // Thus, we know that even in the worst case (all invocations of `is_less` return false) we will only be at most 1 byte past the end.
327 // Another unsafe operation here is dereferencing `elem`.
328 // However, `elem` was initially the start pointer of the slice, which is always valid.
330 // Branchless comparison.
332 end_l = end_l.add(!is_less(&*elem, pivot) as usize);
338 if start_r == end_r {
339 // Trace `block_r` elements from the right side.
340 start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
344 for i in 0..block_r {
345 // SAFETY: The unsafe operations below involve the use of `offset`.
346 // According to the conditions required by the function, we satisfy them because:
347 // 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
348 // 2. The function `is_less` returns a `bool`.
349 // Casting a `bool` will never overflow `isize`.
350 // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
351 // Plus, `end_r` was initially set to the start pointer of `offsets_r`, which was declared on the stack.
352 // Thus, we know that even in the worst case (all invocations of `is_less` return true) we will only be at most 1 byte past the end.
353 // Another unsafe operation here is dereferencing `elem`.
354 // However, `elem` was initially `1 * sizeof(T)` past the end, and we decrement it by `1 * sizeof(T)` before accessing it.
355 // Plus, `block_r` was asserted to be at most `BLOCK`, so `elem` will never point before the beginning of the slice.
357 // Branchless comparison.
360 end_r = end_r.add(is_less(&*elem, pivot) as usize);
365 // Number of out-of-order elements to swap between the left and right side.
366 let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
371 l.add(usize::from(*start_l))
376 r.sub(usize::from(*start_r) + 1)
380 // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
381 // permutation. This is not strictly equivalent to swapping, but produces a similar
382 // result using fewer memory operations.
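// For example, with left offsets `[a, b, c]` and right offsets `[x, y, z]` the cycle below
// performs: `tmp = v[a]; v[a] = v[x]; v[x] = v[b]; v[b] = v[y]; v[y] = v[c]; v[c] = v[z];
// v[z] = tmp`, i.e. `2 * count + 1` copies instead of the `3 * count` copies that `count`
// ordinary swaps would need.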
384 // SAFETY: The use of `ptr::read` is valid because there is at least one element in
385 // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from.
387 // The uses of `left!` involve calls to `offset` on `l`, which points to the
388 // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so
389 // these `offset` calls are safe as all reads are within the block. The same argument
390 // applies for the uses of `right!`.
392 // The calls to `start_l.offset` are valid because there are at most `count-1` of them,
393 // plus the final one at the end of the unsafe block, where `count` is the minimum number
394 // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not
395 // being enough elements. The same reasoning applies to the calls to `start_r.offset`.
397 // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed
398 // not to overlap, and are valid because of the reasoning above.
400 let tmp = ptr::read(left!());
401 ptr::copy_nonoverlapping(right!(), left!(), 1);
404 start_l = start_l.add(1);
405 ptr::copy_nonoverlapping(left!(), right!(), 1);
406 start_r = start_r.add(1);
407 ptr::copy_nonoverlapping(right!(), left!(), 1);
410 ptr::copy_nonoverlapping(&tmp, right!(), 1);
412 start_l = start_l.add(1);
413 start_r = start_r.add(1);
417 if start_l == end_l {
418 // All out-of-order elements in the left block were moved. Move to the next block.
420 // block-width-guarantee
421 // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There
422 // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is
423 // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
424 // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
425 // for the smaller number of remaining elements.
426 l = unsafe { l.add(block_l) };
429 if start_r == end_r {
430 // All out-of-order elements in the right block were moved. Move to the previous block.
432 // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
433 // or `block_r` has been adjusted for the last handful of elements.
434 r = unsafe { r.sub(block_r) };
442 // All that remains now is at most one block (either the left or the right) with out-of-order
443 // elements that need to be moved. Such remaining elements can be simply shifted to the end
444 // within their block.
447 // The left block remains.
448 // Move its remaining out-of-order elements to the far right.
449 debug_assert_eq!(width(l, r), block_l);
450 while start_l < end_l {
451 // remaining-elements-safety
452 // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
453 // is safe to point `end_l` to the previous element.
455 // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
456 // - Per the debug assert above, the distance between `l` and `r` is `block_l`
457 // elements, so there can be at most `block_l` remaining offsets between `start_l`
458 // and `end_l`. This means `r` will be moved at most `block_l` steps back, which
459 // makes the `r.offset` calls valid (at that point `l == r`).
460 // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
461 // the last block, so the `l.offset` calls are valid.
463 end_l = end_l.sub(1);
464 ptr::swap(l.add(usize::from(*end_l)), r.sub(1));
468 width(v.as_mut_ptr(), r)
469 } else if start_r < end_r {
470 // The right block remains.
471 // Move its remaining out-of-order elements to the far left.
472 debug_assert_eq!(width(l, r), block_r);
473 while start_r < end_r {
474 // SAFETY: See the reasoning in [remaining-elements-safety].
476 end_r = end_r.sub(1);
477 ptr::swap(l, r.sub(usize::from(*end_r) + 1));
481 width(v.as_mut_ptr(), l)
483 // Nothing else to do, we're done.
484 width(v.as_mut_ptr(), l)
488 /// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
489 /// equal to `v[pivot]`.
491 /// Returns a tuple of:
493 /// 1. Number of elements smaller than `v[pivot]`.
494 /// 2. True if `v` was already partitioned.
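/// For example, with `v == [3, 8, 2, 5, 9]` and `pivot == 3` (pivot value `5`), one valid
/// outcome is `v == [2, 3, 5, 9, 8]` with return value `(2, false)`: two elements are smaller
/// than `5`, and the input was not already partitioned.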
495 fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
497 F: FnMut(&T, &T) -> bool,
499 let (mid, was_partitioned) = {
500 // Place the pivot at the beginning of the slice.
502 let (pivot, v) = v.split_at_mut(1);
503 let pivot = &mut pivot[0];
505 // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
506 // operation panics, the pivot will be automatically written back into the slice.
508 // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
509 let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
510 let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
513 // Find the first pair of out-of-order elements.
517 // SAFETY: The unsafe operations below involve indexing the slice.
518 // For the first one: we already do the bounds checking here with `l < r`.
519 // For the second one: we initially have `l == 0` and `r == v.len()`, and we check `l < r` before every indexing operation.
520 // Therefore `r - 1` always satisfies `l <= r - 1 < v.len()`, so indexing with it is valid.
522 // Find the first element greater than or equal to the pivot.
523 while l < r && is_less(v.get_unchecked(l), pivot) {
527 // Find the last element smaller than the pivot.
528 while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
533 (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)
535 // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
536 // variable) back into the slice where it originally was. This step is critical in ensuring
540 // Place the pivot between the two partitions.
543 (mid, was_partitioned)
546 /// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
548 /// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
549 /// elements smaller than the pivot.
550 fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
552 F: FnMut(&T, &T) -> bool,
554 // Place the pivot at the beginning of the slice.
556 let (pivot, v) = v.split_at_mut(1);
557 let pivot = &mut pivot[0];
559 // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
560 // operation panics, the pivot will be automatically written back into the slice.
561 // SAFETY: The pointer here is valid because it is obtained from a reference to an element of the slice.
562 let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
563 let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot };
566 // Now partition the slice.
570 // SAFETY: The unsafe operations below involve indexing the slice.
571 // For the first one: we already do the bounds checking here with `l < r`.
572 // For the second one: we initially have `l == 0` and `r == v.len()`, and we check `l < r` before every indexing operation.
573 // Therefore `r - 1` always satisfies `l <= r - 1 < v.len()`, so indexing with it is valid.
575 // Find the first element greater than the pivot.
576 while l < r && !is_less(pivot, v.get_unchecked(l)) {
580 // Find the last element equal to the pivot.
581 while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
590 // Swap the found pair of out-of-order elements.
592 let ptr = v.as_mut_ptr();
593 ptr::swap(ptr.add(l), ptr.add(r));
598 // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
601 // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
602 // back into the slice where it originally was. This step is critical in ensuring safety!
605 /// Scatters some elements around in an attempt to break patterns that might cause imbalanced
606 /// partitions in quicksort.
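/// The implementation below uses a tiny xorshift PRNG seeded with the slice length, and it
/// reduces each random number into `0..len` without a costly `% len`. A standalone sketch of
/// both tricks (illustrative; the concrete numbers are hypothetical):
///
/// ```
/// let len = 12usize;
/// let mut random = len as u32;
/// let mut gen_u32 = || {
///     random ^= random << 13;
///     random ^= random >> 17;
///     random ^= random << 5;
///     random
/// };
/// // Take the number modulo the next power of two, then subtract `len` once if needed;
/// // the result always lands in `0..len`.
/// let modulus = len.next_power_of_two(); // 16
/// let mut other = gen_u32() as usize & (modulus - 1);
/// if other >= len {
///     other -= len;
/// }
/// assert!(other < len);
/// ```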
608 fn break_patterns<T>(v: &mut [T]) {
611 // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
612 let mut random = len as u32;
613 let mut gen_u32 = || {
614 random ^= random << 13;
615 random ^= random >> 17;
616 random ^= random << 5;
619 let mut gen_usize = || {
620 if usize::BITS <= 32 {
623 (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
627 // Take random numbers modulo this number.
628 // The number fits into `usize` because `len` is not greater than `isize::MAX`.
629 let modulus = len.next_power_of_two();
631 // Some pivot candidates will be near this index. Let's randomize them.
632 let pos = len / 4 * 2;
635 // Generate a random number modulo `len`. However, in order to avoid costly operations
636 // we first take it modulo a power of two, and then decrease by `len` until it fits
637 // into the range `[0, len - 1]`.
638 let mut other = gen_usize() & (modulus - 1);
640 // `other` is guaranteed to be less than `2 * len`.
645 v.swap(pos - 1 + i, other);
650 /// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
652 /// Elements in `v` might be reordered in the process.
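/// A safe sketch of plain median-of-three selection, roughly what the short-slice path below
/// computes (hypothetical helper, not this function):
///
/// ```
/// fn median_of_three<T: Ord>(v: &[T]) -> usize {
///     // Candidate indices at 1/4, 2/4 and 3/4 of the slice.
///     let mut idx = [v.len() / 4, v.len() / 4 * 2, v.len() / 4 * 3];
///     idx.sort_by(|&i, &j| v[i].cmp(&v[j]));
///     idx[1] // index of the median of the three candidates
/// }
///
/// let v = [9, 1, 7, 3, 5, 2, 8, 4];
/// assert_eq!(median_of_three(&v), 2); // v[2] == 7 is the median of {7, 5, 8}
/// ```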
653 fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
655 F: FnMut(&T, &T) -> bool,
657 // Minimum length to choose the median-of-medians method.
658 // Shorter slices use the simple median-of-three method.
659 const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
660 // Maximum number of swaps that can be performed in this function.
661 const MAX_SWAPS: usize = 4 * 3;
665 // Three indices near which we are going to choose a pivot.
666 let mut a = len / 4 * 1;
667 let mut b = len / 4 * 2;
668 let mut c = len / 4 * 3;
670 // Counts the total number of swaps we are about to perform while sorting indices.
674 // Swaps indices so that `v[a] <= v[b]`.
675 // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
676 // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
677 // corresponding calls to `sort3` with valid 3-item neighborhoods around each
678 // pointer, which in turn means the calls to `sort2` are done with valid
679 // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
681 let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
682 if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
688 // Swaps indices so that `v[a] <= v[b] <= v[c]`.
689 let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
695 if len >= SHORTEST_MEDIAN_OF_MEDIANS {
696 // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
697 let mut sort_adjacent = |a: &mut usize| {
699 sort3(&mut (tmp - 1), a, &mut (tmp + 1));
702 // Find medians in the neighborhoods of `a`, `b`, and `c`.
703 sort_adjacent(&mut a);
704 sort_adjacent(&mut b);
705 sort_adjacent(&mut c);
708 // Find the median among `a`, `b`, and `c`.
709 sort3(&mut a, &mut b, &mut c);
712 if swaps < MAX_SWAPS {
715 // The maximum number of swaps was performed. Chances are the slice is descending or mostly
716 // descending, so reversing will probably help sort it faster.
722 /// Sorts `v` recursively.
724 /// If the slice had a predecessor in the original array, it is specified as `pred`.
726 /// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
727 /// this function will immediately switch to heapsort.
728 fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
730 F: FnMut(&T, &T) -> bool,
732 // Slices of up to this length get sorted using insertion sort.
733 const MAX_INSERTION: usize = 20;
735 // True if the last partitioning was reasonably balanced.
736 let mut was_balanced = true;
737 // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
738 let mut was_partitioned = true;
743 // Very short slices get sorted using insertion sort.
744 if len <= MAX_INSERTION {
745 insertion_sort(v, is_less);
749 // If too many bad pivot choices were made, simply fall back to heapsort in order to
750 // guarantee `O(n * log(n))` worst-case.
752 heapsort(v, is_less);
756 // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
757 // some elements around. Hopefully we'll choose a better pivot this time.
763 // Choose a pivot and try guessing whether the slice is already sorted.
764 let (pivot, likely_sorted) = choose_pivot(v, is_less);
766 // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
767 // selection predicts the slice is likely already sorted...
768 if was_balanced && was_partitioned && likely_sorted {
769 // Try identifying several out-of-order elements and shifting them to correct
770 // positions. If the slice ends up being completely sorted, we're done.
771 if partial_insertion_sort(v, is_less) {
776 // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
777 // slice. Partition the slice into elements equal to and elements greater than the pivot.
778 // This case is usually hit when the slice contains many duplicate elements.
779 if let Some(p) = pred {
780 if !is_less(p, &v[pivot]) {
781 let mid = partition_equal(v, pivot, is_less);
783 // Continue sorting elements greater than the pivot.
789 // Partition the slice.
790 let (mid, was_p) = partition(v, pivot, is_less);
791 was_balanced = cmp::min(mid, len - mid) >= len / 8;
792 was_partitioned = was_p;
794 // Split the slice into `left`, `pivot`, and `right`.
795 let (left, right) = v.split_at_mut(mid);
796 let (pivot, right) = right.split_at_mut(1);
797 let pivot = &pivot[0];
799 // Recurse into the shorter side only in order to minimize the total number of recursive
800 // calls and consume less stack space. Then just continue with the longer side (this is
801 // akin to tail recursion).
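// Because the recursed-into side is at most half of `v`, the recursion depth is bounded by
// `log2(v.len())` even though `limit` only bounds the number of imbalanced partitions.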
802 if left.len() < right.len() {
803 recurse(left, is_less, pred, limit);
807 recurse(right, is_less, Some(pivot), limit);
813 /// Sorts `v` using pattern-defeating quicksort, which is *O*(*n* \* log(*n*)) worst-case.
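/// A hypothetical usage sketch (this is an internal function; the public entry point that
/// reaches it is `slice::sort_unstable_by` and friends):
///
/// ```ignore (internal API)
/// let mut v = [-5, 4, 1, -3, 2];
/// quicksort(&mut v, |a, b| a < b);
/// assert_eq!(v, [-5, -3, 1, 2, 4]);
/// ```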
814 pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
816 F: FnMut(&T, &T) -> bool,
818 // Sorting has no meaningful behavior on zero-sized types.
823 // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
824 let limit = usize::BITS - v.len().leading_zeros();
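// For example, `len == 1000` has 10 significant bits, so `limit == 10`.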
826 recurse(v, &mut is_less, None, limit);
829 fn partition_at_index_loop<'a, T, F>(
833 mut pred: Option<&'a T>,
835 F: FnMut(&T, &T) -> bool,
837 // Limit the number of iterations and fall back to heapsort, similarly to `slice::sort_unstable`.
838 // This lowers the worst-case running time from O(n^2) to O(n log n).
839 // FIXME: Investigate whether it would be better to use something like Median of Medians
840 // or Fast Deterministic Selection to guarantee O(n) worst case.
841 let mut limit = usize::BITS - v.len().leading_zeros();
843 // True if the last partitioning was reasonably balanced.
844 let mut was_balanced = true;
847 // For slices of up to this length it's probably faster to simply sort them.
848 const MAX_INSERTION: usize = 10;
849 if v.len() <= MAX_INSERTION {
850 insertion_sort(v, is_less);
855 heapsort(v, is_less);
859 // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
860 // some elements around. Hopefully we'll choose a better pivot this time.
867 let (pivot, _) = choose_pivot(v, is_less);
869 // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
870 // slice. Partition the slice into elements equal to and elements greater than the pivot.
871 // This case is usually hit when the slice contains many duplicate elements.
872 if let Some(p) = pred {
873 if !is_less(p, &v[pivot]) {
874 let mid = partition_equal(v, pivot, is_less);
876 // If we've passed our index, then we're good.
881 // Otherwise, continue sorting elements greater than the pivot.
889 let (mid, _) = partition(v, pivot, is_less);
890 was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8;
892 // Split the slice into `left`, `pivot`, and `right`.
893 let (left, right) = v.split_at_mut(mid);
894 let (pivot, right) = right.split_at_mut(1);
895 let pivot = &pivot[0];
899 index = index - mid - 1;
901 } else if mid > index {
904 // If mid == index, then we're done, since partition() guaranteed that all elements
905 // after `mid` are greater than or equal to the element at `mid`.
911 /// Reorder the slice such that the element at `index` is at its final sorted position.
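/// This is the implementation behind the public `slice::select_nth_unstable*` methods. An
/// illustrative example through that public API:
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
/// let (lesser, median, greater) = v.select_nth_unstable(2);
/// assert_eq!(*median, 1);
/// assert!(lesser.iter().all(|&x| x <= 1));
/// assert!(greater.iter().all(|&x| x >= 1));
/// ```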
912 pub fn partition_at_index<T, F>(
916 ) -> (&mut [T], &mut T, &mut [T])
918 F: FnMut(&T, &T) -> bool,
920 use cmp::Ordering::Greater;
921 use cmp::Ordering::Less;
923 if index >= v.len() {
924 panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
928 // Sorting has no meaningful behavior on zero-sized types. Do nothing.
929 } else if index == v.len() - 1 {
930 // Find max element and place it in the last position of the array. We're free to use
931 // `unwrap()` here because we know v must not be empty.
932 let (max_index, _) = v
935 .max_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
937 v.swap(max_index, index);
938 } else if index == 0 {
939 // Find min element and place it in the first position of the array. We're free to use
940 // `unwrap()` here because we know v must not be empty.
941 let (min_index, _) = v
944 .min_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
946 v.swap(min_index, index);
948 partition_at_index_loop(v, index, &mut is_less, None);
951 let (left, right) = v.split_at_mut(index);
952 let (pivot, right) = right.split_at_mut(1);
953 let pivot = &mut pivot[0];
957 /// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
959 /// This is the integral subroutine of insertion sort.
960 fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
962 F: FnMut(&T, &T) -> bool,
964 if v.len() >= 2 && is_less(&v[1], &v[0]) {
965 // SAFETY: `hole` copies `tmp` back into the slice even if `is_less` panics, and ensures each element is observed exactly once.
967 // There are three ways to implement insertion here:
969 // 1. Swap adjacent elements until the first one gets to its final destination.
970 // However, this way we copy data around more than is necessary. If elements are big
971 // structures (costly to copy), this method will be slow.
973 // 2. Iterate until the right place for the first element is found. Then shift the
974 // elements succeeding it to make room for it and finally place it into the
975 // remaining hole. This is a good method.
977 // 3. Copy the first element into a temporary variable. Iterate until the right place
978 // for it is found. As we go along, copy every traversed element into the slot
979 // preceding it. Finally, copy data from the temporary variable into the remaining
980 // hole. This method is very good. Benchmarks demonstrated slightly better
981 // performance than with the 2nd method.
983 // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
984 let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
986 // Intermediate state of the insertion process is always tracked by `hole`, which
987 // serves two purposes:
988 // 1. Protects integrity of `v` from panics in `is_less`.
989 // 2. Fills the remaining hole in `v` in the end.
993 // If `is_less` panics at any point during the process, `hole` will get dropped and
994 // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
995 // initially held exactly once.
996 let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
997 ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
999 for i in 2..v.len() {
1000 if !is_less(&v[i], &*tmp) {
1003 ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
1004 hole.dest = &mut v[i];
1006 // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
1010 // When dropped, copies from `src` into `dest`.
1011 struct InsertionHole<T> {
1016 impl<T> Drop for InsertionHole<T> {
1017 fn drop(&mut self) {
1018 // SAFETY: The caller must ensure that src and dest are correctly set.
1020 ptr::copy_nonoverlapping(self.src, self.dest, 1);
1026 /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
1027 /// stores the result into `v[..]`.
1031 /// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
1032 /// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
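/// A safe sketch of the same forward merge, without the panic-safety machinery (hypothetical
/// helper; the real code copies whichever run is shorter and merges forwards or backwards
/// accordingly):
///
/// ```
/// fn merge_safe(v: &mut [i32], mid: usize) {
///     let buf: Vec<i32> = v[..mid].to_vec(); // copy of the left run
///     let (mut i, mut j, mut out) = (0, mid, 0);
///     while i < buf.len() && j < v.len() {
///         if v[j] < buf[i] {
///             v[out] = v[j];
///             j += 1;
///         } else {
///             // Ties prefer the left run, which keeps the merge stable.
///             v[out] = buf[i];
///             i += 1;
///         }
///         out += 1;
///     }
///     // If the right run ran out first, the rest of the left run fills the hole.
///     while i < buf.len() {
///         v[out] = buf[i];
///         i += 1;
///         out += 1;
///     }
/// }
///
/// let mut v = [1, 4, 7, 2, 3, 9];
/// merge_safe(&mut v, 3);
/// assert_eq!(v, [1, 2, 3, 4, 7, 9]);
/// ```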
1033 unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
1035 F: FnMut(&T, &T) -> bool,
1038 let v = v.as_mut_ptr();
1040 // SAFETY: mid and len must be in-bounds of v.
1041 let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
1043 // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
1044 // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
1045 // copying the lesser (or greater) one into `v`.
1047 // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
1048 // consumed first, then we must copy whatever is left of the shorter run into the remaining
1051 // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
1052 // 1. Protects integrity of `v` from panics in `is_less`.
1053 // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
1057 // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
1058 // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
1059 // object it initially held exactly once.
1062 if mid <= len - mid {
1063 // The left run is shorter.
1065 // SAFETY: buf must have enough capacity for `v[..mid]`.
1067 ptr::copy_nonoverlapping(v, buf, mid);
1068 hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
1071 // Initially, these pointers point to the beginnings of their arrays.
1072 let left = &mut hole.start;
1073 let mut right = v_mid;
1074 let out = &mut hole.dest;
1076 while *left < hole.end && right < v_end {
1077 // Consume the lesser side.
1078 // If equal, prefer the left run to maintain stability.
1080 // SAFETY: `left` and `right` must be valid and part of `v`; the same holds for `out`.
1082 let to_copy = if is_less(&*right, &**left) {
1083 get_and_increment(&mut right)
1085 get_and_increment(left)
1087 ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
1091 // The right run is shorter.
1093 // SAFETY: buf must have enough capacity for `v[mid..]`.
1095 ptr::copy_nonoverlapping(v_mid, buf, len - mid);
1096 hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
1099 // Initially, these pointers point past the ends of their arrays.
1100 let left = &mut hole.dest;
1101 let right = &mut hole.end;
1102 let mut out = v_end;
1104 while v < *left && buf < *right {
1105 // Consume the greater side.
1106 // If equal, prefer the right run to maintain stability.
1108 // SAFETY: `left` and `right` must be valid and part of `v`; the same holds for `out`.
1110 let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
1111 decrement_and_get(left)
1113 decrement_and_get(right)
1115 ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
1119 // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
1120 // it will now be copied into the hole in `v`.
1122 unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
1125 // SAFETY: ptr.add(1) must still be a valid pointer and part of `v`.
1126 *ptr = unsafe { ptr.add(1) };
1130 unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
1131 // SAFETY: ptr.sub(1) must still be a valid pointer and part of `v`.
1132 *ptr = unsafe { ptr.sub(1) };
1136 // When dropped, copies the range `start..end` into `dest..`.
1137 struct MergeHole<T> {
1143 impl<T> Drop for MergeHole<T> {
1144 fn drop(&mut self) {
1145 // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements.
1147 let len = self.end.sub_ptr(self.start);
1148 ptr::copy_nonoverlapping(self.start, self.dest, len);
1154 /// This merge sort borrows some (but not all) ideas from TimSort, which used to be described in
1155 /// detail [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt). However, Python
1156 /// has since switched to a Powersort-based implementation.
1158 /// The algorithm identifies strictly descending and non-descending subsequences, which are called
1159 /// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
1160 /// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
1163 /// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
1164 /// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
1166 /// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
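/// Invariant 2 means the pending run lengths grow at least as fast as the Fibonacci numbers,
/// so the stack holds only *O*(log *n*) runs. A safe sketch of natural-run detection, scanning
/// forwards for simplicity (the real code scans backwards; hypothetical helper):
///
/// ```
/// fn next_run_len<T: Ord>(v: &[T]) -> usize {
///     if v.len() < 2 {
///         return v.len();
///     }
///     let mut end = 1;
///     if v[1] < v[0] {
///         // Strictly descending run; the real code reverses it in place.
///         while end < v.len() && v[end] < v[end - 1] {
///             end += 1;
///         }
///     } else {
///         // Non-descending run.
///         while end < v.len() && v[end] >= v[end - 1] {
///             end += 1;
///         }
///     }
///     end
/// }
///
/// assert_eq!(next_run_len(&[3, 4, 9, 7, 5, 6]), 3); // `[3, 4, 9]`
/// assert_eq!(next_run_len(&[9, 7, 5, 6]), 3); // `[9, 7, 5]` is strictly descending
/// ```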
1167 pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
1170 elem_alloc_fn: ElemAllocF,
1171 elem_dealloc_fn: ElemDeallocF,
1172 run_alloc_fn: RunAllocF,
1173 run_dealloc_fn: RunDeallocF,
1175 CmpF: FnMut(&T, &T) -> bool,
1176 ElemAllocF: Fn(usize) -> *mut T,
1177 ElemDeallocF: Fn(*mut T, usize),
1178 RunAllocF: Fn(usize) -> *mut TimSortRun,
1179 RunDeallocF: Fn(*mut TimSortRun, usize),
1181 // Slices of up to this length get sorted using insertion sort.
1182 const MAX_INSERTION: usize = 20;
1183 // Very short runs are extended using insertion sort to span at least this many elements.
1184 const MIN_RUN: usize = 10;
1186 // The caller should have already checked that.
1187 debug_assert!(!T::IS_ZST);
1191 // Short arrays get sorted in-place via insertion sort to avoid allocations.
1192 if len <= MAX_INSERTION {
1194 for i in (0..len - 1).rev() {
1195 insert_head(&mut v[i..], is_less);
1201 // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
1202 // shallow copies of the contents of `v` without risking the dtors running on copies if
1203 // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
1204 // which will always have length at most `len / 2`.
1205 let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn);
1206 let buf_ptr = buf.buf_ptr;
1208 let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn);
1210 // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
1211 // strange decision, but consider the fact that merges more often go in the opposite direction
1212 // (forwards). According to benchmarks, merging forwards is slightly faster than merging
1213 // backwards. To conclude, identifying runs by traversing backwards improves performance.
1216 // Find the next natural run, and reverse it if it's strictly descending.
1217 let mut start = end - 1;
1221 // SAFETY: `v.get_unchecked` must be fed with correct in-bounds indices.
1223 if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
1224 while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
1227 v[start..end].reverse();
1229 while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
1237 // Insert some more elements into the run if it's too short. Insertion sort is faster than
1238 // merge sort on short sequences, so this significantly improves performance.
1239 while start > 0 && end - start < MIN_RUN {
1241 insert_head(&mut v[start..end], is_less);
1244 // Push this run onto the stack.
1245 runs.push(TimSortRun { start, len: end - start });
1248 // Merge some pairs of adjacent runs to satisfy the invariants.
1249 while let Some(r) = collapse(runs.as_slice()) {
1250 let left = runs[r + 1];
1251 let right = runs[r];
1252 // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and
1253 // neither side may be of length 0.
1255 merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less);
1257 runs[r] = TimSortRun { start: left.start, len: left.len + right.len };
1262 // Finally, exactly one run must remain in the stack.
1263 debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
1265 // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
1266 // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
1267 // algorithm should continue building a new run instead, `None` is returned.
1269 // TimSort is infamous for its buggy implementations, as described here:
1270 // http://envisage-project.eu/timsort-specification-and-verification/
1272 // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
1273 // Enforcing them on just top three is not sufficient to ensure that the invariants will still
1274 // hold for *all* runs in the stack.
1276 // This function correctly checks invariants for the top four runs. Additionally, if the top
1277 // run starts at index 0, it will always demand a merge operation until the stack is fully
1278 // collapsed, in order to complete the sort.
1280 fn collapse(runs: &[TimSortRun]) -> Option<usize> {
1283 && (runs[n - 1].start == 0
1284 || runs[n - 2].len <= runs[n - 1].len
1285 || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
1286 || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
1288 if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
1294 // Extremely basic versions of Vec.
1295 // Their use is very limited, and keeping the code here allows reuse between the sort
1297 struct BufGuard<T, ElemDeallocF>
1299 ElemDeallocF: Fn(*mut T, usize),
1303 elem_dealloc_fn: ElemDeallocF,
1306 impl<T, ElemDeallocF> BufGuard<T, ElemDeallocF>
1308 ElemDeallocF: Fn(*mut T, usize),
1312 elem_alloc_fn: ElemAllocF,
1313 elem_dealloc_fn: ElemDeallocF,
1316 ElemAllocF: Fn(usize) -> *mut T,
1318 Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn }
1322 impl<T, ElemDeallocF> Drop for BufGuard<T, ElemDeallocF>
1324 ElemDeallocF: Fn(*mut T, usize),
1326 fn drop(&mut self) {
1327 (self.elem_dealloc_fn)(self.buf_ptr, self.capacity);
1331 struct RunVec<RunAllocF, RunDeallocF>
1333 RunAllocF: Fn(usize) -> *mut TimSortRun,
1334 RunDeallocF: Fn(*mut TimSortRun, usize),
1336 buf_ptr: *mut TimSortRun,
1339 run_alloc_fn: RunAllocF,
1340 run_dealloc_fn: RunDeallocF,
1343 impl<RunAllocF, RunDeallocF> RunVec<RunAllocF, RunDeallocF>
1345 RunAllocF: Fn(usize) -> *mut TimSortRun,
1346 RunDeallocF: Fn(*mut TimSortRun, usize),
1348 fn new(run_alloc_fn: RunAllocF, run_dealloc_fn: RunDeallocF) -> Self {
1349 // Most slices can be sorted with at most 16 runs in-flight.
1350 const START_RUN_CAPACITY: usize = 16;
1353 buf_ptr: run_alloc_fn(START_RUN_CAPACITY),
1354 capacity: START_RUN_CAPACITY,
1361 fn push(&mut self, val: TimSortRun) {
1362 if self.len == self.capacity {
1363 let old_capacity = self.capacity;
1364 let old_buf_ptr = self.buf_ptr;
1366 self.capacity = self.capacity * 2;
1367 self.buf_ptr = (self.run_alloc_fn)(self.capacity);
1369 // SAFETY: The new and old `buf_ptr` were correctly allocated, and `old_buf_ptr` has
1370 // `old_capacity` valid elements.
1372 ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity);
1375 (self.run_dealloc_fn)(old_buf_ptr, old_capacity);
1378 // SAFETY: The invariant was just checked.
1380 self.buf_ptr.add(self.len).write(val);
1385 fn remove(&mut self, index: usize) {
1386 if index >= self.len {
1387 panic!("Index out of bounds");
1390 // SAFETY: buf_ptr needs to be valid and len invariant upheld.
1392 // The place we are removing from.
1393 let ptr = self.buf_ptr.add(index);
1395 // Shift everything down to fill in that spot.
1396 ptr::copy(ptr.add(1), ptr, self.len - index - 1);
1401 fn as_slice(&self) -> &[TimSortRun] {
1402 // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld.
1403 unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) }
1406 fn len(&self) -> usize {
1411 impl<RunAllocF, RunDeallocF> core::ops::Index<usize> for RunVec<RunAllocF, RunDeallocF>
1413 RunAllocF: Fn(usize) -> *mut TimSortRun,
1414 RunDeallocF: Fn(*mut TimSortRun, usize),
1416 type Output = TimSortRun;
1418 fn index(&self, index: usize) -> &Self::Output {
1419 if index < self.len {
1420 // SAFETY: buf_ptr and len invariant must be upheld.
1422 return &*(self.buf_ptr.add(index));
1426 panic!("Index out of bounds");
1430 impl<RunAllocF, RunDeallocF> core::ops::IndexMut<usize> for RunVec<RunAllocF, RunDeallocF>
1432 RunAllocF: Fn(usize) -> *mut TimSortRun,
1433 RunDeallocF: Fn(*mut TimSortRun, usize),
1435 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1436 if index < self.len {
1437 // SAFETY: buf_ptr and len invariant must be upheld.
1439 return &mut *(self.buf_ptr.add(index));
1443 panic!("Index out of bounds");
1447 impl<RunAllocF, RunDeallocF> Drop for RunVec<RunAllocF, RunDeallocF>
1449 RunAllocF: Fn(usize) -> *mut TimSortRun,
1450 RunDeallocF: Fn(*mut TimSortRun, usize),
1452 fn drop(&mut self) {
1453 // As long as `TimSortRun` is `Copy`, we don't need to drop the runs individually; deallocating
1454 // the whole allocation is enough.
1455 (self.run_dealloc_fn)(self.buf_ptr, self.capacity);
1461 /// Internal type used by `merge_sort`.
1461 #[derive(Clone, Copy, Debug)]
1462 pub struct TimSortRun {