// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Slice sorting
//!
//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
//! published at: https://github.com/orlp/pdqsort
//!
//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
//! stable sorting implementation.
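//!
//! For context (an illustrative note, not part of the original header): callers normally reach
//! this module through the public unstable sort methods on slices, e.g.:
//!
//! ```
//! let mut v = [-5i32, 4, 1, -3, 2];
//! v.sort_unstable();
//! assert_eq!(v, [-5, -3, 1, 2, 4]);
//! ```
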
use cmp;
use mem::{self, MaybeUninit};
use ptr;

/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
    src: *mut T,
    dest: *mut T,
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
    }
}

/// Shifts the first element to the right until it encounters a greater or equal element.
fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    let len = v.len();
    unsafe {
        // If the first two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
            // Read the first element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
            let mut hole = CopyOnDrop {
                src: &mut *tmp,
                dest: v.get_unchecked_mut(1),
            };
            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);

            for i in 2..len {
                if !is_less(v.get_unchecked(i), &*tmp) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}
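
// A quick illustrative trace (hypothetical data, added for clarity): `shift_head` on `[3, 1, 2]`
// reads 3 into `tmp`, moves 1 and 2 one slot to the left, and the `CopyOnDrop` guard finally
// writes 3 into the vacated slot, yielding `[1, 2, 3]`. If `is_less` panics midway, the guard
// still fires and the slice remains a valid permutation of its original contents.
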
/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    let len = v.len();
    unsafe {
        // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
            // Read the last element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
            let mut hole = CopyOnDrop {
                src: &mut *tmp,
                dest: v.get_unchecked_mut(len - 2),
            };
            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);

            for i in (0..len-2).rev() {
                if !is_less(&*tmp, v.get_unchecked(i)) {
                    break;
                }

                // Move `i`-th element one place to the right, thus shifting the hole to the left.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}

/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
    where F: FnMut(&T, &T) -> bool
{
    // Maximum number of adjacent out-of-order pairs that will get shifted.
    const MAX_STEPS: usize = 5;
    // If the slice is shorter than this, don't shift any elements.
    const SHORTEST_SHIFTING: usize = 50;

    let len = v.len();
    let mut i = 1;

    for _ in 0..MAX_STEPS {
        unsafe {
            // Find the next pair of adjacent out-of-order elements.
            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
                i += 1;
            }
        }

        // Are we done?
        if i == len {
            return true;
        }

        // Don't shift elements on short arrays; that has a performance cost.
        if len < SHORTEST_SHIFTING {
            return false;
        }

        // Swap the found pair of elements. This puts them in correct order.
        v.swap(i - 1, i);

        // Shift the smaller element to the left.
        shift_tail(&mut v[..i], is_less);
        // Shift the greater element to the right.
        shift_head(&mut v[i..], is_less);
    }

    // Didn't manage to sort the slice in the limited number of steps.
    false
}
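
// An illustrative scenario (hypothetical, added for clarity): on a nearly sorted slice of 100
// elements where only one adjacent pair is out of order, a single `v.swap` plus the two shift
// calls repairs the slice within the first of the `MAX_STEPS` attempts, so the quicksort driver
// can skip partitioning entirely.
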
/// Sorts a slice using insertion sort, which is `O(n^2)` worst-case.
fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    for i in 1..v.len() {
        shift_tail(&mut v[..i+1], is_less);
    }
}

/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
pub fn heapsort<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    // This binary heap respects the invariant `parent >= child`.
    let mut sift_down = |v: &mut [T], mut node| {
        loop {
            // Children of `node`:
            let left = 2 * node + 1;
            let right = 2 * node + 2;

            // Choose the greater child.
            let greater = if right < v.len() && is_less(&v[left], &v[right]) {
                right
            } else {
                left
            };

            // Stop if the invariant holds at `node`.
            if greater >= v.len() || !is_less(&v[node], &v[greater]) {
                break;
            }

            // Swap `node` with the greater child, move one step down, and continue sifting.
            v.swap(node, greater);
            node = greater;
        }
    };

    // Build the heap in linear time.
    for i in (0 .. v.len() / 2).rev() {
        sift_down(v, i);
    }

    // Pop maximal elements from the heap.
    for i in (1 .. v.len()).rev() {
        v.swap(0, i);
        sift_down(&mut v[..i], 0);
    }
}
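
// Illustration (hypothetical values, added for clarity): for `v = [3, 1, 4, 1, 5]` the implicit
// heap stores the children of node `i` at `2 * i + 1` and `2 * i + 2`; building the heap sifts
// down nodes 1 and 0, producing [5, 3, 4, 1, 1], and each pop then swaps the maximum to the end
// and sifts the new root down over the shrinking prefix.
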
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
    where F: FnMut(&T, &T) -> bool
{
    // Number of elements in a typical block.
    const BLOCK: usize = 128;

    // The partitioning algorithm repeats the following steps until completion:
    //
    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
    // 2. Trace a block from the right side to identify elements smaller than the pivot.
    // 3. Exchange the identified elements between the left and right side.
    //
    // We keep the following variables for a block of elements:
    //
    // 1. `block` - Number of elements in the block.
    // 2. `start` - Start pointer into the `offsets` array.
    // 3. `end` - End pointer into the `offsets` array.
    // 4. `offsets` - Indices of out-of-order elements within the block.
    //
    // (A worked example of one scan follows.)
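    //
    // Worked example (hypothetical data, added for clarity): with pivot 5 and a left block
    // [3, 7, 2, 9], the scanning loop below unconditionally writes each index into `offsets_l`
    // and bumps `end_l` only when the element is `>= pivot`, so `offsets_l` ends up holding
    // [1, 3] (the positions of 7 and 9) without a single branch on the comparison result.
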
    // The current block on the left side (from `l` to `l.add(block_l)`).
    let mut l = v.as_mut_ptr();
    let mut block_l = BLOCK;
    let mut start_l = ptr::null_mut();
    let mut end_l = ptr::null_mut();
    let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized();

    // The current block on the right side (from `r.sub(block_r)` to `r`).
    let mut r = unsafe { l.add(v.len()) };
    let mut block_r = BLOCK;
    let mut start_r = ptr::null_mut();
    let mut end_r = ptr::null_mut();
    let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized();

    // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
    // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.

    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
    fn width<T>(l: *mut T, r: *mut T) -> usize {
        assert!(mem::size_of::<T>() > 0);
        (r as usize - l as usize) / mem::size_of::<T>()
    }

    loop {
        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
        // some patch-up work in order to partition the remaining elements in between.
        let is_done = width(l, r) <= 2 * BLOCK;

        if is_done {
            // Number of remaining elements (still not compared to the pivot).
            let mut rem = width(l, r);
            if start_l < end_l || start_r < end_r {
                rem -= BLOCK;
            }

            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
            // aligned to cover the whole remaining gap.
            if start_l < end_l {
                block_r = rem;
            } else if start_r < end_r {
                block_l = rem;
            } else {
                block_l = rem / 2;
                block_r = rem - block_l;
            }
            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
            debug_assert!(width(l, r) == block_l + block_r);
        }

        if start_l == end_l {
            // Trace `block_l` elements from the left side.
            start_l = offsets_l.as_mut_ptr() as *mut u8;
            end_l = offsets_l.as_mut_ptr() as *mut u8;
            let mut elem = l;

            for i in 0..block_l {
                unsafe {
                    // Branchless comparison.
                    *end_l = i as u8;
                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
                    elem = elem.offset(1);
                }
            }
        }

        if start_r == end_r {
            // Trace `block_r` elements from the right side.
            start_r = offsets_r.as_mut_ptr() as *mut u8;
            end_r = offsets_r.as_mut_ptr() as *mut u8;
            let mut elem = r;

            for i in 0..block_r {
                unsafe {
                    // Branchless comparison.
                    elem = elem.offset(-1);
                    *end_r = i as u8;
                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
                }
            }
        }

        // Number of out-of-order elements to swap between the left and right side.
        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));

        if count > 0 {
            macro_rules! left { () => { l.offset(*start_l as isize) } }
            macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) } }

            // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
            // permutation. This is not strictly equivalent to swapping, but produces a similar
            // result using fewer memory operations.
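            //
            // Concretely (hypothetical trace, added for clarity): with `count == 2`, writing
            // L0/L1 for the two flagged left-side slots and R0/R1 for the right-side ones, the
            // code below performs tmp = L0; L0 = R0; R0 = L1; L1 = R1; R1 = tmp, i.e. a single
            // read plus `2 * count` copies rather than the `3 * count` moves that `count`
            // separate swaps would take.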
            unsafe {
                let tmp = ptr::read(left!());
                ptr::copy_nonoverlapping(right!(), left!(), 1);

                for _ in 1..count {
                    start_l = start_l.offset(1);
                    ptr::copy_nonoverlapping(left!(), right!(), 1);
                    start_r = start_r.offset(1);
                    ptr::copy_nonoverlapping(right!(), left!(), 1);
                }

                ptr::copy_nonoverlapping(&tmp, right!(), 1);
                mem::forget(tmp);
                start_l = start_l.offset(1);
                start_r = start_r.offset(1);
            }
        }

        if start_l == end_l {
            // All out-of-order elements in the left block were moved. Move to the next block.
            l = unsafe { l.offset(block_l as isize) };
        }

        if start_r == end_r {
            // All out-of-order elements in the right block were moved. Move to the previous block.
            r = unsafe { r.offset(-(block_r as isize)) };
        }

        if is_done {
            break;
        }
    }

    // All that remains now is at most one block (either the left or the right) with out-of-order
    // elements that need to be moved. Such remaining elements can be simply shifted to the end
    // within their block.

    if start_l < end_l {
        // The left block remains.
        // Move its remaining out-of-order elements to the far right.
        debug_assert_eq!(width(l, r), block_l);
        while start_l < end_l {
            unsafe {
                end_l = end_l.offset(-1);
                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
                r = r.offset(-1);
            }
        }
        width(v.as_mut_ptr(), r)
    } else if start_r < end_r {
        // The right block remains.
        // Move its remaining out-of-order elements to the far left.
        debug_assert_eq!(width(l, r), block_r);
        while start_r < end_r {
            unsafe {
                end_r = end_r.offset(-1);
                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
                l = l.offset(1);
            }
        }
        width(v.as_mut_ptr(), l)
    } else {
        // Nothing else to do, we're done.
        width(v.as_mut_ptr(), l)
    }
}

/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
    where F: FnMut(&T, &T) -> bool
{
    let (mid, was_partitioned) = {
        // Place the pivot at the beginning of slice.
        v.swap(0, pivot);
        let (pivot, v) = v.split_at_mut(1);
        let pivot = &mut pivot[0];

        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
        let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
        let _pivot_guard = CopyOnDrop {
            src: &mut *tmp,
            dest: pivot,
        };
        let pivot = &*tmp;

        // Find the first pair of out-of-order elements.
        let mut l = 0;
        let mut r = v.len();

        unsafe {
            // Find the first element greater than or equal to the pivot.
            while l < r && is_less(v.get_unchecked(l), pivot) {
                l += 1;
            }

            // Find the last element smaller than the pivot.
            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
                r -= 1;
            }
        }

        (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)

        // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
        // variable) back into the slice where it originally was. This step is critical in ensuring
        // safety!
    };

    // Place the pivot between the two partitions.
    v.swap(0, mid);

    (mid, was_partitioned)
}
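
// Contract illustration (hypothetical values, added for clarity): partitioning [3, 8, 1, 9, 2]
// around the pivot value 3 could yield [2, 1, 3, 9, 8] and return (2, false): exactly two
// elements are smaller than the pivot, the pivot itself ends up at index 2, and `false` records
// that elements had to be moved.
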
/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
    where F: FnMut(&T, &T) -> bool
{
    // Place the pivot at the beginning of slice.
    v.swap(0, pivot);
    let (pivot, v) = v.split_at_mut(1);
    let pivot = &mut pivot[0];

    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
    // operation panics, the pivot will be automatically written back into the slice.
    let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
    let _pivot_guard = CopyOnDrop {
        src: &mut *tmp,
        dest: pivot,
    };
    let pivot = &*tmp;

    // Now partition the slice.
    let mut l = 0;
    let mut r = v.len();
    loop {
        unsafe {
            // Find the first element greater than the pivot.
            while l < r && !is_less(pivot, v.get_unchecked(l)) {
                l += 1;
            }

            // Find the last element equal to the pivot.
            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
                r -= 1;
            }

            // Are we done?
            if l >= r {
                break;
            }

            // Swap the found pair of out-of-order elements.
            r -= 1;
            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
            l += 1;
        }
    }

    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
    l + 1

    // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
    // back into the slice where it originally was. This step is critical in ensuring safety!
}

/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
fn break_patterns<T>(v: &mut [T]) {
    let len = v.len();
    if len >= 8 {
        // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
        let mut random = len as u32;
        let mut gen_u32 = || {
            random ^= random << 13;
            random ^= random >> 17;
            random ^= random << 5;
            random
        };
        let mut gen_usize = || {
            if mem::size_of::<usize>() <= 4 {
                gen_u32() as usize
            } else {
                (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
            }
        };

        // Take random numbers modulo this number.
        // The number fits into `usize` because `len` is not greater than `isize::MAX`.
        let modulus = len.next_power_of_two();

        // Some pivot candidates will be near this index. Let's randomize them.
        let pos = len / 4 * 2;

        for i in 0..3 {
            // Generate a random number modulo `len`. However, in order to avoid costly operations
            // we first take it modulo a power of two, and then decrease by `len` until it fits
            // into the range `[0, len - 1]`.
            let mut other = gen_usize() & (modulus - 1);

            // `other` is guaranteed to be less than `2 * len`.
            if other >= len {
                other -= len;
            }

            v.swap(pos - 1 + i, other);
        }
    }
}
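
// A numeric illustration (hypothetical values, added for clarity): for `len = 100` we get
// `modulus = 128`, so `gen_usize() & (modulus - 1)` lands in `[0, 127]`; a draw of 117 is then
// reduced once to `117 - 100 = 17`, a valid index, which avoids an expensive modulo by a
// non-power-of-two.
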
/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
    where F: FnMut(&T, &T) -> bool
{
    // Minimum length to choose the median-of-medians method.
    // Shorter slices use the simple median-of-three method.
    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
    // Maximum number of swaps that can be performed in this function.
    const MAX_SWAPS: usize = 4 * 3;

    let len = v.len();

    // Three indices near which we are going to choose a pivot.
    let mut a = len / 4 * 1;
    let mut b = len / 4 * 2;
    let mut c = len / 4 * 3;

    // Counts the total number of swaps we are about to perform while sorting indices.
    let mut swaps = 0;

    if len >= 8 {
        // Swaps indices so that `v[a] <= v[b]`.
        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                ptr::swap(a, b);
                swaps += 1;
            }
        };

        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
            sort2(a, b);
            sort2(b, c);
            sort2(a, b);
        };

        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
            let mut sort_adjacent = |a: &mut usize| {
                let tmp = *a;
                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
            };

            // Find medians in the neighborhoods of `a`, `b`, and `c`.
            sort_adjacent(&mut a);
            sort_adjacent(&mut b);
            sort_adjacent(&mut c);
        }

        // Find the median among `a`, `b`, and `c`.
        sort3(&mut a, &mut b, &mut c);
    }

    if swaps < MAX_SWAPS {
        (b, swaps == 0)
    } else {
        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
        // descending, so reversing will probably help sort it faster.
        v.reverse();
        (len - 1 - b, true)
    }
}
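
// Note (added context): for slices of at least SHORTEST_MEDIAN_OF_MEDIANS elements this computes
// Tukey's "ninther", the median of three medians drawn from nine samples, which approximates the
// true median more reliably than plain median-of-three and makes pathologically bad pivots rarer.
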
/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
    where F: FnMut(&T, &T) -> bool
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;

    // True if the last partitioning was reasonably balanced.
    let mut was_balanced = true;
    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
    let mut was_partitioned = true;

    loop {
        let len = v.len();

        // Very short slices get sorted using insertion sort.
        if len <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }

        // If too many bad pivot choices were made, simply fall back to heapsort in order to
        // guarantee `O(n log n)` worst-case.
        if limit == 0 {
            heapsort(v, is_less);
            return;
        }

        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
        // some elements around. Hopefully we'll choose a better pivot this time.
        if !was_balanced {
            break_patterns(v);
            limit -= 1;
        }

        // Choose a pivot and try guessing whether the slice is already sorted.
        let (pivot, likely_sorted) = choose_pivot(v, is_less);

        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
        // selection predicts the slice is likely already sorted...
        if was_balanced && was_partitioned && likely_sorted {
            // Try identifying several out-of-order elements and shifting them to correct
            // positions. If the slice ends up being completely sorted, we're done.
            if partial_insertion_sort(v, is_less) {
                return;
            }
        }

        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // Continue sorting elements greater than the pivot.
                v = &mut {v}[mid..];
                continue;
            }
        }

        // Partition the slice.
        let (mid, was_p) = partition(v, pivot, is_less);
        was_balanced = cmp::min(mid, len - mid) >= len / 8;
        was_partitioned = was_p;

        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = {v}.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];

        // Recurse into the shorter side only in order to minimize the total number of recursive
        // calls and consume less stack space. Then just continue with the longer side (this is
        // akin to tail recursion).
        if left.len() < right.len() {
            recurse(left, is_less, pred, limit);
            v = right;
            pred = Some(pivot);
        } else {
            recurse(right, is_less, Some(pivot), limit);
            v = left;
        }
    }
}

/// Sorts `v` using pattern-defeating quicksort, which is `O(n log n)` worst-case.
pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
    where F: FnMut(&T, &T) -> bool
{
    // Sorting has no meaningful behavior on zero-sized types.
    if mem::size_of::<T>() == 0 {
        return;
    }

    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
    let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;

    recurse(v, &mut is_less, None, limit);
}
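
// A minimal usage sketch (an added illustration, not part of the original file; in the real tree,
// tests for this module live in a separate test crate rather than inside libcore). As a worked
// example of the limit above: for a slice of length 1000 on a 64-bit target, `limit` is
// `64 - 54 = 10`, i.e. `floor(log2(1000)) + 1`.
#[cfg(test)]
mod tests {
    use super::quicksort;

    #[test]
    fn sorts_reversed_input() {
        let mut v = [5, 4, 3, 2, 1];
        quicksort(&mut v, |a, b| a < b);
        assert_eq!(v, [1, 2, 3, 4, 5]);
    }
}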