1 //! A priority queue implemented with a binary heap.
3 //! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
4 //! Checking the largest element is *O*(1). Converting a vector to a binary heap
5 //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an
//! *O*(*n* * log(*n*)) in-place heapsort.
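//!
//! # Examples
//!
//! A minimal sketch of those two conversions (illustrative; the values here
//! are arbitrary):
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // `From<Vec<T>>` heapifies the vector in-place in O(n).
//! let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
//!
//! // `into_sorted_vec` is effectively an in-place heapsort.
//! assert_eq!(heap.into_sorted_vec(), [1, 1, 3, 4, 5]);
//! ```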
11 //! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
12 //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
13 //! It shows how to use [`BinaryHeap`] with custom types.
15 //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
16 //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
17 //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
//! ```
//! use std::cmp::Ordering;
//! use std::collections::BinaryHeap;
//!
//! #[derive(Copy, Clone, Eq, PartialEq)]
//! struct State {
//!     cost: usize,
//!     position: usize,
//! }
//!
//! // The priority queue depends on `Ord`.
//! // Explicitly implement the trait so the queue becomes a min-heap
//! // instead of a max-heap.
//! impl Ord for State {
//!     fn cmp(&self, other: &Self) -> Ordering {
//!         // Notice that we flip the ordering on costs.
//!         // In case of a tie we compare positions - this step is necessary
//!         // to make implementations of `PartialEq` and `Ord` consistent.
//!         other.cost.cmp(&self.cost)
//!             .then_with(|| self.position.cmp(&other.position))
//!     }
//! }
//!
//! // `PartialOrd` needs to be implemented as well.
//! impl PartialOrd for State {
//!     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
//!         Some(self.cmp(other))
//!     }
//! }
//!
//! // Each node is represented as a `usize`, for a shorter implementation.
//! struct Edge {
//!     node: usize,
//!     cost: usize,
//! }
//!
//! // Dijkstra's shortest path algorithm.
//!
//! // Start at `start` and use `dist` to track the current shortest distance
//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
//! // for a simpler implementation.
//! fn shortest_path(adj_list: &[Vec<Edge>], start: usize, goal: usize) -> Option<usize> {
//!     // dist[node] = current shortest distance from `start` to `node`
//!     let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
//!
//!     let mut heap = BinaryHeap::new();
//!
//!     // We're at `start`, with a zero cost
//!     dist[start] = 0;
//!     heap.push(State { cost: 0, position: start });
//!
//!     // Examine the frontier with lower cost nodes first (min-heap)
//!     while let Some(State { cost, position }) = heap.pop() {
//!         // Alternatively we could have continued to find all shortest paths
//!         if position == goal { return Some(cost); }
//!
//!         // Important as we may have already found a better way
//!         if cost > dist[position] { continue; }
//!
//!         // For each node we can reach, see if we can find a way with
//!         // a lower cost going through this node
//!         for edge in &adj_list[position] {
//!             let next = State { cost: cost + edge.cost, position: edge.node };
//!
//!             // If so, add it to the frontier and continue
//!             if next.cost < dist[next.position] {
//!                 heap.push(next);
//!                 // Relaxation, we have now found a better way
//!                 dist[next.position] = next.cost;
//!             }
//!         }
//!     }
//!
//!     // Goal not reachable
//!     None
//! }
//!
//! fn main() {
//!     // This is the directed graph we're going to use.
//!     // The node numbers correspond to the different states,
//!     // and the edge weights symbolize the cost of moving
//!     // from one node to another.
//!     // Note that the edges are one-way.
//!     //
//!     //                  7
//!     //          +-----------------+
//!     //          |                 |
//!     //          v   1        2    |  2
//!     //          0 -----> 1 -----> 3 ---> 4
//!     //          |        ^        ^      ^
//!     //          |        | 1      |      |
//!     //          |        |        | 3    | 1
//!     //          +------> 2 -------+      |
//!     //           10      |               |
//!     //                   +---------------+
//!     //
//!     // The graph is represented as an adjacency list where each index,
//!     // corresponding to a node value, has a list of outgoing edges.
//!     // Chosen for its efficiency.
//!     let graph = vec![
//!         // Node 0
//!         vec![Edge { node: 2, cost: 10 },
//!              Edge { node: 1, cost: 1 }],
//!         // Node 1
//!         vec![Edge { node: 3, cost: 2 }],
//!         // Node 2
//!         vec![Edge { node: 1, cost: 1 },
//!              Edge { node: 3, cost: 3 },
//!              Edge { node: 4, cost: 1 }],
//!         // Node 3
//!         vec![Edge { node: 0, cost: 7 },
//!              Edge { node: 4, cost: 2 }],
//!         // Node 4
//!         vec![]];
//!
//!     assert_eq!(shortest_path(&graph, 0, 1), Some(1));
//!     assert_eq!(shortest_path(&graph, 0, 3), Some(3));
//!     assert_eq!(shortest_path(&graph, 3, 0), Some(7));
//!     assert_eq!(shortest_path(&graph, 0, 4), Some(5));
//!     assert_eq!(shortest_path(&graph, 4, 0), None);
//! }
//! ```
143 #![allow(missing_docs)]
144 #![stable(feature = "rust1", since = "1.0.0")]
147 use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
148 use core::mem::{self, swap, ManuallyDrop};
149 use core::ops::{Deref, DerefMut};
152 use crate::collections::TryReserveError;
154 use crate::vec::{self, AsVecIntoIter, Vec};
156 use super::SpecExtend;
161 /// A priority queue implemented with a binary heap.
163 /// This will be a max-heap.
165 /// It is a logic error for an item to be modified in such a way that the
166 /// item's ordering relative to any other item, as determined by the [`Ord`]
167 /// trait, changes while it is in the heap. This is normally only possible
168 /// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
169 /// behavior resulting from such a logic error is not specified, but will
170 /// be encapsulated to the `BinaryHeap` that observed the logic error and not
171 /// result in undefined behavior. This could include panics, incorrect results,
172 /// aborts, memory leaks, and non-termination.
177 /// use std::collections::BinaryHeap;
179 /// // Type inference lets us omit an explicit type signature (which
180 /// // would be `BinaryHeap<i32>` in this example).
181 /// let mut heap = BinaryHeap::new();
/// // We can use peek to look at the next item in the heap. In this case,
/// // there are no items in there yet, so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in heap.iter() {
///     println!("{x}");
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
217 /// A `BinaryHeap` with a known list of items can be initialized from an array:
220 /// use std::collections::BinaryHeap;
222 /// let heap = BinaryHeap::from([1, 5, 2]);
227 /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
228 /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
229 /// value instead of the greatest one.
232 /// use std::collections::BinaryHeap;
233 /// use std::cmp::Reverse;
235 /// let mut heap = BinaryHeap::new();
237 /// // Wrap values in `Reverse`
238 /// heap.push(Reverse(1));
239 /// heap.push(Reverse(5));
240 /// heap.push(Reverse(2));
242 /// // If we pop these scores now, they should come back in the reverse order.
243 /// assert_eq!(heap.pop(), Some(Reverse(1)));
244 /// assert_eq!(heap.pop(), Some(Reverse(2)));
245 /// assert_eq!(heap.pop(), Some(Reverse(5)));
246 /// assert_eq!(heap.pop(), None);
249 /// # Time complexity
251 /// | [push] | [pop] | [peek]/[peek\_mut] |
252 /// |---------|---------------|--------------------|
253 /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
255 /// The value for `push` is an expected cost; the method documentation gives a
256 /// more detailed analysis.
258 /// [`core::cmp::Reverse`]: core::cmp::Reverse
259 /// [`Ord`]: core::cmp::Ord
260 /// [`Cell`]: core::cell::Cell
261 /// [`RefCell`]: core::cell::RefCell
262 /// [push]: BinaryHeap::push
263 /// [pop]: BinaryHeap::pop
264 /// [peek]: BinaryHeap::peek
265 /// [peek\_mut]: BinaryHeap::peek_mut
266 #[stable(feature = "rust1", since = "1.0.0")]
267 #[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
pub struct BinaryHeap<T> {
    data: Vec<T>,
}
/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
275 /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
276 /// its documentation for more.
278 /// [`peek_mut`]: BinaryHeap::peek_mut
279 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
280 pub struct PeekMut<'a, T: 'a + Ord> {
    heap: &'a mut BinaryHeap<T>,
    sift: bool,
}
285 #[stable(feature = "collection_debug", since = "1.17.0")]
286 impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
287 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
288 f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
292 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
293 impl<T: Ord> Drop for PeekMut<'_, T> {
fn drop(&mut self) {
    if self.sift {
        // SAFETY: PeekMut is only instantiated for non-empty heaps.
        unsafe { self.heap.sift_down(0) };
    }
}
302 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
303 impl<T: Ord> Deref for PeekMut<'_, T> {
305 fn deref(&self) -> &T {
306 debug_assert!(!self.heap.is_empty());
// SAFETY: PeekMut is only instantiated for non-empty heaps
308 unsafe { self.heap.data.get_unchecked(0) }
312 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
313 impl<T: Ord> DerefMut for PeekMut<'_, T> {
314 fn deref_mut(&mut self) -> &mut T {
debug_assert!(!self.heap.is_empty());
self.sift = true;
// SAFETY: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.get_unchecked_mut(0) }
322 impl<'a, T: Ord> PeekMut<'a, T> {
323 /// Removes the peeked value from the heap and returns it.
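    ///
    /// # Examples
    ///
    /// A minimal sketch (not from the original docs):
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::binary_heap::PeekMut;
    ///
    /// let mut heap = BinaryHeap::from([1, 3, 2]);
    ///
    /// // `pop` takes the `PeekMut` by value, ending the mutable borrow.
    /// let top = heap.peek_mut().unwrap();
    /// assert_eq!(PeekMut::pop(top), 3);
    ///
    /// assert_eq!(heap.into_sorted_vec(), [1, 2]);
    /// ```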
324 #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
325 pub fn pop(mut this: PeekMut<'a, T>) -> T {
let value = this.heap.pop().unwrap();
this.sift = false;
value
}
332 #[stable(feature = "rust1", since = "1.0.0")]
333 impl<T: Clone> Clone for BinaryHeap<T> {
334 fn clone(&self) -> Self {
335 BinaryHeap { data: self.data.clone() }
338 fn clone_from(&mut self, source: &Self) {
339 self.data.clone_from(&source.data);
343 #[stable(feature = "rust1", since = "1.0.0")]
344 impl<T: Ord> Default for BinaryHeap<T> {
345 /// Creates an empty `BinaryHeap<T>`.
fn default() -> BinaryHeap<T> {
    BinaryHeap::new()
}
352 #[stable(feature = "binaryheap_debug", since = "1.4.0")]
353 impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
354 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
355 f.debug_list().entries(self.iter()).finish()
359 impl<T: Ord> BinaryHeap<T> {
360 /// Creates an empty `BinaryHeap` as a max-heap.
367 /// use std::collections::BinaryHeap;
368 /// let mut heap = BinaryHeap::new();
371 #[stable(feature = "rust1", since = "1.0.0")]
373 pub fn new() -> BinaryHeap<T> {
374 BinaryHeap { data: vec![] }
377 /// Creates an empty `BinaryHeap` with a specific capacity.
378 /// This preallocates enough memory for `capacity` elements,
379 /// so that the `BinaryHeap` does not have to be reallocated
380 /// until it contains at least that many values.
387 /// use std::collections::BinaryHeap;
388 /// let mut heap = BinaryHeap::with_capacity(10);
391 #[stable(feature = "rust1", since = "1.0.0")]
393 pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
394 BinaryHeap { data: Vec::with_capacity(capacity) }
397 /// Returns a mutable reference to the greatest item in the binary heap, or
398 /// `None` if it is empty.
400 /// Note: If the `PeekMut` value is leaked, the heap may be in an
401 /// inconsistent state.
408 /// use std::collections::BinaryHeap;
409 /// let mut heap = BinaryHeap::new();
/// assert!(heap.peek_mut().is_none());
///
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
/// {
///     let mut val = heap.peek_mut().unwrap();
///     *val = 0;
/// }
/// assert_eq!(heap.peek(), Some(&2));
422 /// # Time complexity
424 /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
425 /// otherwise it's *O*(1).
426 #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
427 pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
428 if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
/// Removes the greatest item from the binary heap and returns it, or
/// `None` if it is empty.
439 /// use std::collections::BinaryHeap;
440 /// let mut heap = BinaryHeap::from([1, 3]);
442 /// assert_eq!(heap.pop(), Some(3));
443 /// assert_eq!(heap.pop(), Some(1));
444 /// assert_eq!(heap.pop(), None);
447 /// # Time complexity
449 /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
450 #[stable(feature = "rust1", since = "1.0.0")]
451 pub fn pop(&mut self) -> Option<T> {
452 self.data.pop().map(|mut item| {
453 if !self.is_empty() {
454 swap(&mut item, &mut self.data[0]);
455 // SAFETY: !self.is_empty() means that self.len() > 0
        unsafe { self.sift_down_to_bottom(0) };
    }
    item
})
}
462 /// Pushes an item onto the binary heap.
469 /// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::new();
/// heap.push(3);
/// heap.push(5);
/// heap.push(1);
///
/// assert_eq!(heap.len(), 3);
/// assert_eq!(heap.peek(), Some(&5));
479 /// # Time complexity
481 /// The expected cost of `push`, averaged over every possible ordering of
482 /// the elements being pushed, and over a sufficiently large number of
483 /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
484 /// elements that are *not* already in any sorted pattern.
486 /// The time complexity degrades if elements are pushed in predominantly
487 /// ascending order. In the worst case, elements are pushed in ascending
488 /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
489 /// containing *n* elements.
491 /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
492 /// occurs when capacity is exhausted and needs a resize. The resize cost
493 /// has been amortized in the previous figures.
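///
/// A sketch of sidestepping the resize worst case by reserving up front
/// (illustrative, not part of the original docs):
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::with_capacity(1000);
/// // No reallocation can happen during these pushes.
/// for i in 0..1000 {
///     heap.push(i);
/// }
/// assert!(heap.capacity() >= 1000);
/// ```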
494 #[stable(feature = "rust1", since = "1.0.0")]
495 pub fn push(&mut self, item: T) {
496 let old_len = self.len();
497 self.data.push(item);
498 // SAFETY: Since we pushed a new item it means that
499 // old_len = self.len() - 1 < self.len()
500 unsafe { self.sift_up(0, old_len) };
503 /// Consumes the `BinaryHeap` and returns a vector in sorted
504 /// (ascending) order.
511 /// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
/// heap.push(6);
/// heap.push(3);
///
/// let vec = heap.into_sorted_vec();
/// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
520 #[must_use = "`self` will be dropped if the result is not used"]
521 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
522 pub fn into_sorted_vec(mut self) -> Vec<T> {
let mut end = self.len();
while end > 1 {
    end -= 1;
526 // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
527 // so it's always a valid index to access.
528 // It is safe to access index 0 (i.e. `ptr`), because
529 // 1 <= end < self.len(), which means self.len() >= 2.
    unsafe {
        let ptr = self.data.as_mut_ptr();
        ptr::swap(ptr, ptr.add(end));
    }
534 // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
535 // 0 < 1 <= end <= self.len() - 1 < self.len()
536 // Which means 0 < end and end < self.len().
    unsafe { self.sift_down_range(0, end) };
}

self.into_vec()
}
542 // The implementations of sift_up and sift_down use unsafe blocks in
543 // order to move an element out of the vector (leaving behind a
544 // hole), shift along the others and move the removed element back into the
545 // vector at the final location of the hole.
546 // The `Hole` type is used to represent this, and make sure
547 // the hole is filled back at the end of its scope, even on panic.
548 // Using a hole reduces the constant factor compared to using swaps,
549 // which involves twice as many moves.
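//
// As an illustration (not from the original comments), sifting the last
// element of [5, 3, 9] up one level:
//   1. `Hole::new` reads 9 out of index 2, leaving a logical hole there.
//   2. The parent 5 compares less than 9, so 5 is copied down into the
//      hole at index 2 and the hole moves up to index 0: [_, 3, 5].
//   3. Dropping the `Hole` writes 9 back at index 0, giving [9, 3, 5].
// Each level costs one move, where a swap-based loop would cost two.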
553 /// The caller must guarantee that `pos < self.len()`.
554 unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
555 // Take out the value at `pos` and create a hole.
556 // SAFETY: The caller guarantees that pos < self.len()
557 let mut hole = unsafe { Hole::new(&mut self.data, pos) };
559 while hole.pos() > start {
560 let parent = (hole.pos() - 1) / 2;
562 // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
563 // and so hole.pos() - 1 can't underflow.
564 // This guarantees that parent < hole.pos() so
565 // it's a valid index and also != hole.pos().
if hole.element() <= unsafe { hole.get(parent) } {
    break;
}
570 // SAFETY: Same as above
    unsafe { hole.move_to(parent) };
}

hole.pos()
}
577 /// Take an element at `pos` and move it down the heap,
578 /// while its children are larger.
582 /// The caller must guarantee that `pos < end <= self.len()`.
583 unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
584 // SAFETY: The caller guarantees that pos < end <= self.len().
585 let mut hole = unsafe { Hole::new(&mut self.data, pos) };
586 let mut child = 2 * hole.pos() + 1;
588 // Loop invariant: child == 2 * hole.pos() + 1.
589 while child <= end.saturating_sub(2) {
590 // compare with the greater of the two children
591 // SAFETY: child < end - 1 < self.len() and
592 // child + 1 < end <= self.len(), so they're valid indexes.
593 // child == 2 * hole.pos() + 1 != hole.pos() and
594 // child + 1 == 2 * hole.pos() + 2 != hole.pos().
595 // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
597 child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
599 // if we are already in order, stop.
600 // SAFETY: child is now either the old child or the old child+1
// We have already proven that both are < self.len() and != hole.pos()
if hole.element() >= unsafe { hole.get(child) } {
    return;
}
606 // SAFETY: same as above.
607 unsafe { hole.move_to(child) };
608 child = 2 * hole.pos() + 1;
// SAFETY: `&&` short-circuits, so `hole.get(child)` is only evaluated
// when the first condition holds, i.e. child == end - 1 < self.len().
613 if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
614 // SAFETY: child is already proven to be a valid index and
615 // child == 2 * hole.pos() + 1 != hole.pos().
616 unsafe { hole.move_to(child) };
622 /// The caller must guarantee that `pos < self.len()`.
623 unsafe fn sift_down(&mut self, pos: usize) {
624 let len = self.len();
625 // SAFETY: pos < len is guaranteed by the caller and
626 // obviously len = self.len() <= self.len().
627 unsafe { self.sift_down_range(pos, len) };
630 /// Take an element at `pos` and move it all the way down the heap,
631 /// then sift it up to its position.
633 /// Note: This is faster when the element is known to be large / should
634 /// be closer to the bottom.
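    ///
    /// (A sketch of the intuition, an editorial note rather than original text:
    /// `pop` moves the last leaf to the root, and a leaf is usually small, so
    /// it tends to sink back near the bottom. Sinking it without comparing it
    /// against each child first costs one comparison per level instead of two,
    /// and the final `sift_up` typically backtracks only a step or two.)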
638 /// The caller must guarantee that `pos < self.len()`.
639 unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
let end = self.len();
let start = pos;
643 // SAFETY: The caller guarantees that pos < self.len().
644 let mut hole = unsafe { Hole::new(&mut self.data, pos) };
645 let mut child = 2 * hole.pos() + 1;
647 // Loop invariant: child == 2 * hole.pos() + 1.
648 while child <= end.saturating_sub(2) {
649 // SAFETY: child < end - 1 < self.len() and
650 // child + 1 < end <= self.len(), so they're valid indexes.
651 // child == 2 * hole.pos() + 1 != hole.pos() and
652 // child + 1 == 2 * hole.pos() + 2 != hole.pos().
653 // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
655 child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
657 // SAFETY: Same as above
658 unsafe { hole.move_to(child) };
659 child = 2 * hole.pos() + 1;
662 if child == end - 1 {
663 // SAFETY: child == end - 1 < self.len(), so it's a valid index
664 // and child == 2 * hole.pos() + 1 != hole.pos().
    unsafe { hole.move_to(child) };
}

pos = hole.pos();
drop(hole);
670 // SAFETY: pos is the position in the hole and was already proven
671 // to be a valid index.
672 unsafe { self.sift_up(start, pos) };
675 /// Rebuild assuming data[0..start] is still a proper heap.
676 fn rebuild_tail(&mut self, start: usize) {
if start == self.len() {
    return;
}
681 let tail_len = self.len() - start;
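// Returns floor(log2(x)); callers must pass x >= 1, or the subtraction
// underflows.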
684 fn log2_fast(x: usize) -> usize {
685 (usize::BITS - x.leading_zeros() - 1) as usize
688 // `rebuild` takes O(self.len()) operations
689 // and about 2 * self.len() comparisons in the worst case
690 // while repeating `sift_up` takes O(tail_len * log(start)) operations
691 // and about 1 * tail_len * log_2(start) comparisons in the worst case,
692 // assuming start >= tail_len. For larger heaps, the crossover point
693 // no longer follows this reasoning and was determined empirically.
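// For instance (an illustrative spot-check of this heuristic): with
// start = 1024 and tail_len = 100, rebuilding costs about 2 * 1124 = 2248
// comparisons while sifting up costs about 100 * log2(1024) = 1000,
// so the tail is sifted up instead.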
let better_to_rebuild = if start < tail_len {
    true
696 } else if self.len() <= 2048 {
697 2 * self.len() < tail_len * log2_fast(start)
    2 * self.len() < tail_len * 11
};
if better_to_rebuild {
    self.rebuild();
} else {
705 for i in start..self.len() {
706 // SAFETY: The index `i` is always less than self.len().
707 unsafe { self.sift_up(0, i) };
712 fn rebuild(&mut self) {
let mut n = self.len() / 2;
while n > 0 {
    n -= 1;
716 // SAFETY: n starts from self.len() / 2 and goes down to 0.
717 // The only case when !(n < self.len()) is if
718 // self.len() == 0, but it's ruled out by the loop condition.
719 unsafe { self.sift_down(n) };
723 /// Moves all the elements of `other` into `self`, leaving `other` empty.
730 /// use std::collections::BinaryHeap;
732 /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
733 /// let mut b = BinaryHeap::from([-20, 5, 43]);
735 /// a.append(&mut b);
737 /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
738 /// assert!(b.is_empty());
740 #[stable(feature = "binary_heap_append", since = "1.11.0")]
741 pub fn append(&mut self, other: &mut Self) {
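// Swap so that the shorter heap's elements are the ones appended,
// keeping the tail that must be re-heapified as short as possible.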
if self.len() < other.len() {
    swap(self, other);
}
746 let start = self.data.len();
748 self.data.append(&mut other.data);
750 self.rebuild_tail(start);
753 /// Clears the binary heap, returning an iterator over the removed elements
754 /// in heap order. If the iterator is dropped before being fully consumed,
755 /// it drops the remaining elements in heap order.
757 /// The returned iterator keeps a mutable borrow on the heap to optimize
758 /// its implementation.
761 /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
762 /// You should use the latter for most cases.
769 /// #![feature(binary_heap_drain_sorted)]
770 /// use std::collections::BinaryHeap;
772 /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
773 /// assert_eq!(heap.len(), 5);
775 /// drop(heap.drain_sorted()); // removes all elements in heap order
776 /// assert_eq!(heap.len(), 0);
779 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
780 pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
781 DrainSorted { inner: self }
784 /// Retains only the elements specified by the predicate.
786 /// In other words, remove all elements `e` for which `f(&e)` returns
787 /// `false`. The elements are visited in unsorted (and unspecified) order.
794 /// #![feature(binary_heap_retain)]
795 /// use std::collections::BinaryHeap;
797 /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
799 /// heap.retain(|x| x % 2 == 0); // only keep even numbers
801 /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
803 #[unstable(feature = "binary_heap_retain", issue = "71503")]
804 pub fn retain<F>(&mut self, mut f: F)
806 F: FnMut(&T) -> bool,
let mut first_removed = self.len();
let mut i = 0;
self.data.retain(|e| {
    let keep = f(e);
    if !keep && i < first_removed {
        first_removed = i;
    }
    i += 1;
    keep
});
818 // data[0..first_removed] is untouched, so we only need to rebuild the tail:
819 self.rebuild_tail(first_removed);
823 impl<T> BinaryHeap<T> {
824 /// Returns an iterator visiting all values in the underlying vector, in
832 /// use std::collections::BinaryHeap;
833 /// let heap = BinaryHeap::from([1, 2, 3, 4]);
835 /// // Print 1, 2, 3, 4 in arbitrary order
/// for x in heap.iter() {
///     println!("{x}");
/// }
840 #[stable(feature = "rust1", since = "1.0.0")]
841 pub fn iter(&self) -> Iter<'_, T> {
842 Iter { iter: self.data.iter() }
845 /// Returns an iterator which retrieves elements in heap order.
846 /// This method consumes the original heap.
853 /// #![feature(binary_heap_into_iter_sorted)]
854 /// use std::collections::BinaryHeap;
855 /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
857 /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
859 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
860 pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
861 IntoIterSorted { inner: self }
864 /// Returns the greatest item in the binary heap, or `None` if it is empty.
871 /// use std::collections::BinaryHeap;
872 /// let mut heap = BinaryHeap::new();
/// assert_eq!(heap.peek(), None);
///
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
/// assert_eq!(heap.peek(), Some(&5));
882 /// # Time complexity
884 /// Cost is *O*(1) in the worst case.
886 #[stable(feature = "rust1", since = "1.0.0")]
pub fn peek(&self) -> Option<&T> {
    self.data.get(0)
}
891 /// Returns the number of elements the binary heap can hold without reallocating.
898 /// use std::collections::BinaryHeap;
899 /// let mut heap = BinaryHeap::with_capacity(100);
900 /// assert!(heap.capacity() >= 100);
904 #[stable(feature = "rust1", since = "1.0.0")]
905 pub fn capacity(&self) -> usize {
909 /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
910 /// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
912 /// Note that the allocator may give the collection more space than it requests. Therefore
/// capacity cannot be relied upon to be precisely minimal. Prefer [`reserve`] if future
914 /// insertions are expected.
918 /// Panics if the new capacity overflows `usize`.
925 /// use std::collections::BinaryHeap;
926 /// let mut heap = BinaryHeap::new();
927 /// heap.reserve_exact(100);
928 /// assert!(heap.capacity() >= 100);
932 /// [`reserve`]: BinaryHeap::reserve
933 #[stable(feature = "rust1", since = "1.0.0")]
934 pub fn reserve_exact(&mut self, additional: usize) {
935 self.data.reserve_exact(additional);
938 /// Reserves capacity for at least `additional` more elements to be inserted in the
939 /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
943 /// Panics if the new capacity overflows `usize`.
950 /// use std::collections::BinaryHeap;
951 /// let mut heap = BinaryHeap::new();
952 /// heap.reserve(100);
953 /// assert!(heap.capacity() >= 100);
956 #[stable(feature = "rust1", since = "1.0.0")]
957 pub fn reserve(&mut self, additional: usize) {
958 self.data.reserve(additional);
961 /// Tries to reserve the minimum capacity for exactly `additional`
962 /// elements to be inserted in the given `BinaryHeap<T>`. After calling
963 /// `try_reserve_exact`, capacity will be greater than or equal to
964 /// `self.len() + additional` if it returns `Ok(())`.
965 /// Does nothing if the capacity is already sufficient.
967 /// Note that the allocator may give the collection more space than it
/// requests. Therefore, capacity cannot be relied upon to be precisely
969 /// minimal. Prefer [`try_reserve`] if future insertions are expected.
971 /// [`try_reserve`]: BinaryHeap::try_reserve
975 /// If the capacity overflows, or the allocator reports a failure, then an error
981 /// use std::collections::BinaryHeap;
982 /// use std::collections::TryReserveError;
984 /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
985 /// let mut heap = BinaryHeap::new();
987 /// // Pre-reserve the memory, exiting if we can't
988 /// heap.try_reserve_exact(data.len())?;
990 /// // Now we know this can't OOM in the middle of our complex work
991 /// heap.extend(data.iter());
995 /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
997 #[stable(feature = "try_reserve_2", since = "1.63.0")]
998 pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
999 self.data.try_reserve_exact(additional)
1002 /// Tries to reserve capacity for at least `additional` more elements to be inserted
1003 /// in the given `BinaryHeap<T>`. The collection may reserve more space to avoid
1004 /// frequent reallocations. After calling `try_reserve`, capacity will be
1005 /// greater than or equal to `self.len() + additional`. Does nothing if
1006 /// capacity is already sufficient.
1010 /// If the capacity overflows, or the allocator reports a failure, then an error
1016 /// use std::collections::BinaryHeap;
1017 /// use std::collections::TryReserveError;
1019 /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
1020 /// let mut heap = BinaryHeap::new();
1022 /// // Pre-reserve the memory, exiting if we can't
1023 /// heap.try_reserve(data.len())?;
1025 /// // Now we know this can't OOM in the middle of our complex work
1026 /// heap.extend(data.iter());
1030 /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1032 #[stable(feature = "try_reserve_2", since = "1.63.0")]
1033 pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
1034 self.data.try_reserve(additional)
1037 /// Discards as much additional capacity as possible.
1044 /// use std::collections::BinaryHeap;
1045 /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
1047 /// assert!(heap.capacity() >= 100);
1048 /// heap.shrink_to_fit();
1049 /// assert!(heap.capacity() == 0);
1051 #[stable(feature = "rust1", since = "1.0.0")]
1052 pub fn shrink_to_fit(&mut self) {
1053 self.data.shrink_to_fit();
1056 /// Discards capacity with a lower bound.
1058 /// The capacity will remain at least as large as both the length
1059 /// and the supplied value.
1061 /// If the current capacity is less than the lower limit, this is a no-op.
1066 /// use std::collections::BinaryHeap;
1067 /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
1069 /// assert!(heap.capacity() >= 100);
1070 /// heap.shrink_to(10);
1071 /// assert!(heap.capacity() >= 10);
1074 #[stable(feature = "shrink_to", since = "1.56.0")]
1075 pub fn shrink_to(&mut self, min_capacity: usize) {
1076 self.data.shrink_to(min_capacity)
1079 /// Returns a slice of all values in the underlying vector, in arbitrary
1087 /// #![feature(binary_heap_as_slice)]
1088 /// use std::collections::BinaryHeap;
1089 /// use std::io::{self, Write};
1091 /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
1093 /// io::sink().write(heap.as_slice()).unwrap();
1096 #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
1097 pub fn as_slice(&self) -> &[T] {
1098 self.data.as_slice()
1101 /// Consumes the `BinaryHeap` and returns the underlying vector
1102 /// in arbitrary order.
1109 /// use std::collections::BinaryHeap;
1110 /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
1111 /// let vec = heap.into_vec();
1113 /// // Will print in some order
/// for x in vec {
///     println!("{x}");
/// }
1118 #[must_use = "`self` will be dropped if the result is not used"]
1119 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
pub fn into_vec(self) -> Vec<T> {
    self.into()
}
1124 /// Returns the length of the binary heap.
1131 /// use std::collections::BinaryHeap;
1132 /// let heap = BinaryHeap::from([1, 3]);
1134 /// assert_eq!(heap.len(), 2);
1137 #[stable(feature = "rust1", since = "1.0.0")]
1138 pub fn len(&self) -> usize {
1142 /// Checks if the binary heap is empty.
1149 /// use std::collections::BinaryHeap;
1150 /// let mut heap = BinaryHeap::new();
1152 /// assert!(heap.is_empty());
/// heap.push(3);
///
/// assert!(!heap.is_empty());
1161 #[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
1166 /// Clears the binary heap, returning an iterator over the removed elements
1167 /// in arbitrary order. If the iterator is dropped before being fully
1168 /// consumed, it drops the remaining elements in arbitrary order.
1170 /// The returned iterator keeps a mutable borrow on the heap to optimize
1171 /// its implementation.
1178 /// use std::collections::BinaryHeap;
1179 /// let mut heap = BinaryHeap::from([1, 3]);
1181 /// assert!(!heap.is_empty());
1183 /// for x in heap.drain() {
1184 /// println!("{x}");
1187 /// assert!(heap.is_empty());
1190 #[stable(feature = "drain", since = "1.6.0")]
1191 pub fn drain(&mut self) -> Drain<'_, T> {
1192 Drain { iter: self.data.drain(..) }
1195 /// Drops all items from the binary heap.
1202 /// use std::collections::BinaryHeap;
1203 /// let mut heap = BinaryHeap::from([1, 3]);
1205 /// assert!(!heap.is_empty());
/// heap.clear();
///
/// assert!(heap.is_empty());
1211 #[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
    self.drain();
}
1217 /// Hole represents a hole in a slice i.e., an index without valid value
1218 /// (because it was moved from or duplicated).
1219 /// In drop, `Hole` will restore the slice by filling the hole
1220 /// position with the value that was originally removed.
1221 struct Hole<'a, T: 'a> {
1223 elt: ManuallyDrop<T>,
1227 impl<'a, T> Hole<'a, T> {
1228 /// Create a new `Hole` at index `pos`.
1230 /// Unsafe because pos must be within the data slice.
1232 unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
1233 debug_assert!(pos < data.len());
// SAFETY: pos is guaranteed by the caller to be within the data slice
1235 let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
1236 Hole { data, elt: ManuallyDrop::new(elt), pos }
fn pos(&self) -> usize {
    self.pos
}
1244 /// Returns a reference to the element removed.
fn element(&self) -> &T {
    &self.elt
}
1250 /// Returns a reference to the element at `index`.
1252 /// Unsafe because index must be within the data slice and not equal to pos.
1254 unsafe fn get(&self, index: usize) -> &T {
1255 debug_assert!(index != self.pos);
1256 debug_assert!(index < self.data.len());
1257 unsafe { self.data.get_unchecked(index) }
1260 /// Move hole to new location
1262 /// Unsafe because index must be within the data slice and not equal to pos.
1264 unsafe fn move_to(&mut self, index: usize) {
1265 debug_assert!(index != self.pos);
1266 debug_assert!(index < self.data.len());
1268 let ptr = self.data.as_mut_ptr();
1269 let index_ptr: *const _ = ptr.add(index);
1270 let hole_ptr = ptr.add(self.pos);
1271 ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
1277 impl<T> Drop for Hole<'_, T> {
1279 fn drop(&mut self) {
// fill the hole again
unsafe {
    let pos = self.pos;
    ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
}
1288 /// An iterator over the elements of a `BinaryHeap`.
1290 /// This `struct` is created by [`BinaryHeap::iter()`]. See its
1291 /// documentation for more.
1293 /// [`iter`]: BinaryHeap::iter
1294 #[must_use = "iterators are lazy and do nothing unless consumed"]
1295 #[stable(feature = "rust1", since = "1.0.0")]
1296 pub struct Iter<'a, T: 'a> {
1297 iter: slice::Iter<'a, T>,
1300 #[stable(feature = "collection_debug", since = "1.17.0")]
1301 impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
1302 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1303 f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
1307 // FIXME(#26925) Remove in favor of `#[derive(Clone)]`
1308 #[stable(feature = "rust1", since = "1.0.0")]
1309 impl<T> Clone for Iter<'_, T> {
1310 fn clone(&self) -> Self {
1311 Iter { iter: self.iter.clone() }
1315 #[stable(feature = "rust1", since = "1.0.0")]
1316 impl<'a, T> Iterator for Iter<'a, T> {
1320 fn next(&mut self) -> Option<&'a T> {
1325 fn size_hint(&self) -> (usize, Option<usize>) {
1326 self.iter.size_hint()
1330 fn last(self) -> Option<&'a T> {
1335 #[stable(feature = "rust1", since = "1.0.0")]
1336 impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
1338 fn next_back(&mut self) -> Option<&'a T> {
1339 self.iter.next_back()
1343 #[stable(feature = "rust1", since = "1.0.0")]
1344 impl<T> ExactSizeIterator for Iter<'_, T> {
1345 fn is_empty(&self) -> bool {
1346 self.iter.is_empty()
1350 #[stable(feature = "fused", since = "1.26.0")]
1351 impl<T> FusedIterator for Iter<'_, T> {}
1353 /// An owning iterator over the elements of a `BinaryHeap`.
1355 /// This `struct` is created by [`BinaryHeap::into_iter()`]
1356 /// (provided by the [`IntoIterator`] trait). See its documentation for more.
1358 /// [`into_iter`]: BinaryHeap::into_iter
1359 /// [`IntoIterator`]: core::iter::IntoIterator
1360 #[stable(feature = "rust1", since = "1.0.0")]
1362 pub struct IntoIter<T> {
1363 iter: vec::IntoIter<T>,
1366 #[stable(feature = "collection_debug", since = "1.17.0")]
1367 impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
1368 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1369 f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
1373 #[stable(feature = "rust1", since = "1.0.0")]
1374 impl<T> Iterator for IntoIter<T> {
1378 fn next(&mut self) -> Option<T> {
1383 fn size_hint(&self) -> (usize, Option<usize>) {
1384 self.iter.size_hint()
1388 #[stable(feature = "rust1", since = "1.0.0")]
1389 impl<T> DoubleEndedIterator for IntoIter<T> {
1391 fn next_back(&mut self) -> Option<T> {
1392 self.iter.next_back()
1396 #[stable(feature = "rust1", since = "1.0.0")]
1397 impl<T> ExactSizeIterator for IntoIter<T> {
1398 fn is_empty(&self) -> bool {
1399 self.iter.is_empty()
1403 #[stable(feature = "fused", since = "1.26.0")]
1404 impl<T> FusedIterator for IntoIter<T> {}
// In addition to the SAFETY invariants of the following three unsafe traits,
// also refer to the `vec::in_place_collect` module documentation for an overview.
1408 #[unstable(issue = "none", feature = "inplace_iteration")]
1410 unsafe impl<T> SourceIter for IntoIter<T> {
1411 type Source = IntoIter<T>;
1414 unsafe fn as_inner(&mut self) -> &mut Self::Source {
1419 #[unstable(issue = "none", feature = "inplace_iteration")]
1421 unsafe impl<I> InPlaceIterable for IntoIter<I> {}
unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;
1426 fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
1431 #[must_use = "iterators are lazy and do nothing unless consumed"]
1432 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1433 #[derive(Clone, Debug)]
1434 pub struct IntoIterSorted<T> {
1435 inner: BinaryHeap<T>,
1438 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1439 impl<T: Ord> Iterator for IntoIterSorted<T> {
fn next(&mut self) -> Option<T> {
    self.inner.pop()
}
1448 fn size_hint(&self) -> (usize, Option<usize>) {
1449 let exact = self.inner.len();
1450 (exact, Some(exact))
1454 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1455 impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
1457 #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1458 impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
1460 #[unstable(feature = "trusted_len", issue = "37572")]
1461 unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
1463 /// A draining iterator over the elements of a `BinaryHeap`.
1465 /// This `struct` is created by [`BinaryHeap::drain()`]. See its
1466 /// documentation for more.
1468 /// [`drain`]: BinaryHeap::drain
1469 #[stable(feature = "drain", since = "1.6.0")]
1471 pub struct Drain<'a, T: 'a> {
1472 iter: vec::Drain<'a, T>,
1475 #[stable(feature = "drain", since = "1.6.0")]
1476 impl<T> Iterator for Drain<'_, T> {
1480 fn next(&mut self) -> Option<T> {
1485 fn size_hint(&self) -> (usize, Option<usize>) {
1486 self.iter.size_hint()
1490 #[stable(feature = "drain", since = "1.6.0")]
1491 impl<T> DoubleEndedIterator for Drain<'_, T> {
1493 fn next_back(&mut self) -> Option<T> {
1494 self.iter.next_back()
1498 #[stable(feature = "drain", since = "1.6.0")]
1499 impl<T> ExactSizeIterator for Drain<'_, T> {
1500 fn is_empty(&self) -> bool {
1501 self.iter.is_empty()
1505 #[stable(feature = "fused", since = "1.26.0")]
1506 impl<T> FusedIterator for Drain<'_, T> {}
1508 /// A draining iterator over the elements of a `BinaryHeap`.
1510 /// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
1511 /// documentation for more.
1513 /// [`drain_sorted`]: BinaryHeap::drain_sorted
1514 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1516 pub struct DrainSorted<'a, T: Ord> {
1517 inner: &'a mut BinaryHeap<T>,
1520 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1521 impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
1522 /// Removes heap elements in heap order.
1523 fn drop(&mut self) {
1524 struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
1526 impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
1527 fn drop(&mut self) {
1528 while self.0.inner.pop().is_some() {}
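// Pop the elements one by one behind a guard: if dropping an element
// panics, the guard's `Drop` above pops (and drops) the rest first.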
while let Some(item) = self.inner.pop() {
    let guard = DropGuard(self);
    drop(item);
    mem::forget(guard);
}
1540 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1541 impl<T: Ord> Iterator for DrainSorted<'_, T> {
fn next(&mut self) -> Option<T> {
    self.inner.pop()
}
1550 fn size_hint(&self) -> (usize, Option<usize>) {
1551 let exact = self.inner.len();
1552 (exact, Some(exact))
1556 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1557 impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
1559 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1560 impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
1562 #[unstable(feature = "trusted_len", issue = "37572")]
1563 unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
1565 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1566 impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
1567 /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
1569 /// This conversion happens in-place, and has *O*(*n*) time complexity.
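    ///
    /// A minimal sketch (not from the original docs):
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from(vec![2, 9, 4]);
    /// assert_eq!(heap.peek(), Some(&9));
    /// ```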
1570 fn from(vec: Vec<T>) -> BinaryHeap<T> {
let mut heap = BinaryHeap { data: vec };
heap.rebuild();
heap
}
1577 #[stable(feature = "std_collections_from_array", since = "1.56.0")]
1578 impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
1580 /// use std::collections::BinaryHeap;
1582 /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
1583 /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
1584 /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
1585 /// assert_eq!(a, b);
1588 fn from(arr: [T; N]) -> Self {
1589 Self::from_iter(arr)
1593 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1594 impl<T> From<BinaryHeap<T>> for Vec<T> {
1595 /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
1597 /// This conversion requires no data movement or allocation, and has
1598 /// constant time complexity.
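    ///
    /// A minimal sketch (not from the original docs):
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from([4, 1, 3]);
    /// let vec = Vec::from(heap);
    /// // The elements come back in the heap's internal (arbitrary) order.
    /// assert_eq!(vec.len(), 3);
    /// ```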
fn from(heap: BinaryHeap<T>) -> Vec<T> {
    heap.data
}
1604 #[stable(feature = "rust1", since = "1.0.0")]
1605 impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
1606 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
1607 BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
1611 #[stable(feature = "rust1", since = "1.0.0")]
1612 impl<T> IntoIterator for BinaryHeap<T> {
1614 type IntoIter = IntoIter<T>;
1616 /// Creates a consuming iterator, that is, one that moves each value out of
1617 /// the binary heap in arbitrary order. The binary heap cannot be used
1618 /// after calling this.
1625 /// use std::collections::BinaryHeap;
1626 /// let heap = BinaryHeap::from([1, 2, 3, 4]);
1628 /// // Print 1, 2, 3, 4 in arbitrary order
1629 /// for x in heap.into_iter() {
1630 /// // x has type i32, not &i32
1631 /// println!("{x}");
1634 fn into_iter(self) -> IntoIter<T> {
1635 IntoIter { iter: self.data.into_iter() }
1639 #[stable(feature = "rust1", since = "1.0.0")]
1640 impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
1642 type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
    self.iter()
}
1649 #[stable(feature = "rust1", since = "1.0.0")]
1650 impl<T: Ord> Extend<T> for BinaryHeap<T> {
1652 fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1653 <Self as SpecExtend<I>>::spec_extend(self, iter);
fn extend_one(&mut self, item: T) {
    self.push(item);
}
1662 fn extend_reserve(&mut self, additional: usize) {
1663 self.reserve(additional);
1667 impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
1668 default fn spec_extend(&mut self, iter: I) {
1669 self.extend_desugared(iter.into_iter());
1673 impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
1674 fn spec_extend(&mut self, ref mut other: Vec<T>) {
1675 let start = self.data.len();
1676 self.data.append(other);
1677 self.rebuild_tail(start);
1681 impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
    self.append(other);
}
1687 impl<T: Ord> BinaryHeap<T> {
1688 fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1689 let iterator = iter.into_iter();
1690 let (lower, _) = iterator.size_hint();
1692 self.reserve(lower);
1694 iterator.for_each(move |elem| self.push(elem));
1698 #[stable(feature = "extend_ref", since = "1.2.0")]
1699 impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
1700 fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
1701 self.extend(iter.into_iter().cloned());
fn extend_one(&mut self, &item: &'a T) {
    self.push(item);
}
1710 fn extend_reserve(&mut self, additional: usize) {
1711 self.reserve(additional);