//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
//! Checking the largest element is *O*(1). Converting a vector to a binary heap
//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* \* log(*n*))
//! in-place heapsort.
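//!
//! For example, a vector can be heapified and then drained into ascending
//! order (a small sketch of the two conversions described above):
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // `From<Vec<T>>` heapifies the vector in-place in O(n)...
//! let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
//! // ...and `into_sorted_vec` sorts in-place, i.e. an O(n * log(n)) heapsort.
//! assert_eq!(heap.into_sorted_vec(), [1, 1, 3, 4, 5]);
//! ```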
//!
//! # Examples
//!
//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
//! It shows how to use [`BinaryHeap`] with custom types.
//!
//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
//!
//! ```
//! use std::cmp::Ordering;
//! use std::collections::BinaryHeap;
//!
//! #[derive(Copy, Clone, Eq, PartialEq)]
//! struct State {
//!     cost: usize,
//!     position: usize,
//! }
//!
//! // The priority queue depends on `Ord`.
//! // Explicitly implement the trait so the queue becomes a min-heap
//! // instead of a max-heap.
//! impl Ord for State {
//!     fn cmp(&self, other: &Self) -> Ordering {
//!         // Notice that we flip the ordering on costs.
//!         // In case of a tie we compare positions - this step is necessary
//!         // to make implementations of `PartialEq` and `Ord` consistent.
//!         other.cost.cmp(&self.cost)
//!             .then_with(|| self.position.cmp(&other.position))
//!     }
//! }
//!
//! // `PartialOrd` needs to be implemented as well.
//! impl PartialOrd for State {
//!     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
//!         Some(self.cmp(other))
//!     }
//! }
//!
//! // Each node is represented as a `usize`, for a shorter implementation.
//! struct Edge {
//!     node: usize,
//!     cost: usize,
//! }
//!
//! // Dijkstra's shortest path algorithm.
//!
//! // Start at `start` and use `dist` to track the current shortest distance
//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
//! // for a simpler implementation.
//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
//!     // dist[node] = current shortest distance from `start` to `node`
//!     let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
//!
//!     let mut heap = BinaryHeap::new();
//!
//!     // We're at `start`, with a zero cost
//!     dist[start] = 0;
//!     heap.push(State { cost: 0, position: start });
//!
//!     // Examine the frontier with lower cost nodes first (min-heap)
//!     while let Some(State { cost, position }) = heap.pop() {
//!         // Alternatively we could have continued to find all shortest paths
//!         if position == goal { return Some(cost); }
//!
//!         // Important as we may have already found a better way
//!         if cost > dist[position] { continue; }
//!
//!         // For each node we can reach, see if we can find a way with
//!         // a lower cost going through this node
//!         for edge in &adj_list[position] {
//!             let next = State { cost: cost + edge.cost, position: edge.node };
//!
//!             // If so, add it to the frontier and continue
//!             if next.cost < dist[next.position] {
//!                 heap.push(next);
//!                 // Relaxation, we have now found a better way
//!                 dist[next.position] = next.cost;
//!             }
//!         }
//!     }
//!
//!     // Goal not reachable
//!     None
//! }
//!
//! fn main() {
//!     // This is the directed graph we're going to use.
//!     // The node numbers correspond to the different states,
//!     // and the edge weights symbolize the cost of moving
//!     // from one node to another.
//!     // Note that the edges are one-way.
//!     //
//!     //                  7
//!     //          +-----------------+
//!     //          |                 |
//!     //          v   1        2   |  2
//!     //          0 -----> 1 -----> 3 ---> 4
//!     //          |        ^        ^      ^
//!     //          |        | 1      |      |
//!     //          |        |        | 3    | 1
//!     //          +------> 2 -------+      |
//!     //           10      |               |
//!     //                   +---------------+
//!     //
//!     // The graph is represented as an adjacency list where each index,
//!     // corresponding to a node value, has a list of outgoing edges.
//!     // Chosen for its efficiency.
//!     let graph = vec![
//!         // Node 0
//!         vec![Edge { node: 2, cost: 10 },
//!              Edge { node: 1, cost: 1 }],
//!         // Node 1
//!         vec![Edge { node: 3, cost: 2 }],
//!         // Node 2
//!         vec![Edge { node: 1, cost: 1 },
//!              Edge { node: 3, cost: 3 },
//!              Edge { node: 4, cost: 1 }],
//!         // Node 3
//!         vec![Edge { node: 0, cost: 7 },
//!              Edge { node: 4, cost: 2 }],
//!         // Node 4
//!         vec![]];
//!
//!     assert_eq!(shortest_path(&graph, 0, 1), Some(1));
//!     assert_eq!(shortest_path(&graph, 0, 3), Some(3));
//!     assert_eq!(shortest_path(&graph, 3, 0), Some(7));
//!     assert_eq!(shortest_path(&graph, 0, 4), Some(5));
//!     assert_eq!(shortest_path(&graph, 4, 0), None);
//! }
//! ```
#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]

use core::fmt;
use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr;

use crate::collections::TryReserveError;
use crate::slice;
use crate::vec::{self, AsVecIntoIter, Vec};

use super::SpecExtend;

#[cfg(test)]
mod tests;
/// A priority queue implemented with a binary heap.
///
/// This will be a max-heap.
///
/// It is a logic error for an item to be modified in such a way that the
/// item's ordering relative to any other item, as determined by the [`Ord`]
/// trait, changes while it is in the heap. This is normally only possible
/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
/// behavior resulting from such a logic error is not specified (it
/// could include panics, incorrect results, aborts, memory leaks, or
/// non-termination) but will not be undefined behavior.
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// // Type inference lets us omit an explicit type signature (which
/// // would be `BinaryHeap<i32>` in this example).
/// let mut heap = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there are no items in there yet so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in &heap {
///     println!("{x}");
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
///
/// A `BinaryHeap` with a known list of items can be initialized from an array:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 5, 2]);
/// ```
///
/// ## Min-heap
///
/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
/// value instead of the greatest one.
///
/// ```
/// use std::collections::BinaryHeap;
/// use std::cmp::Reverse;
///
/// let mut heap = BinaryHeap::new();
///
/// // Wrap values in `Reverse`
/// heap.push(Reverse(1));
/// heap.push(Reverse(5));
/// heap.push(Reverse(2));
///
/// // If we pop these scores now, they should come back in the reverse order.
/// assert_eq!(heap.pop(), Some(Reverse(1)));
/// assert_eq!(heap.pop(), Some(Reverse(2)));
/// assert_eq!(heap.pop(), Some(Reverse(5)));
/// assert_eq!(heap.pop(), None);
/// ```
///
/// # Time complexity
///
/// | [push]  | [pop]         | [peek]/[peek\_mut] |
/// |---------|---------------|--------------------|
/// | *O*(1)~ | *O*(log(*n*)) | *O*(1)             |
///
/// The value for `push` is an expected cost; the method documentation gives a
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
/// [push]: BinaryHeap::push
/// [pop]: BinaryHeap::pop
/// [peek]: BinaryHeap::peek
/// [peek\_mut]: BinaryHeap::peek_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
pub struct BinaryHeap<T> {
    data: Vec<T>,
}
/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
///
/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
/// its documentation for more.
///
/// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<'a, T: 'a + Ord> {
    heap: &'a mut BinaryHeap<T>,
    sift: bool,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
    }
}
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Drop for PeekMut<'_, T> {
    fn drop(&mut self) {
        if self.sift {
            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> Deref for PeekMut<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked(0) }
    }
}
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord> DerefMut for PeekMut<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());
        self.sift = true;
        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}
impl<'a, T: Ord> PeekMut<'a, T> {
    /// Removes the peeked value from the heap and returns it.
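    ///
    /// # Examples
    ///
    /// A minimal usage sketch (the call consumes the `PeekMut` guard):
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::binary_heap::PeekMut;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 3]);
    /// let peeked = heap.peek_mut().unwrap();
    ///
    /// // Removes the greatest item (here 3) and returns it.
    /// assert_eq!(PeekMut::pop(peeked), 3);
    /// assert_eq!(heap.len(), 2);
    /// ```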
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T>) -> T {
        let value = this.heap.pop().unwrap();
        this.sift = false;
        value
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for BinaryHeap<T> {
    fn clone(&self) -> Self {
        BinaryHeap { data: self.data.clone() }
    }

    fn clone_from(&mut self, source: &Self) {
        self.data.clone_from(&source.data);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Default for BinaryHeap<T> {
    /// Creates an empty `BinaryHeap<T>`.
    #[inline]
    fn default() -> BinaryHeap<T> {
        BinaryHeap::new()
    }
}
#[stable(feature = "binaryheap_debug", since = "1.4.0")]
impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}
impl<T: Ord> BinaryHeap<T> {
    /// Creates an empty `BinaryHeap` as a max-heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn new() -> BinaryHeap<T> {
        BinaryHeap { data: vec![] }
    }
    /// Creates an empty `BinaryHeap` with a specific capacity.
    /// This preallocates enough memory for `capacity` elements,
    /// so that the `BinaryHeap` does not have to be reallocated
    /// until it contains at least that many values.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(10);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
        BinaryHeap { data: Vec::with_capacity(capacity) }
    }
    /// Returns a mutable reference to the greatest item in the binary heap, or
    /// `None` if it is empty.
    ///
    /// Note: If the `PeekMut` value is leaked, the heap may be in an
    /// inconsistent state.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert!(heap.peek_mut().is_none());
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// {
    ///     let mut val = heap.peek_mut().unwrap();
    ///     *val = 0;
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```
    ///
    /// # Time complexity
    ///
    /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
    /// otherwise it's *O*(1).
    #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
    pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
        if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
    }
    /// Removes the greatest item from the binary heap and returns it, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.pop(), Some(3));
    /// assert_eq!(heap.pop(), Some(1));
    /// assert_eq!(heap.pop(), None);
    /// ```
    ///
    /// # Time complexity
    ///
    /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<T> {
        self.data.pop().map(|mut item| {
            if !self.is_empty() {
                swap(&mut item, &mut self.data[0]);
                // SAFETY: !self.is_empty() means that self.len() > 0
                unsafe { self.sift_down_to_bottom(0) };
            }
            item
        })
    }
    /// Pushes an item onto the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert_eq!(heap.len(), 3);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// The expected cost of `push`, averaged over every possible ordering of
    /// the elements being pushed, and over a sufficiently large number of
    /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
    /// elements that are *not* already in any sorted pattern.
    ///
    /// The time complexity degrades if elements are pushed in predominantly
    /// ascending order. In the worst case, elements are pushed in ascending
    /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
    /// containing *n* elements.
    ///
    /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
    /// occurs when capacity is exhausted and needs a resize. The resize cost
    /// has been amortized in the previous figures.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push(&mut self, item: T) {
        let old_len = self.len();
        self.data.push(item);
        // SAFETY: Since we pushed a new item it means that
        // old_len = self.len() - 1 < self.len()
        unsafe { self.sift_up(0, old_len) };
    }
    /// Consumes the `BinaryHeap` and returns a vector in sorted
    /// (ascending) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
    /// heap.push(6);
    /// heap.push(3);
    ///
    /// let vec = heap.into_sorted_vec();
    /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_sorted_vec(mut self) -> Vec<T> {
        let mut end = self.len();
        while end > 1 {
            end -= 1;
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
            // so it's always a valid index to access.
            // It is safe to access index 0 (i.e. `ptr`), because
            // 1 <= end < self.len(), which means self.len() >= 2.
            unsafe {
                let ptr = self.data.as_mut_ptr();
                ptr::swap(ptr, ptr.add(end));
            }
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
            // 0 < 1 <= end <= self.len() - 1 < self.len()
            // Which means 0 < end and end < self.len().
            unsafe { self.sift_down_range(0, end) };
        }
        self.into_vec()
    }
    // The implementations of sift_up and sift_down use unsafe blocks in
    // order to move an element out of the vector (leaving behind a
    // hole), shift along the others and move the removed element back into the
    // vector at the final location of the hole.
    // The `Hole` type is used to represent this, and make sure
    // the hole is filled back at the end of its scope, even on panic.
    // Using a hole reduces the constant factor compared to using swaps,
    // which involves twice as many moves.
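    //
    // Concretely (an illustrative note): a swap-based loop moves *both*
    // elements at every step, while a hole moves only the neighboring
    // element into the hole and writes the saved element back exactly once,
    // at its final position.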
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
        // Take out the value at `pos` and create a hole.
        // SAFETY: The caller guarantees that pos < self.len()
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };

        while hole.pos() > start {
            let parent = (hole.pos() - 1) / 2;

            // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
            // and so hole.pos() - 1 can't underflow.
            // This guarantees that parent < hole.pos() so
            // it's a valid index and also != hole.pos().
            if hole.element() <= unsafe { hole.get(parent) } {
                break;
            }

            // SAFETY: Same as above
            unsafe { hole.move_to(parent) };
        }

        hole.pos()
    }
    /// Take an element at `pos` and move it down the heap,
    /// while its children are larger.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < end <= self.len()`.
    unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
        // SAFETY: The caller guarantees that pos < end <= self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // compare with the greater of the two children
            // SAFETY: child < end - 1 < self.len() and
            // child + 1 < end <= self.len(), so they're valid indexes.
            // child == 2 * hole.pos() + 1 != hole.pos() and
            // child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // if we are already in order, stop.
            // SAFETY: child is now either the old child or the old child+1
            // We have already proven that both are < self.len() and != hole.pos()
            if hole.element() >= unsafe { hole.get(child) } {
                return;
            }

            // SAFETY: same as above.
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        // SAFETY: && short circuit, which means that in the
        // second condition it's already true that child == end - 1 < self.len().
        if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
            // SAFETY: child is already proven to be a valid index and
            // child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
    }
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down(&mut self, pos: usize) {
        let len = self.len();
        // SAFETY: pos < len is guaranteed by the caller and
        // obviously len = self.len() <= self.len().
        unsafe { self.sift_down_range(pos, len) };
    }
    /// Take an element at `pos` and move it all the way down the heap,
    /// then sift it up to its position.
    ///
    /// Note: This is faster when the element is known to be large / should
    /// be closer to the bottom.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
        let end = self.len();
        let start = pos;

        // SAFETY: The caller guarantees that pos < self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // SAFETY: child < end - 1 < self.len() and
            // child + 1 < end <= self.len(), so they're valid indexes.
            // child == 2 * hole.pos() + 1 != hole.pos() and
            // child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // SAFETY: Same as above
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        if child == end - 1 {
            // SAFETY: child == end - 1 < self.len(), so it's a valid index
            // and child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
        pos = hole.pos();
        drop(hole);

        // SAFETY: pos is the position in the hole and was already proven
        // to be a valid index.
        unsafe { self.sift_up(start, pos) };
    }
    /// Rebuild assuming data[0..start] is still a proper heap.
    fn rebuild_tail(&mut self, start: usize) {
        if start == self.len() {
            return;
        }

        let tail_len = self.len() - start;
        #[inline(always)]
        fn log2_fast(x: usize) -> usize {
            (usize::BITS - x.leading_zeros() - 1) as usize
        }
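        // E.g. log2_fast(1) == 0 and log2_fast(1024) == 10. Note that `x`
        // must be nonzero, which holds below: log2_fast is only reached when
        // start >= tail_len, and tail_len >= 1 because of the early return.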
        // `rebuild` takes O(self.len()) operations
        // and about 2 * self.len() comparisons in the worst case
        // while repeating `sift_up` takes O(tail_len * log(start)) operations
        // and about 1 * tail_len * log_2(start) comparisons in the worst case,
        // assuming start >= tail_len. For larger heaps, the crossover point
        // no longer follows this reasoning and was determined empirically.
        let better_to_rebuild = if start < tail_len {
            true
        } else if self.len() <= 2048 {
            2 * self.len() < tail_len * log2_fast(start)
        } else {
            2 * self.len() < tail_len * 11
        };
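        // Worked example (illustrative figures): with self.len() == 1024 and
        // tail_len == 64 (so start == 960), rebuilding costs about
        // 2 * 1024 = 2048 comparisons while sifting up the tail costs about
        // 64 * log2(960) ~= 64 * 9 = 576, so the tail is sifted up instead.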
        if better_to_rebuild {
            self.rebuild();
        } else {
            for i in start..self.len() {
                // SAFETY: The index `i` is always less than self.len().
                unsafe { self.sift_up(0, i) };
            }
        }
    }
    fn rebuild(&mut self) {
        let mut n = self.len() / 2;
        while n > 0 {
            n -= 1;
            // SAFETY: n starts from self.len() / 2 and goes down to 0.
            // The only case when !(n < self.len()) is if
            // self.len() == 0, but it's ruled out by the loop condition.
            unsafe { self.sift_down(n) };
        }
    }
    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
    /// let mut b = BinaryHeap::from([-20, 5, 43]);
    ///
    /// a.append(&mut b);
    ///
    /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
    /// assert!(b.is_empty());
    /// ```
    #[stable(feature = "binary_heap_append", since = "1.11.0")]
    pub fn append(&mut self, other: &mut Self) {
        if self.len() < other.len() {
            swap(self, other);
        }

        let start = self.data.len();

        self.data.append(&mut other.data);

        self.rebuild_tail(start);
    }
    /// Clears the binary heap, returning an iterator over the removed elements
    /// in heap order. If the iterator is dropped before being fully consumed,
    /// it drops the remaining elements in heap order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// Note:
    /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
    ///   You should use the latter for most cases.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_drain_sorted)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    /// assert_eq!(heap.len(), 5);
    ///
    /// drop(heap.drain_sorted()); // removes all elements in heap order
    /// assert_eq!(heap.len(), 0);
    /// ```
    #[inline]
    #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
    pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
        DrainSorted { inner: self }
    }
    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns
    /// `false`. The elements are visited in unsorted (and unspecified) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_retain)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
    ///
    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
    ///
    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
    /// ```
    #[unstable(feature = "binary_heap_retain", issue = "71503")]
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        let mut first_removed = self.len();
        let mut i = 0;
        self.data.retain(|e| {
            let keep = f(e);
            if !keep && i < first_removed {
                first_removed = i;
            }
            i += 1;
            keep
        });
        // data[0..first_removed] is untouched, so we only need to rebuild the tail:
        self.rebuild_tail(first_removed);
    }
}
impl<T> BinaryHeap<T> {
    /// Returns an iterator visiting all values in the underlying vector, in
    /// arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.iter() {
    ///     println!("{x}");
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<'_, T> {
        Iter { iter: self.data.iter() }
    }
    /// Returns an iterator which retrieves elements in heap order.
    /// This method consumes the original heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_into_iter_sorted)]
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
    /// ```
    #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
    pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
        IntoIterSorted { inner: self }
    }
    /// Returns the greatest item in the binary heap, or `None` if it is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert_eq!(heap.peek(), None);
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// Cost is *O*(1) in the worst case.
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn peek(&self) -> Option<&T> {
        self.data.get(0)
    }
    /// Returns the number of elements the binary heap can hold without reallocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }
    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
    /// given `BinaryHeap`. Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore
    /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
    /// insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve_exact(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    ///
    /// [`reserve`]: BinaryHeap::reserve
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.data.reserve_exact(additional);
    }
    /// Reserves capacity for at least `additional` more elements to be inserted in the
    /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        self.data.reserve(additional);
    }
    /// Tries to reserve the minimum capacity for exactly `additional`
    /// elements to be inserted in the given `BinaryHeap<T>`. After calling
    /// `try_reserve_exact`, capacity will be greater than or equal to
    /// `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity can not be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(try_reserve_2)]
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[unstable(feature = "try_reserve_2", issue = "91789")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve_exact(additional)
    }
    /// Tries to reserve capacity for at least `additional` more elements to be inserted
    /// in the given `BinaryHeap<T>`. The collection may reserve more space to avoid
    /// frequent reallocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional`. Does nothing if
    /// capacity is already sufficient.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(try_reserve_2)]
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[unstable(feature = "try_reserve_2", issue = "91789")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve(additional)
    }
    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        self.data.shrink_to_fit();
    }
    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.data.shrink_to(min_capacity)
    }
    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_as_slice)]
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
    pub fn as_slice(&self) -> &[T] {
        self.data.as_slice()
    }
    /// Consumes the `BinaryHeap` and returns the underlying vector
    /// in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    /// let vec = heap.into_vec();
    ///
    /// // Will print in some order
    /// for x in vec {
    ///     println!("{x}");
    /// }
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_vec(self) -> Vec<T> {
        self.into()
    }
    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        self.data.len()
    }
    /// Checks if the binary heap is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    ///
    /// assert!(heap.is_empty());
    ///
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert!(!heap.is_empty());
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Clears the binary heap, returning an iterator over the removed elements
    /// in arbitrary order. If the iterator is dropped before being fully
    /// consumed, it drops the remaining elements in arbitrary order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{x}");
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T> {
        Drain { iter: self.data.drain(..) }
    }
    /// Drops all items from the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// heap.clear();
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        self.drain();
    }
}
/// Hole represents a hole in a slice i.e., an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    data: &'a mut [T],
    elt: ManuallyDrop<T>,
    pos: usize,
}
impl<'a, T> Hole<'a, T> {
    /// Create a new `Hole` at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    #[inline]
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFE: pos should be inside the slice
        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }
    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    /// Returns a reference to the element removed.
    #[inline]
    fn element(&self) -> &T {
        &self.elt
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe { self.data.get_unchecked(index) }
    }
    /// Move hole to new location
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe {
            let ptr = self.data.as_mut_ptr();
            let index_ptr: *const _ = ptr.add(index);
            let hole_ptr = ptr.add(self.pos);
            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        }
        self.pos = index;
    }
}
impl<T> Drop for Hole<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // fill the hole again
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}
/// An iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::iter()`]. See its
/// documentation for more.
///
/// [`iter`]: BinaryHeap::iter
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    iter: slice::Iter<'a, T>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
    fn clone(&self) -> Self {
        Iter { iter: self.iter.clone() }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn last(self) -> Option<&'a T> {
        self.iter.last()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        self.iter.next_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
/// An owning iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::into_iter()`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
    iter: vec::IntoIter<T>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T> SourceIter for IntoIter<T> {
    type Source = IntoIter<T>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I> InPlaceIterable for IntoIter<I> {}

unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;

    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        &mut self.iter
    }
}
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<T> {
    inner: BinaryHeap<T>,
}
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> Iterator for IntoIterSorted<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for IntoIterSorted<T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
///
/// [`drain`]: BinaryHeap::drain
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<'a, T: 'a> {
    iter: vec::Drain<'a, T>,
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T> Iterator for Drain<'_, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T> DoubleEndedIterator for Drain<'_, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T> ExactSizeIterator for Drain<'_, T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Drain<'_, T> {}
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
///
/// [`drain_sorted`]: BinaryHeap::drain_sorted
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<'a, T: Ord> {
    inner: &'a mut BinaryHeap<T>,
}
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);

        impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
            fn drop(&mut self) {
                while self.0.inner.pop().is_some() {}
            }
        }
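        // Each pop below is wrapped in a guard: if `drop(item)` panics, the
        // guard's destructor still pops (and drops) the remaining elements in
        // heap order before the panic continues unwinding; on the normal path
        // the guard is forgotten and the loop simply continues.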
        while let Some(item) = self.inner.pop() {
            let guard = DropGuard(self);
            drop(item);
            mem::forget(guard);
        }
    }
}
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> Iterator for DrainSorted<'_, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
    /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
    ///
    /// This conversion happens in-place, and has *O*(*n*) time complexity.
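    ///
    /// # Examples
    ///
    /// A minimal sketch of the conversion:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from(vec![1, 3, 2]);
    /// assert_eq!(heap.peek(), Some(&3));
    /// ```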
    fn from(vec: Vec<T>) -> BinaryHeap<T> {
        let mut heap = BinaryHeap { data: vec };
        heap.rebuild();
        heap
    }
}
#[stable(feature = "std_collections_from_array", since = "1.56.0")]
impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
    /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
    /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    fn from(arr: [T; N]) -> Self {
        Self::from_iter(arr)
    }
}
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T> From<BinaryHeap<T>> for Vec<T> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
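    ///
    /// # Examples
    ///
    /// A minimal sketch of the conversion:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from([1, 2, 3]);
    /// let vec: Vec<i32> = heap.into();
    ///
    /// // The vector is in arbitrary (heap-internal) order.
    /// assert_eq!(vec.len(), 3);
    /// ```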
    fn from(heap: BinaryHeap<T>) -> Vec<T> {
        heap.data
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
        BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for BinaryHeap<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Creates a consuming iterator, that is, one that moves each value out of
    /// the binary heap in arbitrary order. The binary heap cannot be used
    /// after calling this.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.into_iter() {
    ///     // x has type i32, not &i32
    ///     println!("{x}");
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<T> {
        IntoIter { iter: self.data.into_iter() }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Extend<T> for BinaryHeap<T> {
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        <Self as SpecExtend<I>>::spec_extend(self, iter);
    }

    #[inline]
    fn extend_one(&mut self, item: T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}
impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
    default fn spec_extend(&mut self, iter: I) {
        self.extend_desugared(iter.into_iter());
    }
}
impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: Vec<T>) {
        let start = self.data.len();
        self.data.append(other);
        self.rebuild_tail(start);
    }
}
impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
    fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
        self.append(other);
    }
}
impl<T: Ord> BinaryHeap<T> {
    fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        let iterator = iter.into_iter();
        let (lower, _) = iterator.size_hint();

        self.reserve(lower);

        iterator.for_each(move |elem| self.push(elem));
    }
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }

    #[inline]
    fn extend_one(&mut self, &item: &'a T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}