alloc/collections/binary_heap/mod.rs

//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
//! Checking the largest element is *O*(1). Converting a vector to a binary heap
//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
//! in-place heapsort.
//!
//! # Examples
//!
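//! A small example of the conversions described above:
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // `From<Vec<T>>` heapifies the vector in-place in O(n).
//! let heap = BinaryHeap::from(vec![1, 5, 2]);
//!
//! // `into_sorted_vec` performs an in-place heapsort in O(n * log(n)).
//! assert_eq!(heap.into_sorted_vec(), [1, 2, 5]);
//! ```
//!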
//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
//! It shows how to use [`BinaryHeap`] with custom types.
//!
//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
//!
//! ```
//! use std::cmp::Ordering;
//! use std::collections::BinaryHeap;
//!
//! #[derive(Copy, Clone, Eq, PartialEq)]
//! struct State {
//!     cost: usize,
//!     position: usize,
//! }
//!
//! // The priority queue depends on `Ord`.
//! // Explicitly implement the trait so the queue becomes a min-heap
//! // instead of a max-heap.
//! impl Ord for State {
//!     fn cmp(&self, other: &Self) -> Ordering {
//!         // Notice that we flip the ordering on costs.
//!         // In case of a tie we compare positions - this step is necessary
//!         // to make implementations of `PartialEq` and `Ord` consistent.
//!         other.cost.cmp(&self.cost)
//!             .then_with(|| self.position.cmp(&other.position))
//!     }
//! }
//!
//! // `PartialOrd` needs to be implemented as well.
//! impl PartialOrd for State {
//!     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
//!         Some(self.cmp(other))
//!     }
//! }
//!
//! // Each node is represented as a `usize`, for a shorter implementation.
//! struct Edge {
//!     node: usize,
//!     cost: usize,
//! }
//!
//! // Dijkstra's shortest path algorithm.
//!
//! // Start at `start` and use `dist` to track the current shortest distance
//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
//! // for a simpler implementation.
//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
//!     // dist[node] = current shortest distance from `start` to `node`
//!     let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
//!
//!     let mut heap = BinaryHeap::new();
//!
//!     // We're at `start`, with a zero cost
//!     dist[start] = 0;
//!     heap.push(State { cost: 0, position: start });
//!
//!     // Examine the frontier with lower cost nodes first (min-heap)
//!     while let Some(State { cost, position }) = heap.pop() {
//!         // Alternatively we could have continued to find all shortest paths
//!         if position == goal { return Some(cost); }
//!
//!         // Important as we may have already found a better way
//!         if cost > dist[position] { continue; }
//!
//!         // For each node we can reach, see if we can find a way with
//!         // a lower cost going through this node
//!         for edge in &adj_list[position] {
//!             let next = State { cost: cost + edge.cost, position: edge.node };
//!
//!             // If so, add it to the frontier and continue
//!             if next.cost < dist[next.position] {
//!                 heap.push(next);
//!                 // Relaxation, we have now found a better way
//!                 dist[next.position] = next.cost;
//!             }
//!         }
//!     }
//!
//!     // Goal not reachable
//!     None
//! }
//!
//! fn main() {
//!     // This is the directed graph we're going to use.
//!     // The node numbers correspond to the different states,
//!     // and the edge weights symbolize the cost of moving
//!     // from one node to another.
//!     // Note that the edges are one-way.
//!     //
//!     //                  7
//!     //          +-----------------+
//!     //          |                 |
//!     //          v   1        2    |  2
//!     //          0 -----> 1 -----> 3 ---> 4
//!     //          |        ^        ^      ^
//!     //          |        | 1      |      |
//!     //          |        |        | 3    | 1
//!     //          +------> 2 -------+      |
//!     //           10      |               |
//!     //                   +---------------+
//!     //
//!     // The graph is represented as an adjacency list where each index,
//!     // corresponding to a node value, has a list of outgoing edges.
//!     // Chosen for its efficiency.
//!     let graph = vec![
//!         // Node 0
//!         vec![Edge { node: 2, cost: 10 },
//!              Edge { node: 1, cost: 1 }],
//!         // Node 1
//!         vec![Edge { node: 3, cost: 2 }],
//!         // Node 2
//!         vec![Edge { node: 1, cost: 1 },
//!              Edge { node: 3, cost: 3 },
//!              Edge { node: 4, cost: 1 }],
//!         // Node 3
//!         vec![Edge { node: 0, cost: 7 },
//!              Edge { node: 4, cost: 2 }],
//!         // Node 4
//!         vec![]];
//!
//!     assert_eq!(shortest_path(&graph, 0, 1), Some(1));
//!     assert_eq!(shortest_path(&graph, 0, 3), Some(3));
//!     assert_eq!(shortest_path(&graph, 3, 0), Some(7));
//!     assert_eq!(shortest_path(&graph, 0, 4), Some(5));
//!     assert_eq!(shortest_path(&graph, 4, 0), None);
//! }
//! ```

#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]

use core::alloc::Allocator;
use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen};
use core::mem::{self, ManuallyDrop, swap};
use core::num::NonZero;
use core::ops::{Deref, DerefMut};
use core::{fmt, ptr};

use crate::alloc::Global;
use crate::collections::TryReserveError;
use crate::slice;
use crate::vec::{self, AsVecIntoIter, Vec};

/// A priority queue implemented with a binary heap.
///
/// This will be a max-heap.
///
/// It is a logic error for an item to be modified in such a way that the
/// item's ordering relative to any other item, as determined by the [`Ord`]
/// trait, changes while it is in the heap. This is normally only possible
/// through interior mutability, global state, I/O, or unsafe code. The
/// behavior resulting from such a logic error is not specified, but will
/// be encapsulated to the `BinaryHeap` that observed the logic error and not
/// result in undefined behavior. This could include panics, incorrect results,
/// aborts, memory leaks, and non-termination.
///
/// As long as no elements change their relative order while being in the heap
/// as described above, the API of `BinaryHeap` guarantees that the heap
/// invariant remains intact, i.e. its methods all behave as documented. For
/// example, if a method is documented as iterating in sorted order, that's
/// guaranteed to work as long as elements in the heap have not changed order,
/// even in the presence of closures being unwound out of, iterators getting
/// leaked, and similar foolishness.
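///
/// A sketch of such a logic error, using [`Cell`] for interior mutability
/// (shown only to illustrate the mistake):
///
/// ```
/// use std::cell::Cell;
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([Cell::new(1), Cell::new(5), Cell::new(2)]);
///
/// // Logic error: changing an element's ordering while it is in the heap.
/// // The heap's behavior afterwards is unspecified (though still memory-safe).
/// if let Some(top) = heap.iter().next() {
///     top.set(0);
/// }
/// ```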
///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// // Type inference lets us omit an explicit type signature (which
/// // would be `BinaryHeap<i32>` in this example).
/// let mut heap = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there are no items in there yet so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in &heap {
///     println!("{x}");
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
///
/// A `BinaryHeap` with a known list of items can be initialized from an array:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 5, 2]);
/// ```
///
/// ## Min-heap
///
/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
/// value instead of the greatest one.
///
/// ```
/// use std::collections::BinaryHeap;
/// use std::cmp::Reverse;
///
/// let mut heap = BinaryHeap::new();
///
/// // Wrap values in `Reverse`
/// heap.push(Reverse(1));
/// heap.push(Reverse(5));
/// heap.push(Reverse(2));
///
/// // If we pop these scores now, they should come back in the reverse order.
/// assert_eq!(heap.pop(), Some(Reverse(1)));
/// assert_eq!(heap.pop(), Some(Reverse(2)));
/// assert_eq!(heap.pop(), Some(Reverse(5)));
/// assert_eq!(heap.pop(), None);
/// ```
///
/// # Time complexity
///
/// | [push]  | [pop]         | [peek]/[peek\_mut] |
/// |---------|---------------|--------------------|
/// | *O*(1)~ | *O*(log(*n*)) | *O*(1)             |
///
/// The value for `push` is an expected cost; the method documentation gives a
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
/// [push]: BinaryHeap::push
/// [pop]: BinaryHeap::pop
/// [peek]: BinaryHeap::peek
/// [peek\_mut]: BinaryHeap::peek_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
pub struct BinaryHeap<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    data: Vec<T, A>,
}

/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
///
/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
/// its documentation for more.
///
/// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<
    'a,
    T: 'a + Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    heap: &'a mut BinaryHeap<T, A>,
    // If a set_len + sift_down are required, this is Some. If a &mut T has not
    // yet been exposed to peek_mut()'s caller, it's None.
    original_len: Option<NonZero<usize>>,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: Ord + fmt::Debug, A: Allocator> fmt::Debug for PeekMut<'_, T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Drop for PeekMut<'_, T, A> {
    fn drop(&mut self) {
        if let Some(original_len) = self.original_len {
            // SAFETY: That's how many elements were in the Vec at the time of
            // the PeekMut::deref_mut call, and therefore also at the time of
            // the BinaryHeap::peek_mut call. Since the PeekMut did not end up
            // getting leaked, we are now undoing the leak amplification that
            // the DerefMut prepared for.
            unsafe { self.heap.data.set_len(original_len.get()) };

            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Deref for PeekMut<'_, T, A> {
    type Target = T;
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked(0) }
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> {
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());

        let len = self.heap.len();
        if len > 1 {
            // Here we preemptively leak all the rest of the underlying vector
            // after the currently max element. If the caller mutates the &mut T
            // we're about to give them, and then leaks the PeekMut, all these
            // elements will remain leaked. If they don't leak the PeekMut, then
            // either Drop or PeekMut::pop will un-leak the vector elements.
            //
            // This technique is described throughout several other places in
            // the standard library as "leak amplification".
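            //
            // For example (an illustrative sketch): for a heap of [5, 4, 3],
            // the set_len(1) below leaves only the maximum in the vector. If
            // the caller then leaks the PeekMut, the elements 4 and 3 are
            // leaked with it; otherwise Drop or PeekMut::pop restores them.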
            unsafe {
                // SAFETY: len > 1 so len != 0.
                self.original_len = Some(NonZero::new_unchecked(len));
                // SAFETY: len > 1 so all this does for now is leak elements,
                // which is safe.
                self.heap.data.set_len(1);
            }
        }

        // SAFE: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}

impl<'a, T: Ord, A: Allocator> PeekMut<'a, T, A> {
    /// Removes the peeked value from the heap and returns it.
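    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::binary_heap::PeekMut;
    ///
    /// let mut heap = BinaryHeap::from([1, 5, 2]);
    ///
    /// // Mutate the greatest item and then remove it in the same borrow.
    /// if let Some(mut top) = heap.peek_mut() {
    ///     *top += 10;
    ///     assert_eq!(PeekMut::pop(top), 15);
    /// }
    /// assert_eq!(heap.into_sorted_vec(), [1, 2]);
    /// ```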
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T, A>) -> T {
        if let Some(original_len) = this.original_len.take() {
            // SAFETY: This is how many elements were in the Vec at the time of
            // the BinaryHeap::peek_mut call.
            unsafe { this.heap.data.set_len(original_len.get()) };

            // Unlike in Drop, here we don't also need to do a sift_down even if
            // the caller could've mutated the element. It is removed from the
            // heap on the next line and pop() is not sensitive to its value.
        }

        // SAFETY: Having a `PeekMut` proves that the associated binary heap is
        // non-empty, so the `pop` operation will not fail.
        unsafe { this.heap.pop().unwrap_unchecked() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for BinaryHeap<T, A> {
    fn clone(&self) -> Self {
        BinaryHeap { data: self.data.clone() }
    }

    /// Overwrites the contents of `self` with a clone of the contents of `source`.
    ///
    /// This method is preferred over simply assigning `source.clone()` to `self`,
    /// as it avoids reallocation if possible.
    ///
    /// See [`Vec::clone_from()`] for more details.
    fn clone_from(&mut self, source: &Self) {
        self.data.clone_from(&source.data);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Default for BinaryHeap<T> {
    /// Creates an empty `BinaryHeap<T>`.
    #[inline]
    fn default() -> BinaryHeap<T> {
        BinaryHeap::new()
    }
}

#[stable(feature = "binaryheap_debug", since = "1.4.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

struct RebuildOnDrop<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    heap: &'a mut BinaryHeap<T, A>,
    rebuild_from: usize,
}

impl<T: Ord, A: Allocator> Drop for RebuildOnDrop<'_, T, A> {
    fn drop(&mut self) {
        self.heap.rebuild_tail(self.rebuild_from);
    }
}

impl<T: Ord> BinaryHeap<T> {
    /// Creates an empty `BinaryHeap` as a max-heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_binary_heap_constructor", since = "1.80.0")]
    #[must_use]
    pub const fn new() -> BinaryHeap<T> {
        BinaryHeap { data: vec![] }
    }

    /// Creates an empty `BinaryHeap` with at least the specified capacity.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(10);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
        BinaryHeap { data: Vec::with_capacity(capacity) }
    }
}

impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
    /// Creates an empty `BinaryHeap` as a max-heap, using `A` as allocator.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new_in(System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub const fn new_in(alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::new_in(alloc) }
    }

    /// Creates an empty `BinaryHeap` with at least the specified capacity, using `A` as allocator.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity_in(10, System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) }
    }

    /// Returns a mutable reference to the greatest item in the binary heap, or
    /// `None` if it is empty.
    ///
    /// Note: If the `PeekMut` value is leaked, some heap elements might get
    /// leaked along with it, but the remaining elements will remain a valid
    /// heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert!(heap.peek_mut().is_none());
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// if let Some(mut val) = heap.peek_mut() {
    ///     *val = 0;
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```
    ///
    /// # Time complexity
    ///
    /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
    /// otherwise it's *O*(1).
    #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
    pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> {
        if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) }
    }

    /// Removes the greatest item from the binary heap and returns it, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.pop(), Some(3));
    /// assert_eq!(heap.pop(), Some(1));
    /// assert_eq!(heap.pop(), None);
    /// ```
    ///
    /// # Time complexity
    ///
    /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<T> {
        self.data.pop().map(|mut item| {
            if !self.is_empty() {
                swap(&mut item, &mut self.data[0]);
                // SAFETY: !self.is_empty() means that self.len() > 0
                unsafe { self.sift_down_to_bottom(0) };
            }
            item
        })
    }

    /// Pushes an item onto the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert_eq!(heap.len(), 3);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// The expected cost of `push`, averaged over every possible ordering of
    /// the elements being pushed, and over a sufficiently large number of
    /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
    /// elements that are *not* already in any sorted pattern.
    ///
    /// The time complexity degrades if elements are pushed in predominantly
    /// ascending order. In the worst case, elements are pushed in ascending
    /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
    /// containing *n* elements.
    ///
    /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
    /// occurs when capacity is exhausted and needs a resize. The resize cost
    /// has been amortized in the previous figures.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_confusables("append", "put")]
    pub fn push(&mut self, item: T) {
        let old_len = self.len();
        self.data.push(item);
        // SAFETY: Since we pushed a new item it means that
        //  old_len = self.len() - 1 < self.len()
        unsafe { self.sift_up(0, old_len) };
    }

    /// Consumes the `BinaryHeap` and returns a vector in sorted
    /// (ascending) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
    /// heap.push(6);
    /// heap.push(3);
    ///
    /// let vec = heap.into_sorted_vec();
    /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_sorted_vec(mut self) -> Vec<T, A> {
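        // An illustrative summary: this is the classic heapsort extraction.
        // The maximum at index 0 is repeatedly swapped with the last element
        // of the shrinking prefix `[0, end)`, and the heap property is then
        // restored on that prefix.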
        let mut end = self.len();
        while end > 1 {
            end -= 1;
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
            //  so it's always a valid index to access.
            //  It is safe to access index 0 (i.e. `ptr`), because
            //  1 <= end < self.len(), which means self.len() >= 2.
            unsafe {
                let ptr = self.data.as_mut_ptr();
                ptr::swap(ptr, ptr.add(end));
            }
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
            //  0 < 1 <= end <= self.len() - 1 < self.len()
            //  Which means 0 < end and end < self.len().
            unsafe { self.sift_down_range(0, end) };
        }
        self.into_vec()
    }

    // The implementations of sift_up and sift_down use unsafe blocks in
    // order to move an element out of the vector (leaving behind a
    // hole), shift along the others and move the removed element back into the
    // vector at the final location of the hole.
    // The `Hole` type is used to represent this, and make sure
    // the hole is filled back at the end of its scope, even on panic.
    // Using a hole reduces the constant factor compared to using swaps,
    // which involves twice as many moves.
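    //
    // For example (an illustrative walk-through of sift_up): pushing 7 onto
    // the heap [9, 5, 4, 1, 3] places it at index 5, whose parent is index
    // (5 - 1) / 2 == 2, holding 4. Since 7 > 4, the 4 moves down into the
    // hole; the next parent is index 0, holding 9, and 7 <= 9, so the hole
    // stops at index 2 and 7 is written there, giving [9, 5, 7, 1, 3, 4].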

    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
        // Take out the value at `pos` and create a hole.
        // SAFETY: The caller guarantees that pos < self.len()
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };

        while hole.pos() > start {
            let parent = (hole.pos() - 1) / 2;

            // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
            //  and so hole.pos() - 1 can't underflow.
            //  This guarantees that parent < hole.pos() so
            //  it's a valid index and also != hole.pos().
            if hole.element() <= unsafe { hole.get(parent) } {
                break;
            }

            // SAFETY: Same as above
            unsafe { hole.move_to(parent) };
        }

        hole.pos()
    }

    /// Take an element at `pos` and move it down the heap,
    /// while its children are larger.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < end <= self.len()`.
    unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
        // SAFETY: The caller guarantees that pos < end <= self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // compare with the greater of the two children
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // if we are already in order, stop.
            // SAFETY: child is now either the old child or the old child+1
            //  We have already proven that both are < self.len() and != hole.pos()
            if hole.element() >= unsafe { hole.get(child) } {
                return;
            }

            // SAFETY: same as above.
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        // SAFETY: `&&` short-circuits, so in the second condition it's
        //  already true that child == end - 1 < self.len().
        if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
            // SAFETY: child is already proven to be a valid index and
            //  child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
    }

    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down(&mut self, pos: usize) {
        let len = self.len();
        // SAFETY: pos < len is guaranteed by the caller and
        //  obviously len = self.len() <= self.len().
        unsafe { self.sift_down_range(pos, len) };
    }

    /// Take an element at `pos` and move it all the way down the heap,
    /// then sift it up to its position.
    ///
    /// Note: This is faster when the element is known to be large / should
    /// be closer to the bottom.
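    ///
    /// For example, after `pop` swaps the last (usually small) element to the
    /// root, that element almost always belongs near the bottom again, so
    /// walking it all the way down and sifting it back up afterwards does
    /// fewer comparisons on average than testing both children at every level
    /// on the way down.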
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
        let end = self.len();
        let start = pos;

        // SAFETY: The caller guarantees that pos < self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // SAFETY: Same as above
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        if child == end - 1 {
            // SAFETY: child == end - 1 < self.len(), so it's a valid index
            //  and child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
        pos = hole.pos();
        drop(hole);

        // SAFETY: pos is the position in the hole and was already proven
        //  to be a valid index.
        unsafe { self.sift_up(start, pos) };
    }

    /// Rebuild assuming data[0..start] is still a proper heap.
    fn rebuild_tail(&mut self, start: usize) {
        if start == self.len() {
            return;
        }

        let tail_len = self.len() - start;

        #[inline(always)]
        fn log2_fast(x: usize) -> usize {
            (usize::BITS - x.leading_zeros() - 1) as usize
        }

        // `rebuild` takes O(self.len()) operations
        // and about 2 * self.len() comparisons in the worst case
        // while repeating `sift_up` takes O(tail_len * log(start)) operations
        // and about 1 * tail_len * log_2(start) comparisons in the worst case,
        // assuming start >= tail_len. For larger heaps, the crossover point
        // no longer follows this reasoning and was determined empirically.
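        //
        // For example (an illustrative data point): with self.len() == 1024
        // and start == 1000 (tail_len == 24), rebuilding costs roughly
        // 2 * 1024 = 2048 comparisons, while sifting up the tail costs roughly
        // 24 * log2(1000) ≈ 216, so the tail is sifted up instead.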
        let better_to_rebuild = if start < tail_len {
            true
        } else if self.len() <= 2048 {
            2 * self.len() < tail_len * log2_fast(start)
        } else {
            2 * self.len() < tail_len * 11
        };

        if better_to_rebuild {
            self.rebuild();
        } else {
            for i in start..self.len() {
                // SAFETY: The index `i` is always less than self.len().
                unsafe { self.sift_up(0, i) };
            }
        }
    }

    fn rebuild(&mut self) {
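        // An illustrative summary: this is Floyd's bottom-up heap
        // construction. Every non-leaf node, starting from the last parent
        // (index self.len() / 2 - 1), is sifted down into place.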
        let mut n = self.len() / 2;
        while n > 0 {
            n -= 1;
            // SAFETY: n starts from self.len() / 2 and goes down to 0.
            //  The only case when !(n < self.len()) is if
            //  self.len() == 0, but it's ruled out by the loop condition.
            unsafe { self.sift_down(n) };
        }
    }

    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
    /// let mut b = BinaryHeap::from([-20, 5, 43]);
    ///
    /// a.append(&mut b);
    ///
    /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
    /// assert!(b.is_empty());
    /// ```
    #[stable(feature = "binary_heap_append", since = "1.11.0")]
    pub fn append(&mut self, other: &mut Self) {
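        // An illustrative note: swapping so that `self` is the larger heap
        // means only the shorter `other` contributes to the tail that
        // `rebuild_tail` must re-heapify below.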
        if self.len() < other.len() {
            swap(self, other);
        }

        let start = self.data.len();

        self.data.append(&mut other.data);

        self.rebuild_tail(start);
    }

    /// Clears the binary heap, returning an iterator over the removed elements
    /// in heap order. If the iterator is dropped before being fully consumed,
    /// it drops the remaining elements in heap order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// Note:
    /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
    ///   You should use the latter for most cases.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_drain_sorted)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    /// assert_eq!(heap.len(), 5);
    ///
    /// drop(heap.drain_sorted()); // removes all elements in heap order
    /// assert_eq!(heap.len(), 0);
    /// ```
    #[inline]
    #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
    pub fn drain_sorted(&mut self) -> DrainSorted<'_, T, A> {
        DrainSorted { inner: self }
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns
    /// `false`. The elements are visited in unsorted (and unspecified) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
    ///
    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
    ///
    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
    /// ```
    #[stable(feature = "binary_heap_retain", since = "1.70.0")]
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // `rebuild_from` will be updated to the first touched element below, and the rebuild will
        // only be done for the tail.
        let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
        let mut i = 0;

        guard.heap.data.retain(|e| {
            let keep = f(e);
            if !keep && i < guard.rebuild_from {
                guard.rebuild_from = i;
            }
            i += 1;
            keep
        });
    }
}

impl<T, A: Allocator> BinaryHeap<T, A> {
    /// Returns an iterator visiting all values in the underlying vector, in
    /// arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.iter() {
    ///     println!("{x}");
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(not(test), rustc_diagnostic_item = "binaryheap_iter")]
    pub fn iter(&self) -> Iter<'_, T> {
        Iter { iter: self.data.iter() }
    }

    /// Returns an iterator which retrieves elements in heap order.
    ///
    /// This method consumes the original heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_into_iter_sorted)]
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
    /// ```
    #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
    pub fn into_iter_sorted(self) -> IntoIterSorted<T, A> {
        IntoIterSorted { inner: self }
    }

    /// Returns the greatest item in the binary heap, or `None` if it is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert_eq!(heap.peek(), None);
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// Cost is *O*(1) in the worst case.
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn peek(&self) -> Option<&T> {
        self.data.get(0)
    }

    /// Returns the number of elements the binary heap can hold without reallocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }

    /// Reserves the minimum capacity for at least `additional` elements more than
    /// the current length. Unlike [`reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `reserve_exact`, capacity will be greater than or equal to
    /// `self.len() + additional`. Does nothing if the capacity is already
    /// sufficient.
    ///
    /// [`reserve`]: BinaryHeap::reserve
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve_exact(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.data.reserve_exact(additional);
    }

    /// Reserves capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `reserve`,
    /// capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        self.data.reserve(additional);
    }

    /// Tries to reserve the minimum capacity for at least `additional` elements
    /// more than the current length. Unlike [`try_reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or
    /// equal to `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity can not be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve_exact(additional)
    }

    /// Tries to reserve capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional` if it returns
    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
    /// preserves the contents even if an error occurs.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve(additional)
    }

    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        self.data.shrink_to_fit();
    }

    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.data.shrink_to(min_capacity)
    }

    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[stable(feature = "binary_heap_as_slice", since = "1.80.0")]
    pub fn as_slice(&self) -> &[T] {
        self.data.as_slice()
    }

    /// Consumes the `BinaryHeap` and returns the underlying vector
    /// in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    /// let vec = heap.into_vec();
    ///
    /// // Will print in some order
    /// for x in vec {
    ///     println!("{x}");
    /// }
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_vec(self) -> Vec<T, A> {
        self.into()
    }

    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn allocator(&self) -> &A {
        self.data.allocator()
    }

    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_confusables("length", "size")]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Checks if the binary heap is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    ///
    /// assert!(heap.is_empty());
    ///
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert!(!heap.is_empty());
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Clears the binary heap, returning an iterator over the removed elements
    /// in arbitrary order. If the iterator is dropped before being fully
    /// consumed, it drops the remaining elements in arbitrary order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{x}");
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T, A> {
        Drain { iter: self.data.drain(..) }
    }

    /// Drops all items from the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// heap.clear();
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        self.drain();
    }
}

/// Hole represents a hole in a slice, i.e., an index without a valid value
/// (because it was moved from or duplicated).
/// On drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
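///
/// For example (an illustrative sketch): a `Hole` created at index 0 of
/// `[3, 1, 2]` reads out the `3`; after `move_to(2)` the slice is logically
/// `[2, 1, _]` with the hole at index 2, and dropping the `Hole` writes the
/// `3` back there, leaving `[2, 1, 3]`.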
1356struct Hole<'a, T: 'a> {
1357    data: &'a mut [T],
1358    elt: ManuallyDrop<T>,
1359    pos: usize,
1360}
1361
1362impl<'a, T> Hole<'a, T> {
1363    /// Creates a new `Hole` at index `pos`.
1364    ///
1365    /// Unsafe because pos must be within the data slice.
1366    #[inline]
1367    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
1368        debug_assert!(pos < data.len());
1369        // SAFE: pos should be inside the slice
1370        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
1371        Hole { data, elt: ManuallyDrop::new(elt), pos }
1372    }
1373
1374    #[inline]
1375    fn pos(&self) -> usize {
1376        self.pos
1377    }
1378
1379    /// Returns a reference to the element removed.
1380    #[inline]
1381    fn element(&self) -> &T {
1382        &self.elt
1383    }
1384
1385    /// Returns a reference to the element at `index`.
1386    ///
1387    /// Unsafe because index must be within the data slice and not equal to pos.
1388    #[inline]
1389    unsafe fn get(&self, index: usize) -> &T {
1390        debug_assert!(index != self.pos);
1391        debug_assert!(index < self.data.len());
1392        unsafe { self.data.get_unchecked(index) }
1393    }
1394
1395    /// Move hole to new location
1396    ///
1397    /// Unsafe because index must be within the data slice and not equal to pos.
1398    #[inline]
1399    unsafe fn move_to(&mut self, index: usize) {
1400        debug_assert!(index != self.pos);
1401        debug_assert!(index < self.data.len());
1402        unsafe {
1403            let ptr = self.data.as_mut_ptr();
1404            let index_ptr: *const _ = ptr.add(index);
1405            let hole_ptr = ptr.add(self.pos);
1406            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
1407        }
1408        self.pos = index;
1409    }
1410}
1411
1412impl<T> Drop for Hole<'_, T> {
1413    #[inline]
1414    fn drop(&mut self) {
1415        // fill the hole again
1416        unsafe {
1417            let pos = self.pos;
1418            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
1419        }
1420    }
1421}
1422
1423/// An iterator over the elements of a `BinaryHeap`.
1424///
1425/// This `struct` is created by [`BinaryHeap::iter()`]. See its
1426/// documentation for more.
1427///
1428/// [`iter`]: BinaryHeap::iter
1429#[must_use = "iterators are lazy and do nothing unless consumed"]
1430#[stable(feature = "rust1", since = "1.0.0")]
1431pub struct Iter<'a, T: 'a> {
1432    iter: slice::Iter<'a, T>,
1433}
1434
1435#[stable(feature = "default_iters_sequel", since = "1.82.0")]
1436impl<T> Default for Iter<'_, T> {
1437    /// Creates an empty `binary_heap::Iter`.
1438    ///
1439    /// ```
1440    /// # use std::collections::binary_heap;
1441    /// let iter: binary_heap::Iter<'_, u8> = Default::default();
1442    /// assert_eq!(iter.len(), 0);
1443    /// ```
1444    fn default() -> Self {
1445        Iter { iter: Default::default() }
1446    }
1447}
1448
1449#[stable(feature = "collection_debug", since = "1.17.0")]
1450impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
1451    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1452        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
1453    }
1454}
1455
1456// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
1457#[stable(feature = "rust1", since = "1.0.0")]
1458impl<T> Clone for Iter<'_, T> {
1459    fn clone(&self) -> Self {
1460        Iter { iter: self.iter.clone() }
1461    }
1462}
1463
1464#[stable(feature = "rust1", since = "1.0.0")]
1465impl<'a, T> Iterator for Iter<'a, T> {
1466    type Item = &'a T;
1467
1468    #[inline]
1469    fn next(&mut self) -> Option<&'a T> {
1470        self.iter.next()
1471    }
1472
1473    #[inline]
1474    fn size_hint(&self) -> (usize, Option<usize>) {
1475        self.iter.size_hint()
1476    }
1477
1478    #[inline]
1479    fn last(self) -> Option<&'a T> {
1480        self.iter.last()
1481    }
1482}
1483
1484#[stable(feature = "rust1", since = "1.0.0")]
1485impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
1486    #[inline]
1487    fn next_back(&mut self) -> Option<&'a T> {
1488        self.iter.next_back()
1489    }
1490}
1491
1492#[stable(feature = "rust1", since = "1.0.0")]
1493impl<T> ExactSizeIterator for Iter<'_, T> {
1494    fn is_empty(&self) -> bool {
1495        self.iter.is_empty()
1496    }
1497}
1498
1499#[stable(feature = "fused", since = "1.26.0")]
1500impl<T> FusedIterator for Iter<'_, T> {}
1501
1502/// An owning iterator over the elements of a `BinaryHeap`.
1503///
1504/// This `struct` is created by [`BinaryHeap::into_iter()`]
1505/// (provided by the [`IntoIterator`] trait). See its documentation for more.
1506///
1507/// [`into_iter`]: BinaryHeap::into_iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    iter: vec::IntoIter<T, A>,
}

impl<T, A: Allocator> IntoIter<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}

#[doc(hidden)]
#[unstable(issue = "none", feature = "trusted_fused")]
unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}

#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
    /// Creates an empty `binary_heap::IntoIter`.
    ///
    /// ```
    /// # use std::collections::binary_heap;
    /// let iter: binary_heap::IntoIter<u8> = Default::default();
    /// assert_eq!(iter.len(), 0);
    /// ```
    fn default() -> Self {
        IntoIter { iter: Default::default() }
    }
}

// In addition to the SAFETY invariants of the following three unsafe traits,
// also refer to the `vec::in_place_collect` module documentation for an overview.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
    type Source = IntoIter<T, A>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
    const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
    const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
}

unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;

    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        &mut self.iter
    }
}

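/// An owning iterator over the elements of a `BinaryHeap`, yielding them in
/// heap order (greatest first).
///
/// This `struct` is created by [`BinaryHeap::into_iter_sorted()`]. See its
/// documentation for more.
///
/// # Examples
///
/// ```
/// #![feature(binary_heap_into_iter_sorted)]
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 3, 2]);
/// assert_eq!(heap.into_iter_sorted().collect::<Vec<_>>(), [3, 2, 1]);
/// ```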
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    inner: BinaryHeap<T, A>,
}

impl<T, A: Allocator> IntoIterSorted<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.inner.allocator()
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for IntoIterSorted<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for IntoIterSorted<T, A> {}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for IntoIterSorted<T, A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for IntoIterSorted<T, A> {}

/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
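///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([1, 3, 2]);
/// // `drain` yields the elements in arbitrary order and empties the heap.
/// let mut drained: Vec<i32> = heap.drain().collect();
/// drained.sort();
/// assert_eq!(drained, [1, 2, 3]);
/// assert!(heap.is_empty());
/// ```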
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    iter: vec::Drain<'a, T, A>,
}

impl<T, A: Allocator> Drain<'_, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}

/// A draining iterator over the elements of a `BinaryHeap`, yielding them in
/// heap order (greatest first).
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
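///
/// # Examples
///
/// ```
/// #![feature(binary_heap_drain_sorted)]
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([1, 3, 2]);
/// // Elements come out greatest-first; the heap is left empty.
/// assert_eq!(heap.drain_sorted().collect::<Vec<_>>(), [3, 2, 1]);
/// assert!(heap.is_empty());
/// ```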
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    inner: &'a mut BinaryHeap<T, A>,
}

impl<'a, T: Ord, A: Allocator> DrainSorted<'a, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.inner.allocator()
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord, A: Allocator> Drop for DrainSorted<'a, T, A> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        // Panic safety: if dropping an element panics, the guard's own `Drop`
        // pops (and thus drops) the remaining elements, so the heap is left
        // empty either way.
        struct DropGuard<'r, 'a, T: Ord, A: Allocator>(&'r mut DrainSorted<'a, T, A>);

        impl<'r, 'a, T: Ord, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            fn drop(&mut self) {
                while self.0.inner.pop().is_some() {}
            }
        }

        while let Some(item) = self.inner.pop() {
            // Arm the guard before dropping `item`, and disarm it with
            // `mem::forget` once the drop has completed without panicking.
            let guard = DropGuard(self);
            drop(item);
            mem::forget(guard);
        }
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for DrainSorted<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for DrainSorted<'_, T, A> {}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for DrainSorted<'_, T, A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for DrainSorted<'_, T, A> {}

#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord, A: Allocator> From<Vec<T, A>> for BinaryHeap<T, A> {
    /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
    ///
    /// This conversion happens in-place, and has *O*(*n*) time complexity.
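    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
    /// assert_eq!(heap.pop(), Some(5));
    /// ```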
    fn from(vec: Vec<T, A>) -> BinaryHeap<T, A> {
        let mut heap = BinaryHeap { data: vec };
        heap.rebuild();
        heap
    }
}

#[stable(feature = "std_collections_from_array", since = "1.56.0")]
impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
    /// Converts a `[T; N]` into a `BinaryHeap<T>`.
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
    /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
    /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    fn from(arr: [T; N]) -> Self {
        Self::from_iter(arr)
    }
}

#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T, A: Allocator> From<BinaryHeap<T, A>> for Vec<T, A> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
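    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from([1, 4, 2, 3]);
    /// let vec: Vec<i32> = Vec::from(heap);
    /// // The order matches the heap's internal layout, not sorted order.
    /// assert_eq!(vec.len(), 4);
    /// ```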
    fn from(heap: BinaryHeap<T, A>) -> Vec<T, A> {
        heap.data
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
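    /// Builds a `BinaryHeap<T>` from an iterator by first collecting into a
    /// `Vec<T>` and then converting it in-place in *O*(*n*).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap: BinaryHeap<i32> = (1..=5).collect();
    /// assert_eq!(heap.len(), 5);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```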
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
        BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> IntoIterator for BinaryHeap<T, A> {
    type Item = T;
    type IntoIter = IntoIter<T, A>;

    /// Creates a consuming iterator, that is, one that moves each value out of
    /// the binary heap in arbitrary order. The binary heap cannot be used
    /// after calling this.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.into_iter() {
    ///     // x has type i32, not &i32
    ///     println!("{x}");
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<T, A> {
        IntoIter { iter: self.data.into_iter() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, A: Allocator> IntoIterator for &'a BinaryHeap<T, A> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

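    /// Creates a borrowing iterator over the heap's elements, in arbitrary
    /// order, leaving the heap intact.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let heap = BinaryHeap::from([1, 2, 3]);
    /// let sum: i32 = (&heap).into_iter().sum();
    /// assert_eq!(sum, 6);
    /// // The heap is still usable afterwards.
    /// assert_eq!(heap.len(), 3);
    /// ```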
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
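    /// Extends the heap with the contents of an iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 5]);
    /// heap.extend(vec![3, 7]);
    /// assert_eq!(heap.len(), 4);
    /// assert_eq!(heap.peek(), Some(&7));
    /// ```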
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        // Append everything to the backing vector first; the guard restores
        // the heap property for the appended tail when it is dropped, even if
        // the iterator panics part-way through.
        let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
        guard.heap.data.extend(iter);
    }

    #[inline]
    fn extend_one(&mut self, item: T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}

#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy, A: Allocator> Extend<&'a T> for BinaryHeap<T, A> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }

    #[inline]
    fn extend_one(&mut self, &item: &'a T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}