alloc/collections/binary_heap/mod.rs

//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
//! Checking the largest element is *O*(1). Converting a vector to a binary heap
//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* \* log(*n*))
//! in-place heapsort.
//!
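//! For example, the in-place heapsort described above, as a short sketch:
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // `from` heapifies the vector in O(n); `into_sorted_vec` then sorts it
//! // in O(n * log(n)), reusing the same allocation throughout.
//! let heap = BinaryHeap::from(vec![5, 1, 4, 2, 3]);
//! assert_eq!(heap.into_sorted_vec(), [1, 2, 3, 4, 5]);
//! ```
//!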
//! # Examples
//!
//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
//! It shows how to use [`BinaryHeap`] with custom types.
//!
//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
//!
//! ```
//! use std::cmp::Ordering;
//! use std::collections::BinaryHeap;
//!
//! #[derive(Copy, Clone, Eq, PartialEq)]
//! struct State {
//!     cost: usize,
//!     position: usize,
//! }
//!
//! // The priority queue depends on `Ord`.
//! // Explicitly implement the trait so the queue becomes a min-heap
//! // instead of a max-heap.
//! impl Ord for State {
//!     fn cmp(&self, other: &Self) -> Ordering {
//!         // Notice that we flip the ordering on costs.
//!         // In case of a tie we compare positions - this step is necessary
//!         // to make implementations of `PartialEq` and `Ord` consistent.
//!         other.cost.cmp(&self.cost)
//!             .then_with(|| self.position.cmp(&other.position))
//!     }
//! }
//!
//! // `PartialOrd` needs to be implemented as well.
//! impl PartialOrd for State {
//!     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
//!         Some(self.cmp(other))
//!     }
//! }
//!
//! // Each node is represented as a `usize`, for a shorter implementation.
//! struct Edge {
//!     node: usize,
//!     cost: usize,
//! }
//!
//! // Dijkstra's shortest path algorithm.
//!
//! // Start at `start` and use `dist` to track the current shortest distance
//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
//! // for a simpler implementation.
//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
//!     // dist[node] = current shortest distance from `start` to `node`
//!     let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
//!
//!     let mut heap = BinaryHeap::new();
//!
//!     // We're at `start`, with a zero cost
//!     dist[start] = 0;
//!     heap.push(State { cost: 0, position: start });
//!
//!     // Examine the frontier with lower cost nodes first (min-heap)
//!     while let Some(State { cost, position }) = heap.pop() {
//!         // Alternatively we could have continued to find all shortest paths
//!         if position == goal { return Some(cost); }
//!
//!         // Important as we may have already found a better way
//!         if cost > dist[position] { continue; }
//!
//!         // For each node we can reach, see if we can find a way with
//!         // a lower cost going through this node
//!         for edge in &adj_list[position] {
//!             let next = State { cost: cost + edge.cost, position: edge.node };
//!
//!             // If so, add it to the frontier and continue
//!             if next.cost < dist[next.position] {
//!                 heap.push(next);
//!                 // Relaxation, we have now found a better way
//!                 dist[next.position] = next.cost;
//!             }
//!         }
//!     }
//!
//!     // Goal not reachable
//!     None
//! }
//!
//! fn main() {
//!     // This is the directed graph we're going to use.
//!     // The node numbers correspond to the different states,
//!     // and the edge weights symbolize the cost of moving
//!     // from one node to another.
//!     // Note that the edges are one-way.
//!     //
//!     //                  7
//!     //          +-----------------+
//!     //          |                 |
//!     //          v   1        2    |  2
//!     //          0 -----> 1 -----> 3 ---> 4
//!     //          |        ^        ^      ^
//!     //          |        | 1      |      |
//!     //          |        |        | 3    | 1
//!     //          +------> 2 -------+      |
//!     //           10      |               |
//!     //                   +---------------+
//!     //
//!     // The graph is represented as an adjacency list where each index,
//!     // corresponding to a node value, has a list of outgoing edges.
//!     // Chosen for its efficiency.
//!     let graph = vec![
//!         // Node 0
//!         vec![Edge { node: 2, cost: 10 },
//!              Edge { node: 1, cost: 1 }],
//!         // Node 1
//!         vec![Edge { node: 3, cost: 2 }],
//!         // Node 2
//!         vec![Edge { node: 1, cost: 1 },
//!              Edge { node: 3, cost: 3 },
//!              Edge { node: 4, cost: 1 }],
//!         // Node 3
//!         vec![Edge { node: 0, cost: 7 },
//!              Edge { node: 4, cost: 2 }],
//!         // Node 4
//!         vec![]];
//!
//!     assert_eq!(shortest_path(&graph, 0, 1), Some(1));
//!     assert_eq!(shortest_path(&graph, 0, 3), Some(3));
//!     assert_eq!(shortest_path(&graph, 3, 0), Some(7));
//!     assert_eq!(shortest_path(&graph, 0, 4), Some(5));
//!     assert_eq!(shortest_path(&graph, 4, 0), None);
//! }
//! ```

#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]

use core::alloc::Allocator;
use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen};
use core::mem::{self, ManuallyDrop, swap};
use core::num::NonZero;
use core::ops::{Deref, DerefMut};
use core::{fmt, ptr};

use crate::alloc::Global;
use crate::collections::TryReserveError;
use crate::slice;
#[cfg(not(test))]
use crate::vec::AsVecIntoIter;
use crate::vec::{self, Vec};

/// A priority queue implemented with a binary heap.
///
/// This will be a max-heap.
///
/// It is a logic error for an item to be modified in such a way that the
/// item's ordering relative to any other item, as determined by the [`Ord`]
/// trait, changes while it is in the heap. This is normally only possible
/// through interior mutability, global state, I/O, or unsafe code. The
/// behavior resulting from such a logic error is not specified, but will
/// be encapsulated to the `BinaryHeap` that observed the logic error and not
/// result in undefined behavior. This could include panics, incorrect results,
/// aborts, memory leaks, and non-termination.
///
/// As long as no elements change their relative order while being in the heap
/// as described above, the API of `BinaryHeap` guarantees that the heap
/// invariant remains intact, i.e. its methods all behave as documented. For
/// example, if a method is documented as iterating in sorted order, that's
/// guaranteed to work as long as elements in the heap have not changed order,
/// even in the presence of closures being unwound out of, iterators getting
/// leaked, and similar foolishness.
///
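/// For example, a logic error of this kind can be introduced through interior
/// mutability; the heap remains memory-safe, but its ordering guarantees no
/// longer hold:
///
/// ```
/// use std::cell::Cell;
/// use std::collections::BinaryHeap;
///
/// let a = Cell::new(1);
/// let b = Cell::new(2);
///
/// let mut heap = BinaryHeap::new();
/// heap.push(&a);
/// heap.push(&b);
///
/// // Mutating through the `Cell` changes `a`'s ordering relative to `b`
/// // while both are in the heap: a logic error.
/// a.set(3);
/// ```
///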
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// // Type inference lets us omit an explicit type signature (which
/// // would be `BinaryHeap<i32>` in this example).
/// let mut heap = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there are no items in there yet so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in &heap {
///     println!("{x}");
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
///
/// A `BinaryHeap` with a known list of items can be initialized from an array:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 5, 2]);
/// ```
///
/// ## Min-heap
///
/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
/// value instead of the greatest one.
///
/// ```
/// use std::collections::BinaryHeap;
/// use std::cmp::Reverse;
///
/// let mut heap = BinaryHeap::new();
///
/// // Wrap values in `Reverse`
/// heap.push(Reverse(1));
/// heap.push(Reverse(5));
/// heap.push(Reverse(2));
///
/// // If we pop these scores now, they should come back in the reverse order.
/// assert_eq!(heap.pop(), Some(Reverse(1)));
/// assert_eq!(heap.pop(), Some(Reverse(2)));
/// assert_eq!(heap.pop(), Some(Reverse(5)));
/// assert_eq!(heap.pop(), None);
/// ```
///
/// # Time complexity
///
/// | [push]  | [pop]         | [peek]/[peek\_mut] |
/// |---------|---------------|--------------------|
/// | *O*(1)~ | *O*(log(*n*)) | *O*(1)             |
///
/// The value for `push` is an expected cost; the method documentation gives a
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
/// [push]: BinaryHeap::push
/// [pop]: BinaryHeap::pop
/// [peek]: BinaryHeap::peek
/// [peek\_mut]: BinaryHeap::peek_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
pub struct BinaryHeap<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    data: Vec<T, A>,
}

/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
///
/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
/// its documentation for more.
///
/// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<
    'a,
    T: 'a + Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    heap: &'a mut BinaryHeap<T, A>,
    // If a set_len + sift_down are required, this is Some. If a &mut T has not
    // yet been exposed to peek_mut()'s caller, it's None.
    original_len: Option<NonZero<usize>>,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: Ord + fmt::Debug, A: Allocator> fmt::Debug for PeekMut<'_, T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Drop for PeekMut<'_, T, A> {
    fn drop(&mut self) {
        if let Some(original_len) = self.original_len {
            // SAFETY: That's how many elements were in the Vec at the time of
            // the PeekMut::deref_mut call, and therefore also at the time of
            // the BinaryHeap::peek_mut call. Since the PeekMut did not end up
            // getting leaked, we are now undoing the leak amplification that
            // the DerefMut prepared for.
            unsafe { self.heap.data.set_len(original_len.get()) };

            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Deref for PeekMut<'_, T, A> {
    type Target = T;
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFETY: PeekMut is only instantiated for non-empty heaps.
        unsafe { self.heap.data.get_unchecked(0) }
    }
}

#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> {
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());

        let len = self.heap.len();
        if len > 1 {
            // Here we preemptively leak all the rest of the underlying vector
            // after the currently max element. If the caller mutates the &mut T
            // we're about to give them, and then leaks the PeekMut, all these
            // elements will remain leaked. If they don't leak the PeekMut, then
            // either Drop or PeekMut::pop will un-leak the vector elements.
            //
            // This technique is described throughout several other places in
            // the standard library as "leak amplification".
            unsafe {
                // SAFETY: len > 1 so len != 0.
                self.original_len = Some(NonZero::new_unchecked(len));
                // SAFETY: len > 1 so all this does for now is leak elements,
                // which is safe.
                self.heap.data.set_len(1);
            }
        }

        // SAFETY: PeekMut is only instantiated for non-empty heaps.
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}

impl<'a, T: Ord, A: Allocator> PeekMut<'a, T, A> {
    /// Sifts the current element to its new position.
    ///
    /// Afterwards, the `PeekMut` refers to the new greatest element. Returns
    /// `true` if the peeked element changed.
    ///
    /// ## Examples
    ///
    /// The returned condition can be used to upper-bound all elements in the
    /// heap. When only a few elements are affected, the heap's ordering makes
    /// this faster than a reconstruction from the raw element list, and it
    /// requires no additional allocation.
    ///
    /// ```
    /// #![feature(binary_heap_peek_mut_refresh)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap: BinaryHeap<u32> = (0..128).collect();
    /// let mut peek = heap.peek_mut().unwrap();
    ///
    /// loop {
    ///     *peek = 99;
    ///
    ///     if !peek.refresh() {
    ///         break;
    ///     }
    /// }
    ///
    /// // Postcondition: 99 is now an upper bound for all elements.
    /// assert!(*peek < 100);
    /// ```
    ///
    /// When the element remains the maximum after modification, the peek remains unchanged:
    ///
    /// ```
    /// #![feature(binary_heap_peek_mut_refresh)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap: BinaryHeap<u32> = [1, 2, 3].into();
    /// let mut peek = heap.peek_mut().unwrap();
    ///
    /// assert_eq!(*peek, 3);
    /// *peek = 42;
    ///
    /// // Refreshing does not change the peek: 42 is still the maximum.
    /// assert!(!peek.refresh(), "42 is even larger than 3");
    /// assert_eq!(*peek, 42);
    /// ```
    #[unstable(feature = "binary_heap_peek_mut_refresh", issue = "138355")]
    #[must_use = "is equivalent to dropping and getting a new PeekMut except for return information"]
    pub fn refresh(&mut self) -> bool {
        // Sifting down does not change the length of the underlying heap, so
        // the value stored for leak amplification remains accurate. We erase
        // the leak amplification first because the operation is then
        // equivalent to constructing a new PeekMut, and second because this
        // avoids any future complication where `original_len` being `Some`
        // would be interpreted as the heap having been leak-amplified instead
        // of checking the heap itself.
        if let Some(original_len) = self.original_len.take() {
            // SAFETY: This is how many elements were in the Vec at the time of
            // the BinaryHeap::peek_mut call.
            unsafe { self.heap.data.set_len(original_len.get()) };

            // The length of the heap did not change by sifting, upholding our own invariants.

            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            (unsafe { self.heap.sift_down(0) }) != 0
        } else {
            // The element was not modified.
            false
        }
    }

    /// Removes the peeked value from the heap and returns it.
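    ///
    /// # Examples
    ///
    /// A short usage sketch:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::binary_heap::PeekMut;
    ///
    /// let mut heap = BinaryHeap::from([1, 5, 2]);
    ///
    /// // `pop` takes the guard by value and removes the peeked element.
    /// if let Some(peeked) = heap.peek_mut() {
    ///     assert_eq!(PeekMut::pop(peeked), 5);
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```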
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T, A>) -> T {
        if let Some(original_len) = this.original_len.take() {
            // SAFETY: This is how many elements were in the Vec at the time of
            // the BinaryHeap::peek_mut call.
            unsafe { this.heap.data.set_len(original_len.get()) };

            // Unlike in Drop, here we don't also need to do a sift_down even if
            // the caller could've mutated the element. It is removed from the
            // heap on the next line and pop() is not sensitive to its value.
        }

        // SAFETY: Holding a `PeekMut` proves that the associated binary heap is
        // non-empty, so the `pop` operation will not fail.
        unsafe { this.heap.pop().unwrap_unchecked() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for BinaryHeap<T, A> {
    fn clone(&self) -> Self {
        BinaryHeap { data: self.data.clone() }
    }

    /// Overwrites the contents of `self` with a clone of the contents of `source`.
    ///
    /// This method is preferred over simply assigning `source.clone()` to `self`,
    /// as it avoids reallocation if possible.
    ///
    /// See [`Vec::clone_from()`] for more details.
    fn clone_from(&mut self, source: &Self) {
        self.data.clone_from(&source.data);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Default for BinaryHeap<T> {
    /// Creates an empty `BinaryHeap<T>`.
    #[inline]
    fn default() -> BinaryHeap<T> {
        BinaryHeap::new()
    }
}

#[stable(feature = "binaryheap_debug", since = "1.4.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

struct RebuildOnDrop<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    heap: &'a mut BinaryHeap<T, A>,
    rebuild_from: usize,
}

impl<T: Ord, A: Allocator> Drop for RebuildOnDrop<'_, T, A> {
    fn drop(&mut self) {
        self.heap.rebuild_tail(self.rebuild_from);
    }
}

impl<T: Ord> BinaryHeap<T> {
    /// Creates an empty `BinaryHeap` as a max-heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_binary_heap_constructor", since = "1.80.0")]
    #[must_use]
    pub const fn new() -> BinaryHeap<T> {
        BinaryHeap { data: vec![] }
    }

    /// Creates an empty `BinaryHeap` with at least the specified capacity.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(10);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
        BinaryHeap { data: Vec::with_capacity(capacity) }
    }
}

impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
    /// Creates an empty `BinaryHeap` as a max-heap, using `A` as allocator.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new_in(System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub const fn new_in(alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::new_in(alloc) }
    }

    /// Creates an empty `BinaryHeap` with at least the specified capacity, using `A` as allocator.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is zero, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity_in(10, System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) }
    }

    /// Returns a mutable reference to the greatest item in the binary heap, or
    /// `None` if it is empty.
    ///
    /// Note: If the `PeekMut` value is leaked, some heap elements might get
    /// leaked along with it, but the remaining elements will remain a valid
    /// heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert!(heap.peek_mut().is_none());
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// if let Some(mut val) = heap.peek_mut() {
    ///     *val = 0;
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```
    ///
    /// # Time complexity
    ///
    /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
    /// otherwise it's *O*(1).
    #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
    pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> {
        if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) }
    }

    /// Removes the greatest item from the binary heap and returns it, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.pop(), Some(3));
    /// assert_eq!(heap.pop(), Some(1));
    /// assert_eq!(heap.pop(), None);
    /// ```
    ///
    /// # Time complexity
    ///
    /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<T> {
        self.data.pop().map(|mut item| {
            if !self.is_empty() {
                swap(&mut item, &mut self.data[0]);
                // SAFETY: !self.is_empty() means that self.len() > 0
                unsafe { self.sift_down_to_bottom(0) };
            }
            item
        })
    }

    /// Pushes an item onto the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert_eq!(heap.len(), 3);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// The expected cost of `push`, averaged over every possible ordering of
    /// the elements being pushed, and over a sufficiently large number of
    /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
    /// elements that are *not* already in any sorted pattern.
    ///
    /// The time complexity degrades if elements are pushed in predominantly
    /// ascending order. In the worst case, elements are pushed in ascending
    /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
    /// containing *n* elements.
    ///
    /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
    /// occurs when capacity is exhausted and needs a resize. The resize cost
    /// has been amortized in the previous figures.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_confusables("append", "put")]
    pub fn push(&mut self, item: T) {
        let old_len = self.len();
        self.data.push(item);
        // SAFETY: Since we pushed a new item it means that
        //  old_len = self.len() - 1 < self.len()
        unsafe { self.sift_up(0, old_len) };
    }

    /// Consumes the `BinaryHeap` and returns a vector in sorted
    /// (ascending) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
    /// heap.push(6);
    /// heap.push(3);
    ///
    /// let vec = heap.into_sorted_vec();
    /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_sorted_vec(mut self) -> Vec<T, A> {
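        // Classic in-place heapsort: repeatedly move the maximum (index 0) to
        // the end of the shrinking prefix, then restore the heap property on
        // the remaining prefix.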
        let mut end = self.len();
        while end > 1 {
            end -= 1;
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
            //  so it's always a valid index to access.
            //  It is safe to access index 0 (i.e. `ptr`), because
            //  1 <= end < self.len(), which means self.len() >= 2.
            unsafe {
                let ptr = self.data.as_mut_ptr();
                ptr::swap(ptr, ptr.add(end));
            }
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
            //  0 < 1 <= end <= self.len() - 1 < self.len()
            //  Which means 0 < end and end < self.len().
            unsafe { self.sift_down_range(0, end) };
        }
        self.into_vec()
    }

    // The implementations of sift_up and sift_down use unsafe blocks in
    // order to move an element out of the vector (leaving behind a
    // hole), shift along the others and move the removed element back into the
    // vector at the final location of the hole.
    // The `Hole` type is used to represent this, and make sure
    // the hole is filled back at the end of its scope, even on panic.
    // Using a hole reduces the constant factor compared to using swaps,
    // which involves twice as many moves.
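    //
    // For example, sifting a freshly pushed 9 up through [7, 5, 3, 9] moves 5
    // and then 7 down into the hole and finally writes 9 once at the root,
    // instead of performing a full swap at each of the two levels.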

    /// Returns the new position of the element.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
        // Take out the value at `pos` and create a hole.
        // SAFETY: The caller guarantees that pos < self.len()
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };

        while hole.pos() > start {
            let parent = (hole.pos() - 1) / 2;

            // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
            //  and so hole.pos() - 1 can't underflow.
            //  This guarantees that parent < hole.pos() so
            //  it's a valid index and also != hole.pos().
            if hole.element() <= unsafe { hole.get(parent) } {
                break;
            }

            // SAFETY: Same as above
            unsafe { hole.move_to(parent) };
        }

        hole.pos()
    }

    /// Take an element at `pos` and move it down the heap,
    /// while its children are larger.
    ///
    /// Returns the new position of the element.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < end <= self.len()`.
    unsafe fn sift_down_range(&mut self, pos: usize, end: usize) -> usize {
        // SAFETY: The caller guarantees that pos < end <= self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // compare with the greater of the two children
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // if we are already in order, stop.
            // SAFETY: child is now either the old child or the old child+1
            //  We have already proven that both are < self.len() and != hole.pos()
            if hole.element() >= unsafe { hole.get(child) } {
                return hole.pos();
            }

            // SAFETY: same as above.
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        // SAFETY: `&&` short-circuits, so in the second condition it is
        //  already known that child == end - 1 < self.len().
        if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
            // SAFETY: child is already proven to be a valid index and
            //  child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }

        hole.pos()
    }

    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down(&mut self, pos: usize) -> usize {
        let len = self.len();
        // SAFETY: pos < len is guaranteed by the caller and
        //  obviously len = self.len() <= self.len().
        unsafe { self.sift_down_range(pos, len) }
    }

    /// Take an element at `pos` and move it all the way down the heap,
    /// then sift it up to its position.
    ///
    /// Note: This is faster when the element is known to be large / should
    /// be closer to the bottom.
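    /// (`pop` benefits from this: the element swapped into the root position
    /// comes from the end of the vector and is likely to belong near the
    /// bottom again, so most comparisons against it on the way down would be
    /// wasted work.)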
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
        let end = self.len();
        let start = pos;

        // SAFETY: The caller guarantees that pos < self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        while child <= end.saturating_sub(2) {
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // SAFETY: Same as above
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        if child == end - 1 {
            // SAFETY: child == end - 1 < self.len(), so it's a valid index
            //  and child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
        pos = hole.pos();
        drop(hole);

        // SAFETY: pos is the position in the hole and was already proven
        //  to be a valid index.
        unsafe { self.sift_up(start, pos) };
    }

    /// Rebuild assuming data[0..start] is still a proper heap.
    fn rebuild_tail(&mut self, start: usize) {
        if start == self.len() {
            return;
        }

        let tail_len = self.len() - start;

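        // Computes floor(log2(x)) for x > 0 from the index of the highest
        // set bit.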
        #[inline(always)]
        fn log2_fast(x: usize) -> usize {
            (usize::BITS - x.leading_zeros() - 1) as usize
        }

        // `rebuild` takes O(self.len()) operations
        // and about 2 * self.len() comparisons in the worst case
        // while repeating `sift_up` takes O(tail_len * log(start)) operations
        // and about 1 * tail_len * log_2(start) comparisons in the worst case,
        // assuming start >= tail_len. For larger heaps, the crossover point
        // no longer follows this reasoning and was determined empirically.
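        //
        // For example, appending a 64-element tail to a 1024-element heap
        // (so self.len() == 1088): rebuilding costs about 2 * 1088 = 2176
        // comparisons, while sifting up the tail costs about
        // 64 * log2(1024) = 640, so sifting up is chosen.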
        let better_to_rebuild = if start < tail_len {
            true
        } else if self.len() <= 2048 {
            2 * self.len() < tail_len * log2_fast(start)
        } else {
            2 * self.len() < tail_len * 11
        };

        if better_to_rebuild {
            self.rebuild();
        } else {
            for i in start..self.len() {
                // SAFETY: The index `i` is always less than self.len().
                unsafe { self.sift_up(0, i) };
            }
        }
    }

    fn rebuild(&mut self) {
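        // Bottom-up heap construction (Floyd's algorithm): sift down every
        // node that has at least one child, from the last parent back to the
        // root, for O(self.len()) total work.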
        let mut n = self.len() / 2;
        while n > 0 {
            n -= 1;
            // SAFETY: n starts from self.len() / 2 and goes down to 0.
            //  The only case when !(n < self.len()) is if
            //  self.len() == 0, but it's ruled out by the loop condition.
            unsafe { self.sift_down(n) };
        }
    }

    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
    /// let mut b = BinaryHeap::from([-20, 5, 43]);
    ///
    /// a.append(&mut b);
    ///
    /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
    /// assert!(b.is_empty());
    /// ```
    #[stable(feature = "binary_heap_append", since = "1.11.0")]
    pub fn append(&mut self, other: &mut Self) {
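        // Keep the larger heap in `self` so that only the shorter run of
        // elements forms the tail that has to be re-heapified below.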
        if self.len() < other.len() {
            swap(self, other);
        }

        let start = self.data.len();

        self.data.append(&mut other.data);

        self.rebuild_tail(start);
    }

    /// Clears the binary heap, returning an iterator over the removed elements
    /// in heap order. If the iterator is dropped before being fully consumed,
    /// it drops the remaining elements in heap order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// Note:
    /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
    ///   You should use the latter for most cases.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_drain_sorted)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    /// assert_eq!(heap.len(), 5);
    ///
    /// drop(heap.drain_sorted()); // removes all elements in heap order
    /// assert_eq!(heap.len(), 0);
    /// ```
    #[inline]
    #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
    pub fn drain_sorted(&mut self) -> DrainSorted<'_, T, A> {
        DrainSorted { inner: self }
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns
    /// `false`. The elements are visited in unsorted (and unspecified) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
    ///
    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
    ///
    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
    /// ```
    #[stable(feature = "binary_heap_retain", since = "1.70.0")]
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // `rebuild_from` will be updated to the first touched element below,
        // and the rebuild will only be done for the tail.
        let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
        let mut i = 0;

        guard.heap.data.retain(|e| {
            let keep = f(e);
            if !keep && i < guard.rebuild_from {
                guard.rebuild_from = i;
            }
            i += 1;
            keep
        });
    }
}

impl<T, A: Allocator> BinaryHeap<T, A> {
    /// Returns an iterator visiting all values in the underlying vector, in
    /// arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.iter() {
    ///     println!("{x}");
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(not(test), rustc_diagnostic_item = "binaryheap_iter")]
    pub fn iter(&self) -> Iter<'_, T> {
        Iter { iter: self.data.iter() }
    }

    /// Returns an iterator which retrieves elements in heap order.
    ///
    /// This method consumes the original heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_into_iter_sorted)]
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
    /// ```
    #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
    pub fn into_iter_sorted(self) -> IntoIterSorted<T, A> {
        IntoIterSorted { inner: self }
    }

    /// Returns the greatest item in the binary heap, or `None` if it is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert_eq!(heap.peek(), None);
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// Cost is *O*(1) in the worst case.
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn peek(&self) -> Option<&T> {
        self.data.get(0)
    }

    /// Returns the number of elements the binary heap can hold without reallocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        self.data.capacity()
    }

    /// Reserves the minimum capacity for at least `additional` elements more than
    /// the current length. Unlike [`reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `reserve_exact`, capacity will be greater than or equal to
    /// `self.len() + additional`. Does nothing if the capacity is already
    /// sufficient.
    ///
    /// [`reserve`]: BinaryHeap::reserve
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve_exact(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.data.reserve_exact(additional);
    }

    /// Reserves capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `reserve`,
    /// capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        self.data.reserve(additional);
    }

    /// Tries to reserve the minimum capacity for at least `additional` elements
    /// more than the current length. Unlike [`try_reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or
    /// equal to `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity cannot be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve_exact(additional)
    }

    /// Tries to reserve capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional` if it returns
    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
    /// preserves the contents even if an error occurs.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.data.try_reserve(additional)
    }

    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        self.data.shrink_to_fit();
    }

    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.data.shrink_to(min_capacity)
    }

    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[stable(feature = "binary_heap_as_slice", since = "1.80.0")]
    pub fn as_slice(&self) -> &[T] {
        self.data.as_slice()
    }

    /// Consumes the `BinaryHeap` and returns the underlying vector
    /// in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    /// let vec = heap.into_vec();
    ///
    /// // Will print in some order
    /// for x in vec {
    ///     println!("{x}");
    /// }
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_vec(self) -> Vec<T, A> {
        self.into()
    }

    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn allocator(&self) -> &A {
        self.data.allocator()
    }

    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_confusables("length", "size")]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Checks if the binary heap is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    ///
    /// assert!(heap.is_empty());
    ///
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert!(!heap.is_empty());
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Clears the binary heap, returning an iterator over the removed elements
    /// in arbitrary order. If the iterator is dropped before being fully
    /// consumed, it drops the remaining elements in arbitrary order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{x}");
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T, A> {
        Drain { iter: self.data.drain(..) }
    }

    /// Drops all items from the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// heap.clear();
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        self.drain();
    }
}

/// Hole represents a hole in a slice, i.e. an index without a valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    data: &'a mut [T],
    elt: ManuallyDrop<T>,
    pos: usize,
}

impl<'a, T> Hole<'a, T> {
    /// Creates a new `Hole` at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    #[inline]
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFETY: pos is guaranteed by the caller to be inside the slice.
        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }

    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    /// Returns a reference to the element removed.
    #[inline]
    fn element(&self) -> &T {
        &self.elt
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe { self.data.get_unchecked(index) }
    }

    /// Move hole to new location.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe {
            let ptr = self.data.as_mut_ptr();
            let index_ptr: *const _ = ptr.add(index);
            let hole_ptr = ptr.add(self.pos);
            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        }
        self.pos = index;
    }
}

impl<T> Drop for Hole<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // fill the hole again
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}

/// An iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::iter()`]. See its
/// documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    iter: slice::Iter<'a, T>,
}
1511#[stable(feature = "default_iters_sequel", since = "1.82.0")]
1512impl<T> Default for Iter<'_, T> {
1513    /// Creates an empty `binary_heap::Iter`.
1514    ///
1515    /// ```
1516    /// # use std::collections::binary_heap;
1517    /// let iter: binary_heap::Iter<'_, u8> = Default::default();
1518    /// assert_eq!(iter.len(), 0);
1519    /// ```
1520    fn default() -> Self {
1521        Iter { iter: Default::default() }
1522    }
1523}
1524
1525#[stable(feature = "collection_debug", since = "1.17.0")]
1526impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
1527    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1528        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
1529    }
1530}
1531
1532// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
1533#[stable(feature = "rust1", since = "1.0.0")]
1534impl<T> Clone for Iter<'_, T> {
1535    fn clone(&self) -> Self {
1536        Iter { iter: self.iter.clone() }
1537    }
1538}
1539
1540#[stable(feature = "rust1", since = "1.0.0")]
1541impl<'a, T> Iterator for Iter<'a, T> {
1542    type Item = &'a T;
1543
1544    #[inline]
1545    fn next(&mut self) -> Option<&'a T> {
1546        self.iter.next()
1547    }
1548
1549    #[inline]
1550    fn size_hint(&self) -> (usize, Option<usize>) {
1551        self.iter.size_hint()
1552    }
1553
1554    #[inline]
1555    fn last(self) -> Option<&'a T> {
1556        self.iter.last()
1557    }
1558}
1559
1560#[stable(feature = "rust1", since = "1.0.0")]
1561impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
1562    #[inline]
1563    fn next_back(&mut self) -> Option<&'a T> {
1564        self.iter.next_back()
1565    }
1566}
1567
1568#[stable(feature = "rust1", since = "1.0.0")]
1569impl<T> ExactSizeIterator for Iter<'_, T> {
1570    fn is_empty(&self) -> bool {
1571        self.iter.is_empty()
1572    }
1573}
1574
1575#[stable(feature = "fused", since = "1.26.0")]
1576impl<T> FusedIterator for Iter<'_, T> {}
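
// Illustrative usage (a sketch, not from the original source): `Iter` is
// obtained from `BinaryHeap::iter` and yields `&T` in arbitrary order, so
// it suits order-insensitive reductions:
//
//     use std::collections::BinaryHeap;
//     let heap = BinaryHeap::from([1, 2, 3]);
//     assert_eq!(heap.iter().sum::<i32>(), 6);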

/// An owning iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::into_iter()`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    iter: vec::IntoIter<T, A>,
}

impl<T, A: Allocator> IntoIter<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}

#[doc(hidden)]
#[unstable(issue = "none", feature = "trusted_fused")]
unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}

#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
    /// Creates an empty `binary_heap::IntoIter`.
    ///
    /// ```
    /// # use std::collections::binary_heap;
    /// let iter: binary_heap::IntoIter<u8> = Default::default();
    /// assert_eq!(iter.len(), 0);
    /// ```
    fn default() -> Self {
        IntoIter { iter: Default::default() }
    }
}

// In addition to the SAFETY invariants of the following three unsafe traits,
// also refer to the `vec::in_place_collect` module documentation for an overview.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
    type Source = IntoIter<T, A>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
    const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
    const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
}

#[cfg(not(test))]
unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;

    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        &mut self.iter
    }
}
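
// Illustrative consequence of the impls above (a sketch, not from the
// original source): since `IntoIter` participates in in-place iteration,
// collecting a mapped heap iterator into a `Vec` may reuse the heap's
// existing allocation instead of allocating anew:
//
//     use std::collections::BinaryHeap;
//     let heap = BinaryHeap::from([1, 2, 3]);
//     let doubled: Vec<i32> = heap.into_iter().map(|x| x * 2).collect();
//     assert_eq!(doubled.len(), 3);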

/// An owning iterator over the elements of a `BinaryHeap`, removed in
/// heap order (greatest first).
///
/// This `struct` is created by [`BinaryHeap::into_iter_sorted()`]. See its
/// documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    inner: BinaryHeap<T, A>,
}

impl<T, A: Allocator> IntoIterSorted<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.inner.allocator()
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for IntoIterSorted<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for IntoIterSorted<T, A> {}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for IntoIterSorted<T, A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for IntoIterSorted<T, A> {}
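
// Illustrative usage (a sketch; `binary_heap_into_iter_sorted` is an
// unstable feature): `IntoIterSorted` pops the heap on every `next`, so
// elements arrive in heap order, greatest first:
//
//     #![feature(binary_heap_into_iter_sorted)]
//     use std::collections::BinaryHeap;
//     let heap = BinaryHeap::from([1, 3, 2]);
//     assert_eq!(heap.into_iter_sorted().collect::<Vec<_>>(), [3, 2, 1]);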

/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
///
/// [`drain`]: BinaryHeap::drain
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    iter: vec::Drain<'a, T, A>,
}

impl<T, A: Allocator> Drain<'_, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
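
// Illustrative usage (a sketch, not from the original source): `Drain`
// yields elements in arbitrary order and leaves the heap empty:
//
//     use std::collections::BinaryHeap;
//     let mut heap = BinaryHeap::from([1, 3, 2]);
//     let mut drained: Vec<_> = heap.drain().collect();
//     drained.sort();
//     assert_eq!(drained, [1, 2, 3]);
//     assert!(heap.is_empty());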

/// A draining iterator over the elements of a `BinaryHeap`, removed in
/// heap order.
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
///
/// [`drain_sorted`]: BinaryHeap::drain_sorted
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    inner: &'a mut BinaryHeap<T, A>,
}

impl<'a, T: Ord, A: Allocator> DrainSorted<'a, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.inner.allocator()
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord, A: Allocator> Drop for DrainSorted<'a, T, A> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        struct DropGuard<'r, 'a, T: Ord, A: Allocator>(&'r mut DrainSorted<'a, T, A>);

        impl<'r, 'a, T: Ord, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            fn drop(&mut self) {
                while self.0.inner.pop().is_some() {}
            }
        }

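        // The guard makes the loop panic-safe: if dropping `item` panics,
        // `guard` is dropped during unwinding and its `Drop` impl pops (and
        // drops) every remaining element, so the heap is still left empty.
        // On the normal path the guard is leaked with `mem::forget`, so it
        // has no effect.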
        while let Some(item) = self.inner.pop() {
            let guard = DropGuard(self);
            drop(item);
            mem::forget(guard);
        }
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for DrainSorted<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for DrainSorted<'_, T, A> {}

#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for DrainSorted<'_, T, A> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for DrainSorted<'_, T, A> {}
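
// Illustrative usage (a sketch; `binary_heap_drain_sorted` is an unstable
// feature): `DrainSorted` pops on every `next`, yielding elements in heap
// order while emptying the heap:
//
//     #![feature(binary_heap_drain_sorted)]
//     use std::collections::BinaryHeap;
//     let mut heap = BinaryHeap::from([1, 3, 2]);
//     assert_eq!(heap.drain_sorted().collect::<Vec<_>>(), [3, 2, 1]);
//     assert!(heap.is_empty());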

#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord, A: Allocator> From<Vec<T, A>> for BinaryHeap<T, A> {
    /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
    ///
    /// This conversion happens in-place, and has *O*(*n*) time complexity.
    fn from(vec: Vec<T, A>) -> BinaryHeap<T, A> {
        let mut heap = BinaryHeap { data: vec };
        heap.rebuild();
        heap
    }
}
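
// Illustrative usage (a sketch, not from the original source): the vector
// is heapified in place, so the conversion allocates nothing:
//
//     use std::collections::BinaryHeap;
//     let heap = BinaryHeap::from(vec![1, 3, 2]);
//     assert_eq!(heap.peek(), Some(&3));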

#[stable(feature = "std_collections_from_array", since = "1.56.0")]
impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
    /// Converts a `[T; N]` into a `BinaryHeap<T>`.
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
    /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
    /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    fn from(arr: [T; N]) -> Self {
        Self::from_iter(arr)
    }
}

#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T, A: Allocator> From<BinaryHeap<T, A>> for Vec<T, A> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
    fn from(heap: BinaryHeap<T, A>) -> Vec<T, A> {
        heap.data
    }
}
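
// Illustrative usage (a sketch, not from the original source): the reverse
// conversion hands back the backing vector as-is, so elements appear in
// heap order rather than sorted order; use `BinaryHeap::into_sorted_vec`
// for ascending order:
//
//     use std::collections::BinaryHeap;
//     let heap = BinaryHeap::from([1, 3, 2]);
//     let vec: Vec<i32> = heap.into();
//     assert_eq!(vec.len(), 3);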

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
        BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
    }
}
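
// Illustrative usage (a sketch, not from the original source): `collect`
// first gathers the iterator into a `Vec` and then performs the O(n)
// heapify from the `From<Vec<T>>` impl above:
//
//     use std::collections::BinaryHeap;
//     let heap: BinaryHeap<i32> = (1..=4).collect();
//     assert_eq!(heap.peek(), Some(&4));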

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> IntoIterator for BinaryHeap<T, A> {
    type Item = T;
    type IntoIter = IntoIter<T, A>;

    /// Creates a consuming iterator, that is, one that moves each value out of
    /// the binary heap in arbitrary order. The binary heap cannot be used
    /// after calling this.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.into_iter() {
    ///     // x has type i32, not &i32
    ///     println!("{x}");
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<T, A> {
        IntoIter { iter: self.data.into_iter() }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, A: Allocator> IntoIterator for &'a BinaryHeap<T, A> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
        guard.heap.data.extend(iter);
    }

    #[inline]
    fn extend_one(&mut self, item: T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}
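
// Illustrative usage (a sketch, not from the original source): `extend`
// appends all new items to the backing vector and then restores the heap
// property once, via the `RebuildOnDrop` guard, rather than sifting after
// every push:
//
//     use std::collections::BinaryHeap;
//     let mut heap = BinaryHeap::from([5]);
//     heap.extend([1, 9, 4]);
//     assert_eq!(heap.peek(), Some(&9));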

#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy, A: Allocator> Extend<&'a T> for BinaryHeap<T, A> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }

    #[inline]
    fn extend_one(&mut self, &item: &'a T) {
        self.push(item);
    }

    #[inline]
    fn extend_reserve(&mut self, additional: usize) {
        self.reserve(additional);
    }
}
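
// Illustrative usage (a sketch, not from the original source): the
// by-reference impl lets a heap of `Copy` elements be extended directly
// from a slice iterator:
//
//     use std::collections::BinaryHeap;
//     let mut heap = BinaryHeap::new();
//     heap.extend([1, 4, 2].iter());
//     assert_eq!(heap.peek(), Some(&4));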