1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // This implementation is largely based on the high-level description and analysis of B-Trees
12 // found in *Open Data Structures* (ODS). Although our implementation does not use any of
13 // the source found in ODS, if one wishes to review the high-level design of this structure, it
14 // can be freely downloaded at http://opendatastructures.org/. Its contents are as of this
15 // writing (August 2014) freely licensed under the following Creative Commons Attribution
16 // License: [CC BY 2.5 CA](http://creativecommons.org/licenses/by/2.5/ca/).
18 pub use self::Entry::*;
23 use super::node::{mod, Node, Found, GoDown};
24 use super::node::{Traversal, MutTraversal, MoveTraversal};
25 use super::node::TraversalItem::{mod, Elem, Edge};
26 use super::node::ForceResult::{Leaf, Internal};
27 use core::borrow::BorrowFrom;
28 use std::hash::{Writer, Hash};
29 use core::default::Default;
30 use core::{iter, fmt, mem};
34 use ring_buf::RingBuf;
36 use self::Continuation::{Continue, Finished};
38 // FIXME(conventions): implement bounded iterators
40 /// A map based on a B-Tree.
42 /// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
43 /// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
44 /// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
45 /// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
46 /// is done is *very* inefficient for modern computer architectures. In particular, every element
47 /// is stored in its own individually heap-allocated node. This means that every single insertion
48 /// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
49 /// are both notably expensive things to do in practice, we are forced to at very least reconsider
52 /// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
53 /// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
54 /// searches. However, this does mean that searches will have to do *more* comparisons on average.
55 /// The precise number of comparisons depends on the node search strategy used. For optimal cache
56 /// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
57 /// the node using binary search. As a compromise, one could also perform a linear search
58 /// that initially only checks every i<sup>th</sup> element for some choice of i.
60 /// Currently, our implementation simply performs naive linear search. This provides excellent
61 /// performance on *small* nodes of elements which are cheap to compare. However in the future we
62 /// would like to further explore choosing the optimal search strategy based on the choice of B,
63 /// and possibly other factors. Using linear search, searching for a random element is expected
64 /// to take O(B log<sub>B</sub>n) comparisons, which is generally worse than a BST. In practice,
65 /// however, performance is excellent. `BTreeMap` is able to readily outperform `TreeMap` under
66 /// many workloads, and is competitive where it doesn't. BTreeMap also generally *scales* better
67 /// than TreeMap, making it more appropriate for large datasets.
69 /// However, `TreeMap` may still be more appropriate to use in many contexts. If elements are very
70 /// large or expensive to compare, `TreeMap` may be more appropriate. It won't allocate any
71 /// more space than is needed, and will perform the minimal number of comparisons necessary.
72 /// `TreeMap` also provides much better performance stability guarantees. Generally, very few
73 /// changes need to be made to update a BST, and two updates are expected to take about the same
74 /// amount of time on roughly equal sized BSTs. However a B-Tree's performance is much more
75 /// amortized. If a node is overfull, it must be split into two nodes. If a node is underfull, it
76 /// may be merged with another. Both of these operations are relatively expensive to perform, and
77 /// it's possible to force one to occur at every single level of the tree in a single insertion or
78 /// deletion. In fact, a malicious or otherwise unlucky sequence of insertions and deletions can
79 /// force this degenerate behaviour to occur on every operation. While the total amount of work
80 /// done on each operation isn't *catastrophic*, and *is* still bounded by O(B log<sub>B</sub>n),
81 /// it is certainly much slower when it does.
83 pub struct BTreeMap<K, V> {
90 /// An abstract base over which all other BTree iterators are built.
98 /// An iterator over a BTreeMap's entries.
99 pub struct Iter<'a, K: 'a, V: 'a> {
100 inner: AbsIter<Traversal<'a, K, V>>
103 /// A mutable iterator over a BTreeMap's entries.
104 pub struct IterMut<'a, K: 'a, V: 'a> {
105 inner: AbsIter<MutTraversal<'a, K, V>>
108 /// An owning iterator over a BTreeMap's entries.
109 pub struct IntoIter<K, V> {
110 inner: AbsIter<MoveTraversal<K, V>>
113 /// An iterator over a BTreeMap's keys.
114 pub struct Keys<'a, K: 'a, V: 'a> {
115 inner: Map<(&'a K, &'a V), &'a K, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
118 /// An iterator over a BTreeMap's values.
119 pub struct Values<'a, K: 'a, V: 'a> {
120 inner: Map<(&'a K, &'a V), &'a V, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
123 /// A view into a single entry in a map, which may either be vacant or occupied.
124 pub enum Entry<'a, K:'a, V:'a> {
126 Vacant(VacantEntry<'a, K, V>),
127 /// An occupied Entry
128 Occupied(OccupiedEntry<'a, K, V>),
132 pub struct VacantEntry<'a, K:'a, V:'a> {
134 stack: stack::SearchStack<'a, K, V, node::handle::Edge, node::handle::Leaf>,
137 /// An occupied Entry.
138 pub struct OccupiedEntry<'a, K:'a, V:'a> {
139 stack: stack::SearchStack<'a, K, V, node::handle::KV, node::handle::LeafOrInternal>,
142 impl<K: Ord, V> BTreeMap<K, V> {
143 /// Makes a new empty BTreeMap with a reasonable choice for B.
144 #[unstable = "matches collection reform specification, waiting for dust to settle"]
145 pub fn new() -> BTreeMap<K, V> {
146 //FIXME(Gankro): Tune this as a function of size_of<K/V>?
150 /// Makes a new empty BTreeMap with the given B.
152 /// B cannot be less than 2.
153 pub fn with_b(b: uint) -> BTreeMap<K, V> {
154 assert!(b > 1, "B must be greater than 1");
158 root: Node::make_leaf_root(b),
163 /// Clears the map, removing all values.
168 /// use std::collections::BTreeMap;
170 /// let mut a = BTreeMap::new();
171 /// a.insert(1u, "a");
173 /// assert!(a.is_empty());
175 #[unstable = "matches collection reform specification, waiting for dust to settle"]
176 pub fn clear(&mut self) {
178 // avoid recursive destructors by manually traversing the tree
179 for _ in mem::replace(self, BTreeMap::with_b(b)).into_iter() {};
182 /// Deprecated: renamed to `get`.
183 #[deprecated = "renamed to `get`"]
184 pub fn find(&self, key: &K) -> Option<&V> {
188 // Searching in a B-Tree is pretty straightforward.
190 // Start at the root. Try to find the key in the current node. If we find it, return it.
191 // If it's not in there, follow the edge *before* the smallest key larger than
192 // the search key. If no such key exists (they're *all* smaller), then just take the last
193 // edge in the node. If we're in a leaf and we don't find our key, then it's not
196 /// Returns a reference to the value corresponding to the key.
198 /// The key may be any borrowed form of the map's key type, but the ordering
199 /// on the borrowed form *must* match the ordering on the key type.
204 /// use std::collections::BTreeMap;
206 /// let mut map = BTreeMap::new();
207 /// map.insert(1u, "a");
208 /// assert_eq!(map.get(&1), Some(&"a"));
209 /// assert_eq!(map.get(&2), None);
211 #[unstable = "matches collection reform specification, waiting for dust to settle"]
212 pub fn get<Sized? Q>(&self, key: &Q) -> Option<&V> where Q: BorrowFrom<K> + Ord {
// Iterative descent: start at the root and repeatedly search the current node.
213 let mut cur_node = &self.root;
215 match Node::search(cur_node, key) {
// Exact match in this node: return a shared reference to the value.
216 Found(handle) => return Some(handle.into_kv().1),
217 GoDown(handle) => match handle.force() {
// Missed in a leaf: the key is not in the map at all.
218 Leaf(_) => return None,
219 Internal(internal_handle) => {
// Missed in an internal node: follow the edge into the child subtree
// and keep searching one level down.
220 cur_node = internal_handle.into_edge();
228 /// Returns true if the map contains a value for the specified key.
230 /// The key may be any borrowed form of the map's key type, but the ordering
231 /// on the borrowed form *must* match the ordering on the key type.
236 /// use std::collections::BTreeMap;
238 /// let mut map = BTreeMap::new();
239 /// map.insert(1u, "a");
240 /// assert_eq!(map.contains_key(&1), true);
241 /// assert_eq!(map.contains_key(&2), false);
243 #[unstable = "matches collection reform specification, waiting for dust to settle"]
244 pub fn contains_key<Sized? Q>(&self, key: &Q) -> bool where Q: BorrowFrom<K> + Ord {
// Thin wrapper: a key is present exactly when `get` finds a value for it.
245 self.get(key).is_some()
248 /// Deprecated: renamed to `get_mut`.
249 #[deprecated = "renamed to `get_mut`"]
250 pub fn find_mut(&mut self, key: &K) -> Option<&mut V> {
254 /// Returns a mutable reference to the value corresponding to the key.
256 /// The key may be any borrowed form of the map's key type, but the ordering
257 /// on the borrowed form *must* match the ordering on the key type.
262 /// use std::collections::BTreeMap;
264 /// let mut map = BTreeMap::new();
265 /// map.insert(1u, "a");
266 /// match map.get_mut(&1) {
267 /// Some(x) => *x = "b",
270 /// assert_eq!(map[1], "b");
272 // See `get` for implementation notes, this is basically a copy-paste with mut's added
273 #[unstable = "matches collection reform specification, waiting for dust to settle"]
274 pub fn get_mut<Sized? Q>(&mut self, key: &Q) -> Option<&mut V> where Q: BorrowFrom<K> + Ord {
275 // temp_node is a Borrowck hack for having a mutable value outlive a loop iteration
276 let mut temp_node = &mut self.root;
// Same iterative descent as `get`, but threading a mutable borrow down the tree.
278 let cur_node = temp_node;
279 match Node::search(cur_node, key) {
// Exact match: hand back a mutable reference to the value.
280 Found(handle) => return Some(handle.into_kv_mut().1),
281 GoDown(handle) => match handle.force() {
// Missed in a leaf: the key is absent.
282 Leaf(_) => return None,
283 Internal(internal_handle) => {
// Descend into the child subtree via the mutable edge.
284 temp_node = internal_handle.into_edge_mut();
292 /// Deprecated: renamed to `insert`.
293 #[deprecated = "renamed to `insert`"]
294 pub fn swap(&mut self, key: K, value: V) -> Option<V> {
// Deprecated alias kept for backward compatibility: forwards to `insert`.
295 self.insert(key, value)
298 // Insertion in a B-Tree is a bit complicated.
300 // First we do the same kind of search described in `find`. But we need to maintain a stack of
301 // all the nodes/edges in our search path. If we find a match for the key we're trying to
302 // insert, just swap the vals and return the old ones. However, when we bottom out in a leaf,
303 // we attempt to insert our key-value pair at the same location we would want to follow another
306 // If the node has room, then this is done in the obvious way by shifting elements. However,
307 // if the node itself is full, we split node into two, and give its median key-value
308 // pair to its parent to insert the new node with. Of course, the parent may also be
309 // full, and insertion can propagate until we reach the root. If we reach the root, and
310 // it is *also* full, then we split the root and place the two nodes under a newly made root.
312 // Note that we subtly deviate from Open Data Structures in our implementation of split.
313 // ODS describes inserting into the node *regardless* of its capacity, and then
314 // splitting *afterwards* if it happens to be overfull. However, this is inefficient.
315 // Instead, we split beforehand, and then insert the key-value pair into the appropriate
316 // result node. This has two consequences:
318 // 1) While ODS produces a left node of size B-1, and a right node of size B,
319 // we may potentially reverse this. However, this shouldn't affect the analysis.
321 // 2) While ODS may potentially return the pair we *just* inserted after
322 // the split, we will never do this. Again, this shouldn't affect the analysis.
324 /// Inserts a key-value pair into the map. If the key already had a value
325 /// present in the map, that value is returned. Otherwise, `None` is returned.
330 /// use std::collections::BTreeMap;
332 /// let mut map = BTreeMap::new();
333 /// assert_eq!(map.insert(37u, "a"), None);
334 /// assert_eq!(map.is_empty(), false);
336 /// map.insert(37, "b");
337 /// assert_eq!(map.insert(37, "c"), Some("b"));
338 /// assert_eq!(map[37], "c");
340 #[unstable = "matches collection reform specification, waiting for dust to settle"]
341 pub fn insert(&mut self, mut key: K, mut value: V) -> Option<V> {
342 // This is a stack of rawptrs to nodes paired with indices, respectively
343 // representing the nodes and edges of our search path. We have to store rawptrs
344 // because as far as Rust is concerned, we can mutate aliased data with such a
345 // stack. It is of course correct, but what it doesn't know is that we will only
346 // be popping and using these ptrs one at a time in child-to-parent order. The alternative
347 // to doing this is to take the Nodes from their parents. This actually makes
348 // borrowck *really* happy and everything is pretty smooth. However, this creates
349 // *tons* of pointless writes, and requires us to always walk all the way back to
350 // the root after an insertion, even if we only needed to change a leaf. Therefore,
351 // we accept this potential unsafety and complexity in the name of performance.
353 // Regardless, the actual dangerous logic is completely abstracted away from BTreeMap
354 // by the stack module. All it can do is immutably read nodes, and ask the search stack
355 // to proceed down some edge by index. This makes the search logic we'll be reusing in a
356 // few different methods much neater, and of course drastically improves safety.
357 let mut stack = stack::PartialSearchStack::new(self);
360 let result = stack.with(move |pusher, node| {
361 // Same basic logic as found in `find`, but with PartialSearchStack mediating the
362 // actual nodes for us
363 return match Node::search(node, &key) {
364 Found(mut handle) => {
365 // Perfect match, swap the values and return the old one
366 mem::swap(handle.val_mut(), &mut value);
367 Finished(Some(value))
370 // We need to keep searching, try to get the search stack
371 // to go down further
372 match handle.force() {
373 Leaf(leaf_handle) => {
374 // We've reached a leaf, perform the insertion here
375 pusher.seal(leaf_handle).insert(key, value);
378 Internal(internal_handle) => {
379 // We've found the subtree to insert this key/value pair in,
381 Continue((pusher.push(internal_handle), key, value))
388 Finished(ret) => { return ret; },
389 Continue((new_stack, renewed_key, renewed_val)) => {
398 // Deletion is the most complicated operation for a B-Tree.
400 // First we do the same kind of search described in
401 // `find`. But we need to maintain a stack of all the nodes/edges in our search path.
402 // If we don't find the key, then we just return `None` and do nothing. If we do find the
403 // key, we perform two operations: remove the item, and then possibly handle underflow.
405 // # removing the item
406 // If the node is a leaf, we just remove the item, and shift
407 // any items after it back to fill the hole.
409 // If the node is an internal node, we *swap* the item with the smallest item
410 // in its right subtree (which must reside in a leaf), and then revert to the leaf
413 // # handling underflow
414 // After removing an item, there may be too few items in the node. We want nodes
415 // to be mostly full for efficiency, although we make an exception for the root, which
416 // may have as few as one item. If this is the case, we may first try to steal
417 // an item from our left or right neighbour.
419 // To steal from the left (right) neighbour,
420 // we take the largest (smallest) item and child from it. We then swap the taken item
421 // with the item in their mutual parent that separates them, and then insert the
422 // parent's item and the taken child into the first (last) index of the underflowed node.
424 // However, stealing has the possibility of underflowing our neighbour. If this is the
425 // case, we instead *merge* with our neighbour. This of course reduces the number of
426 // children in the parent. Therefore, we also steal the item that separates the now
427 // merged nodes, and insert it into the merged node.
429 // Merging may cause the parent to underflow. If this is the case, then we must repeat
430 // the underflow handling process on the parent. If merging merges the last two children
431 // of the root, then we replace the root with the merged node.
433 /// Deprecated: renamed to `remove`.
434 #[deprecated = "renamed to `remove`"]
435 pub fn pop(&mut self, key: &K) -> Option<V> {
439 /// Removes a key from the map, returning the value at the key if the key
440 /// was previously in the map.
442 /// The key may be any borrowed form of the map's key type, but the ordering
443 /// on the borrowed form *must* match the ordering on the key type.
448 /// use std::collections::BTreeMap;
450 /// let mut map = BTreeMap::new();
451 /// map.insert(1u, "a");
452 /// assert_eq!(map.remove(&1), Some("a"));
453 /// assert_eq!(map.remove(&1), None);
455 #[unstable = "matches collection reform specification, waiting for dust to settle"]
456 pub fn remove<Sized? Q>(&mut self, key: &Q) -> Option<V> where Q: BorrowFrom<K> + Ord {
457 // See `swap` for a more thorough description of the stuff going on in here
// Build a search stack down to the key; the stack module then performs the
// actual removal once the exact position has been sealed.
458 let mut stack = stack::PartialSearchStack::new(self);
460 let result = stack.with(move |pusher, node| {
461 return match Node::search(node, key) {
463 // Perfect match. Terminate the stack here, and remove the entry
464 Finished(Some(pusher.seal(handle).remove()))
467 // We need to keep searching, try to go down the next edge
468 match handle.force() {
469 // We're at a leaf; the key isn't in here
470 Leaf(_) => Finished(None),
471 Internal(internal_handle) => Continue(pusher.push(internal_handle))
// `Finished` carries the final answer out of the search loop;
// `Continue` restarts the search one level deeper with the grown stack.
477 Finished(ret) => return ret,
478 Continue(new_stack) => stack = new_stack
484 /// A helper enum useful for deciding whether to continue a loop since we can't
485 /// return from a closure
486 enum Continuation<A, B> {
491 /// The stack module provides a safe interface for constructing and manipulating a stack of ptrs
492 /// to nodes. By using this module much better safety guarantees can be made, and more search
493 /// boilerplate gets cut out.
495 use core::prelude::*;
496 use core::kinds::marker;
499 use super::super::node::{mod, Node, Fit, Split, Internal, Leaf};
500 use super::super::node::handle;
503 /// A generic mutable reference, identical to `&mut` except for the fact that its lifetime
504 /// parameter is invariant. This means that wherever an `IdRef` is expected, only an `IdRef`
505 /// with the exact requested lifetime can be used. This is in contrast to normal references,
506 /// where `&'static` can be used in any function expecting any lifetime reference.
507 pub struct IdRef<'id, T: 'id> {
509 marker: marker::InvariantLifetime<'id>
512 impl<'id, T> Deref<T> for IdRef<'id, T> {
513 fn deref(&self) -> &T {
518 impl<'id, T> DerefMut<T> for IdRef<'id, T> {
519 fn deref_mut(&mut self) -> &mut T {
524 type StackItem<K, V> = node::Handle<*mut Node<K, V>, handle::Edge, handle::Internal>;
525 type Stack<K, V> = Vec<StackItem<K, V>>;
527 /// A `PartialSearchStack` handles the construction of a search stack.
528 pub struct PartialSearchStack<'a, K:'a, V:'a> {
529 map: &'a mut BTreeMap<K, V>,
531 next: *mut Node<K, V>,
534 /// A `SearchStack` represents a full path to an element or an edge of interest. It provides
535 /// methods depending on the type of what the path points to for removing an element, inserting
536 /// a new element, and manipulating the element at the top of the stack.
537 pub struct SearchStack<'a, K:'a, V:'a, Type, NodeType> {
538 map: &'a mut BTreeMap<K, V>,
540 top: node::Handle<*mut Node<K, V>, Type, NodeType>,
543 /// A `PartialSearchStack` that doesn't hold a reference to the next node, and is just
544 /// waiting for a `Handle` to that next node to be pushed. See `PartialSearchStack::with`
545 /// for more details.
546 pub struct Pusher<'id, 'a, K:'a, V:'a> {
547 map: &'a mut BTreeMap<K, V>,
549 marker: marker::InvariantLifetime<'id>
552 impl<'a, K, V> PartialSearchStack<'a, K, V> {
553 /// Creates a new PartialSearchStack from a BTreeMap by initializing the stack with the
554 /// root of the tree.
555 pub fn new(map: &'a mut BTreeMap<K, V>) -> PartialSearchStack<'a, K, V> {
556 let depth = map.depth;
559 next: &mut map.root as *mut _,
561 stack: Vec::with_capacity(depth),
565 /// Breaks up the stack into a `Pusher` and the next `Node`, allowing the given closure
566 /// to interact with, search, and finally push the `Node` onto the stack. The passed in
567 /// closure must be polymorphic on the `'id` lifetime parameter, as this statically
568 /// ensures that only `Handle`s from the correct `Node` can be pushed.
570 /// The reason this works is that the `Pusher` has an `'id` parameter, and will only accept
571 /// handles with the same `'id`. The closure could only get references with that lifetime
572 /// through its arguments or through some other `IdRef` that it has lying around. However,
573 /// no other `IdRef` could possibly work - because the `'id` is held in an invariant
574 /// parameter, it would need to have precisely the correct lifetime, which would mean that
575 /// at least one of the calls to `with` wouldn't be properly polymorphic, wanting a
576 /// specific lifetime instead of the one that `with` chooses to give it.
578 /// See also Haskell's `ST` monad, which uses a similar trick.
579 pub fn with<T, F: for<'id> FnOnce(Pusher<'id, 'a, K, V>,
580 IdRef<'id, Node<K, V>>) -> T>(self, closure: F) -> T {
581 let pusher = Pusher {
584 marker: marker::InvariantLifetime
587 inner: unsafe { &mut *self.next },
588 marker: marker::InvariantLifetime
591 closure(pusher, node)
595 impl<'id, 'a, K, V> Pusher<'id, 'a, K, V> {
596 /// Pushes the requested child of the stack's current top on top of the stack. If the child
597 /// exists, then a new PartialSearchStack is yielded. Otherwise, a VacantSearchStack is
599 pub fn push(mut self, mut edge: node::Handle<IdRef<'id, Node<K, V>>,
602 -> PartialSearchStack<'a, K, V> {
603 self.stack.push(edge.as_raw());
607 next: edge.edge_mut() as *mut _,
611 /// Converts the PartialSearchStack into a SearchStack.
612 pub fn seal<Type, NodeType>
613 (self, mut handle: node::Handle<IdRef<'id, Node<K, V>>, Type, NodeType>)
614 -> SearchStack<'a, K, V, Type, NodeType> {
618 top: handle.as_raw(),
623 impl<'a, K, V, NodeType> SearchStack<'a, K, V, handle::KV, NodeType> {
624 /// Gets a reference to the value the stack points to.
625 pub fn peek(&self) -> &V {
// NOTE(review): the unsafety presumably relies on `top` being a valid raw
// handle into the tree for the stack's lifetime, an invariant maintained by
// this module's construction paths — confirm against the stack module docs.
626 unsafe { self.top.from_raw().into_kv().1 }
629 /// Gets a mutable reference to the value the stack points to.
630 pub fn peek_mut(&mut self) -> &mut V {
// Same assumed invariant as `peek`, but yielding a mutable reference.
631 unsafe { self.top.from_raw_mut().into_kv_mut().1 }
634 /// Converts the stack into a mutable reference to the value it points to, with a lifetime
635 /// tied to the original tree.
636 pub fn into_top(mut self) -> &'a mut V {
638 mem::copy_mut_lifetime(
640 self.top.from_raw_mut().val_mut()
646 impl<'a, K, V> SearchStack<'a, K, V, handle::KV, handle::Leaf> {
647 /// Removes the key and value in the top element of the stack, then handles underflows as
648 /// described in BTree's pop function.
649 fn remove_leaf(mut self) -> V {
650 self.map.length -= 1;
652 // Remove the key-value pair from the leaf that this search stack points to.
653 // Then, note if the leaf is underfull, and promptly forget the leaf and its ptr
654 // to avoid ownership issues.
655 let (value, mut underflow) = unsafe {
656 let (_, value) = self.top.from_raw_mut().remove_as_leaf();
657 let underflow = self.top.from_raw().node().is_underfull();
662 match self.stack.pop() {
664 // We've reached the root, so no matter what, we're done. We manually
665 // access the root via the tree itself to avoid creating any dangling
667 if self.map.root.len() == 0 && !self.map.root.is_leaf() {
668 // We've emptied out the root, so make its only child the new root.
669 // If it's a leaf, we just let it become empty.
671 self.map.root.hoist_lone_child();
675 Some(mut handle) => {
677 // Underflow! Handle it!
679 handle.from_raw_mut().handle_underflow();
680 underflow = handle.from_raw().node().is_underfull();
692 impl<'a, K, V> SearchStack<'a, K, V, handle::KV, handle::LeafOrInternal> {
693 /// Removes the key and value in the top element of the stack, then handles underflows as
694 /// described in BTree's pop function.
695 pub fn remove(self) -> V {
696 // Ensure that the search stack goes to a leaf. This is necessary to perform deletion
697 // in a BTree. Note that this may put the tree in an inconsistent state (further
698 // described in into_leaf's comments), but this is immediately fixed by the
699 // removing the value we want to remove
700 self.into_leaf().remove_leaf()
703 /// Subroutine for removal. Takes a search stack for a key that might terminate at an
704 /// internal node, and mutates the tree and search stack to *make* it a search stack
705 /// for that same key that *does* terminate at a leaf. If the mutation occurs, then this
706 /// leaves the tree in an inconsistent state that must be repaired by the caller by
707 /// removing the entry in question. Specifically the key-value pair and its successor will
709 fn into_leaf(mut self) -> SearchStack<'a, K, V, handle::KV, handle::Leaf> {
711 let mut top_raw = self.top;
712 let mut top = top_raw.from_raw_mut();
714 let key_ptr = top.key_mut() as *mut _;
715 let val_ptr = top.val_mut() as *mut _;
717 // Try to go into the right subtree of the found key to find its successor
719 Leaf(mut leaf_handle) => {
720 // We're a proper leaf stack, nothing to do
724 top: leaf_handle.as_raw()
727 Internal(mut internal_handle) => {
728 let mut right_handle = internal_handle.right_edge();
730 //We're not a proper leaf stack, let's get to work.
731 self.stack.push(right_handle.as_raw());
733 let mut temp_node = right_handle.edge_mut();
735 // Walk into the smallest subtree of this node
736 let node = temp_node;
738 match node.kv_handle(0).force() {
739 Leaf(mut handle) => {
740 // This node is a leaf, do the swap and return
741 mem::swap(handle.key_mut(), &mut *key_ptr);
742 mem::swap(handle.val_mut(), &mut *val_ptr);
749 Internal(kv_handle) => {
750 // This node is internal, go deeper
751 let mut handle = kv_handle.into_left_edge();
752 self.stack.push(handle.as_raw());
753 temp_node = handle.into_edge_mut();
763 impl<'a, K, V> SearchStack<'a, K, V, handle::Edge, handle::Leaf> {
764 /// Inserts the key and value into the top element in the stack, and if that node has to
765 /// split recursively inserts the split contents into the next element stack until
768 /// Assumes that the stack represents a search path from the root to a leaf.
770 /// An &mut V is returned to the inserted value, for callers that want a reference to this.
771 pub fn insert(mut self, key: K, val: V) -> &'a mut V {
773 self.map.length += 1;
775 // Insert the key and value into the leaf at the top of the stack
776 let (mut insertion, inserted_ptr) = self.top.from_raw_mut()
777 .insert_as_leaf(key, val);
782 // The last insertion went off without a hitch, no splits! We can stop
784 return &mut *inserted_ptr;
786 Split(key, val, right) => match self.stack.pop() {
787 // The last insertion triggered a split, so get the next element on the
788 // stack to recursively insert the split node into.
790 // The stack was empty; we've split the root, and need to make a
791 // a new one. This is done in-place because we can't move the
792 // root out of a reference to the tree.
793 Node::make_internal_root(&mut self.map.root, self.map.b,
797 return &mut *inserted_ptr;
799 Some(mut handle) => {
800 // The stack wasn't empty, do the insertion and recurse
801 insertion = handle.from_raw_mut()
802 .insert_as_internal(key, val, right);
813 impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
814 fn from_iter<T: Iterator<(K, V)>>(iter: T) -> BTreeMap<K, V> {
815 let mut map = BTreeMap::new();
821 impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
823 fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
830 impl<S: Writer, K: Hash<S>, V: Hash<S>> Hash<S> for BTreeMap<K, V> {
831 fn hash(&self, state: &mut S) {
832 for elt in self.iter() {
839 impl<K: Ord, V> Default for BTreeMap<K, V> {
841 fn default() -> BTreeMap<K, V> {
846 impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> {
847 fn eq(&self, other: &BTreeMap<K, V>) -> bool {
// Cheap length check first, then element-wise comparison of the two maps'
// entries in their iteration order.
848 self.len() == other.len() &&
849 self.iter().zip(other.iter()).all(|(a, b)| a == b)
853 impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {}
855 impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> {
857 fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> {
// Lexicographic comparison of the two maps' entries in iteration order.
858 iter::order::partial_cmp(self.iter(), other.iter())
862 impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> {
864 fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering {
// Total lexicographic ordering over the maps' entries in iteration order.
865 iter::order::cmp(self.iter(), other.iter())
869 impl<K: Show, V: Show> Show for BTreeMap<K, V> {
870 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
871 try!(write!(f, "{{"));
873 for (i, (k, v)) in self.iter().enumerate() {
874 if i != 0 { try!(write!(f, ", ")); }
875 try!(write!(f, "{}: {}", *k, *v));
882 impl<K: Ord, Sized? Q, V> Index<Q, V> for BTreeMap<K, V>
883 where Q: BorrowFrom<K> + Ord
884 {
885 fn index(&self, key: &Q) -> &V {
// Panics with "no entry found for key" when the key is absent;
// use `get` for a non-panicking lookup.
886 self.get(key).expect("no entry found for key")
890 impl<K: Ord, Sized? Q, V> IndexMut<Q, V> for BTreeMap<K, V>
891 where Q: BorrowFrom<K> + Ord
892 {
893 fn index_mut(&mut self, key: &Q) -> &mut V {
// Panics with "no entry found for key" when the key is absent;
// use `get_mut` for a non-panicking lookup.
894 self.get_mut(key).expect("no entry found for key")
898 /// Genericises over how to get the correct type of iterator from the correct type
899 /// of Node ownership.
901 fn traverse(node: N) -> Self;
904 impl<'a, K, V> Traverse<&'a Node<K, V>> for Traversal<'a, K, V> {
905 fn traverse(node: &'a Node<K, V>) -> Traversal<'a, K, V> {
910 impl<'a, K, V> Traverse<&'a mut Node<K, V>> for MutTraversal<'a, K, V> {
911 fn traverse(node: &'a mut Node<K, V>) -> MutTraversal<'a, K, V> {
916 impl<K, V> Traverse<Node<K, V>> for MoveTraversal<K, V> {
917 fn traverse(node: Node<K, V>) -> MoveTraversal<K, V> {
922 /// Represents an operation to perform inside the following iterator methods.
923 /// This is necessary to use in `next` because we want to modify self.left inside
924 /// a match that borrows it. Similarly, in `next_back` for self.right. Instead, we use this
925 /// enum to note what we want to do, and do it after the match.
// Forward iteration for the "absolute" iterator shared by Iter/IterMut/IntoIter.
// `T` is any double-ended node traversal buildable from edge type `E`.
// NOTE(review): several lines of this impl (the `return None` arm, the
// yield/size-decrement statements, match/brace closers) are missing from this
// excerpt; the numeric prefixes are extraction artifacts.
931 impl<K, V, E, T: Traverse<E> + DoubleEndedIterator<TraversalItem<K, V, E>>>
932 Iterator<(K, V)> for AbsIter<T> {
933 // This function is pretty long, but only because there's a lot of cases to consider.
934 // Our iterator represents two search paths, left and right, to the smallest and largest
935 // elements we have yet to yield. lca represents the least common ancestor of these two paths,
936 // above-which we never walk, since everything outside it has already been consumed (or was
937 // never in the range to iterate).
939 // Note that the design of these iterators permits an *arbitrary* initial pair of min and max,
940 // making these arbitrary sub-range iterators. However the logic to construct these paths
941 // efficiently is fairly involved, so this is a FIXME. The sub-range iterators also wouldn't be
942 // able to accurately predict size, so those iterators can't implement ExactSizeIterator.
943 fn next(&mut self) -> Option<(K, V)> {
// The `op` value (Push/Pop) is computed inside the match and applied after it,
// because `self.left` cannot be mutated while the match borrows it (see the
// enum comment above the `Op`-style enum earlier in the file).
945 // We want the smallest element, so try to get the top of the left stack
946 let op = match self.left.back_mut() {
947 // The left stack is empty, so try to get the next element of the two paths
948 // LCAs (the left search path is currently a subpath of the right one)
949 None => match self.lca.next() {
950 // The lca has been exhausted, walk further down the right path
951 None => match self.right.pop_front() {
952 // The right path is exhausted, so we're done
954 // The right path had something, make that the new LCA
955 // and restart the whole process
961 // The lca yielded an edge, make that the new head of the left path
962 Some(Edge(next)) => Push(Traverse::traverse(next)),
963 // The lca yielded an entry, so yield that
964 Some(Elem(k, v)) => {
969 // The left stack wasn't empty, so continue along the node in its head
970 Some(iter) => match iter.next() {
971 // The head of the left path is empty, so Pop it off and restart the process
973 // The head of the left path yielded an edge, so make that the new head
975 Some(Edge(next)) => Push(Traverse::traverse(next)),
976 // The head of the left path yielded entry, so yield that
977 Some(Elem(k, v)) => {
984 // Handle any operation on the left stack as necessary
986 Push(item) => { self.left.push_back(item); },
987 Pop => { self.left.pop_back(); },
// Exact bounds: `self.size` tracks the number of remaining elements, so the
// hint is precise (this is what lets the wrappers implement ExactSizeIterator).
992 fn size_hint(&self) -> (uint, Option<uint>) {
993 (self.size, Some(self.size))
// Backward iteration: mirror image of `next` with left/right and
// front/back swapped (right stack is walked, lca consumed from the back).
// NOTE(review): as with `next`, several lines (yield arms, brace closers) are
// missing from this excerpt.
997 impl<K, V, E, T: Traverse<E> + DoubleEndedIterator<TraversalItem<K, V, E>>>
998 DoubleEndedIterator<(K, V)> for AbsIter<T> {
999 // next_back is totally symmetric to next
1000 fn next_back(&mut self) -> Option<(K, V)> {
1002 let op = match self.right.back_mut() {
1003 None => match self.lca.next_back() {
1004 None => match self.left.pop_front() {
1005 None => return None,
1011 Some(Edge(next)) => Push(Traverse::traverse(next)),
1012 Some(Elem(k, v)) => {
1017 Some(iter) => match iter.next_back() {
1019 Some(Edge(next)) => Push(Traverse::traverse(next)),
1020 Some(Elem(k, v)) => {
// Apply the deferred stack operation on the *right* stack this time.
1028 Push(item) => { self.right.push_back(item); },
1029 Pop => { self.right.pop_back(); }
// `Iter`: shared-reference iterator over `(&K, &V)`; every method forwards to
// the inner `AbsIter`. Exact size comes from the map's tracked length.
1035 impl<'a, K, V> Iterator<(&'a K, &'a V)> for Iter<'a, K, V> {
1036 fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
1037 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1039 impl<'a, K, V> DoubleEndedIterator<(&'a K, &'a V)> for Iter<'a, K, V> {
1040 fn next_back(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next_back() }
1042 impl<'a, K, V> ExactSizeIterator<(&'a K, &'a V)> for Iter<'a, K, V> {}
// `IterMut`: like `Iter` but values are yielded by `&mut`; pure delegation.
1045 impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for IterMut<'a, K, V> {
1046 fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next() }
1047 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1049 impl<'a, K, V> DoubleEndedIterator<(&'a K, &'a mut V)> for IterMut<'a, K, V> {
1050 fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next_back() }
1052 impl<'a, K, V> ExactSizeIterator<(&'a K, &'a mut V)> for IterMut<'a, K, V> {}
// `IntoIter`: owning iterator yielding `(K, V)` by value; pure delegation.
1055 impl<K, V> Iterator<(K, V)> for IntoIter<K, V> {
1056 fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
1057 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1059 impl<K, V> DoubleEndedIterator<(K, V)> for IntoIter<K, V> {
1060 fn next_back(&mut self) -> Option<(K, V)> { self.inner.next_back() }
1062 impl<K, V> ExactSizeIterator<(K, V)> for IntoIter<K, V> {}
// `Keys`: projects the map iterator onto `&K` (built in `keys()` via a
// `map` over `iter()`); pure delegation to the inner adaptor.
1065 impl<'a, K, V> Iterator<&'a K> for Keys<'a, K, V> {
1066 fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
1067 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1069 impl<'a, K, V> DoubleEndedIterator<&'a K> for Keys<'a, K, V> {
1070 fn next_back(&mut self) -> Option<(&'a K)> { self.inner.next_back() }
1072 impl<'a, K, V> ExactSizeIterator<&'a K> for Keys<'a, K, V> {}
// `Values`: projects the map iterator onto `&V` (built in `values()`);
// pure delegation to the inner adaptor.
1075 impl<'a, K, V> Iterator<&'a V> for Values<'a, K, V> {
1076 fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
1077 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1079 impl<'a, K, V> DoubleEndedIterator<&'a V> for Values<'a, K, V> {
1080 fn next_back(&mut self) -> Option<(&'a V)> { self.inner.next_back() }
1082 impl<'a, K, V> ExactSizeIterator<&'a V> for Values<'a, K, V> {}
// Vacant half of the entry API: the search stack already points at the
// insertion site, so `set` can insert without re-searching the tree.
1085 impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
1086 /// Sets the value of the entry with the VacantEntry's key,
1087 /// and returns a mutable reference to it.
1088 pub fn set(self, value: V) -> &'a mut V {
// Consumes the saved search stack; insertion may split nodes on the way up.
1089 self.stack.insert(self.key, value)
// Occupied half of the entry API: the search stack points at an existing
// element, so all accessors are O(1) peeks at the stack top.
// NOTE(review): some body lines (e.g. `get`'s peek, `set`'s return of the
// swapped-out `value`, `take`'s removal call) are missing from this excerpt.
1093 impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
1094 /// Gets a reference to the value in the entry.
1095 pub fn get(&self) -> &V {
1099 /// Gets a mutable reference to the value in the entry.
1100 pub fn get_mut(&mut self) -> &mut V {
1101 self.stack.peek_mut()
1104 /// Converts the entry into a mutable reference to its value.
// Consumes the entry so the returned borrow can live as long as the map borrow.
1105 pub fn into_mut(self) -> &'a mut V {
1106 self.stack.into_top()
1109 /// Sets the value of the entry with the OccupiedEntry's key,
1110 /// and returns the entry's old value.
1111 pub fn set(&mut self, mut value: V) -> V {
// Swap new value in; after this, `value` holds the previous value.
1112 mem::swap(self.stack.peek_mut(), &mut value);
1116 /// Takes the value of the entry out of the map, and returns it.
1117 pub fn take(self) -> V {
// Iterator constructors and size accessors that need no `Ord` bound.
// All three iterator constructors share one pattern: wrap the root in the
// appropriate `Traverse` flavor as the initial LCA, with empty left/right
// path stacks, and seed the exact size from `self.len()`.
// NOTE(review): struct-literal lines (e.g. the `AbsIter { ... size: len }`
// wrappers and closing braces) are missing from this excerpt.
1122 impl<K, V> BTreeMap<K, V> {
1123 /// Gets an iterator over the entries of the map.
1128 /// use std::collections::BTreeMap;
1130 /// let mut map = BTreeMap::new();
1131 /// map.insert(1u, "a");
1132 /// map.insert(2u, "b");
1133 /// map.insert(3u, "c");
1135 /// for (key, value) in map.iter() {
1136 /// println!("{}: {}", key, value);
1139 /// let (first_key, first_value) = map.iter().next().unwrap();
1140 /// assert_eq!((*first_key, *first_value), (1u, "a"));
1142 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1143 pub fn iter<'a>(&'a self) -> Iter<'a, K, V> {
1144 let len = self.len();
// Borrowing traversal over the root node.
1147 lca: Traverse::traverse(&self.root),
1148 left: RingBuf::new(),
1149 right: RingBuf::new(),
1155 /// Gets a mutable iterator over the entries of the map.
1160 /// use std::collections::BTreeMap;
1162 /// let mut map = BTreeMap::new();
1163 /// map.insert("a", 1u);
1164 /// map.insert("b", 2u);
1165 /// map.insert("c", 3u);
1167 /// // add 10 to the value if the key isn't "a"
1168 /// for (key, value) in map.iter_mut() {
1169 /// if key != &"a" {
1174 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1175 pub fn iter_mut<'a>(&'a mut self) -> IterMut<'a, K, V> {
1176 let len = self.len();
// Mutably-borrowing traversal over the root node.
1179 lca: Traverse::traverse(&mut self.root),
1180 left: RingBuf::new(),
1181 right: RingBuf::new(),
1187 /// Gets an owning iterator over the entries of the map.
1192 /// use std::collections::BTreeMap;
1194 /// let mut map = BTreeMap::new();
1195 /// map.insert(1u, "a");
1196 /// map.insert(2u, "b");
1197 /// map.insert(3u, "c");
1199 /// for (key, value) in map.into_iter() {
1200 /// println!("{}: {}", key, value);
1203 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1204 pub fn into_iter(self) -> IntoIter<K, V> {
1205 let len = self.len();
// Consuming traversal: the root node is moved into the iterator.
1208 lca: Traverse::traverse(self.root),
1209 left: RingBuf::new(),
1210 right: RingBuf::new(),
1216 /// Gets an iterator over the keys of the map.
1221 /// use std::collections::BTreeMap;
1223 /// let mut a = BTreeMap::new();
1224 /// a.insert(1u, "a");
1225 /// a.insert(2u, "b");
1227 /// let keys: Vec<uint> = a.keys().cloned().collect();
1228 /// assert_eq!(keys, vec![1u,2,]);
1230 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1231 pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
// Coerced to a plain fn pointer so `Keys`'s inner `Map` type is nameable
// (a closure would have an anonymous, unnameable type here).
1232 fn first<A, B>((a, _): (A, B)) -> A { a }
1233 let first: fn((&'a K, &'a V)) -> &'a K = first; // coerce to fn pointer
1235 Keys { inner: self.iter().map(first) }
1238 /// Gets an iterator over the values of the map.
1243 /// use std::collections::BTreeMap;
1245 /// let mut a = BTreeMap::new();
1246 /// a.insert(1u, "a");
1247 /// a.insert(2u, "b");
1249 /// let values: Vec<&str> = a.values().cloned().collect();
1250 /// assert_eq!(values, vec!["a","b"]);
1252 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1253 pub fn values<'a>(&'a self) -> Values<'a, K, V> {
// Same fn-pointer coercion trick as `keys` above, projecting the value.
1254 fn second<A, B>((_, b): (A, B)) -> B { b }
1255 let second: fn((&'a K, &'a V)) -> &'a V = second; // coerce to fn pointer
1257 Values { inner: self.iter().map(second) }
1260 /// Return the number of elements in the map.
1265 /// use std::collections::BTreeMap;
1267 /// let mut a = BTreeMap::new();
1268 /// assert_eq!(a.len(), 0);
1269 /// a.insert(1u, "a");
1270 /// assert_eq!(a.len(), 1);
1272 #[unstable = "matches collection reform specification, waiting for dust to settle"]
// O(1): the length is maintained as a field, not computed by walking the tree.
1273 pub fn len(&self) -> uint { self.length }
1275 /// Return true if the map contains no elements.
1280 /// use std::collections::BTreeMap;
1282 /// let mut a = BTreeMap::new();
1283 /// assert!(a.is_empty());
1284 /// a.insert(1u, "a");
1285 /// assert!(!a.is_empty());
1287 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1288 pub fn is_empty(&self) -> bool { self.len() == 0 }
// Entry API: one tree search yields either an Occupied or Vacant handle, so
// the caller can read-modify-write without a second lookup.
// NOTE(review): many lines of this method are missing from this excerpt
// (the search loop scaffolding, `GoDown` arm header, and closing braces);
// the visible `Continuation::{Continue, Finished}` values drive an outer
// loop that restarts the search one level deeper on `Continue`.
1291 impl<K: Ord, V> BTreeMap<K, V> {
1292 /// Gets the given key's corresponding entry in the map for in-place manipulation.
1297 /// use std::collections::BTreeMap;
1298 /// use std::collections::btree_map::Entry;
1300 /// let mut count: BTreeMap<&str, uint> = BTreeMap::new();
1302 /// // count the number of occurrences of letters in the vec
1303 /// for x in vec!["a","b","a","c","a","b"].iter() {
1304 /// match count.entry(*x) {
1305 /// Entry::Vacant(view) => {
1308 /// Entry::Occupied(mut view) => {
1309 /// let v = view.get_mut();
1315 /// assert_eq!(count["a"], 3u);
1317 pub fn entry<'a>(&'a mut self, mut key: K) -> Entry<'a, K, V> {
1318 // same basic logic of `swap` and `pop`, blended together
1319 let mut stack = stack::PartialSearchStack::new(self);
1321 let result = stack.with(move |pusher, node| {
1322 return match Node::search(node, &key) {
// Key already present: seal the stack into an Occupied handle.
1325 Finished(Occupied(OccupiedEntry {
1326 stack: pusher.seal(handle)
1330 match handle.force() {
// Bottomed out at a leaf: this is the insertion point for a Vacant entry.
1331 Leaf(leaf_handle) => {
1332 Finished(Vacant(VacantEntry {
1333 stack: pusher.seal(leaf_handle),
// Internal node: push the edge and continue searching one level down.
1337 Internal(internal_handle) => {
1339 pusher.push(internal_handle),
1348 Finished(finished) => return finished,
1349 Continue((new_stack, renewed_key)) => {
// Test-module fragment (the `#[cfg(test)] mod` header is missing from this
// excerpt, as are `#[test]` attributes and some loop/brace lines).
1366 use super::{BTreeMap, Occupied, Vacant};
// Large-scale smoke test: insert `size` entries, verify lookups, overwrite
// them all, then remove in two passes (evens first, then the rest),
// checking `len()` bookkeeping at every step.
1369 fn test_basic_large() {
1370 let mut map = BTreeMap::new();
1372 assert_eq!(map.len(), 0);
1374 for i in range(0, size) {
// First insert of a fresh key returns None.
1375 assert_eq!(map.insert(i, 10*i), None);
1376 assert_eq!(map.len(), i + 1);
1379 for i in range(0, size) {
1380 assert_eq!(map.get(&i).unwrap(), &(i*10));
1383 for i in range(size, size*2) {
1384 assert_eq!(map.get(&i), None);
1387 for i in range(0, size) {
// Re-insert returns the previous value and leaves len unchanged.
1388 assert_eq!(map.insert(i, 100*i), Some(10*i));
1389 assert_eq!(map.len(), size);
1392 for i in range(0, size) {
1393 assert_eq!(map.get(&i).unwrap(), &(i*100));
1396 for i in range(0, size/2) {
1397 assert_eq!(map.remove(&(i*2)), Some(i*200));
1398 assert_eq!(map.len(), size - i - 1);
1401 for i in range(0, size/2) {
1402 assert_eq!(map.get(&(2*i)), None);
1403 assert_eq!(map.get(&(2*i+1)).unwrap(), &(i*200 + 100));
1406 for i in range(0, size/2) {
// Removing an already-removed key is a no-op returning None.
1407 assert_eq!(map.remove(&(2*i)), None);
1408 assert_eq!(map.remove(&(2*i+1)), Some(i*200 + 100));
1409 assert_eq!(map.len(), size/2 - i - 1);
// Tiny hand-written scenario covering insert/overwrite/remove on 1-2 keys.
1414 fn test_basic_small() {
1415 let mut map = BTreeMap::new();
1416 assert_eq!(map.remove(&1), None);
1417 assert_eq!(map.get(&1), None);
1418 assert_eq!(map.insert(1u, 1u), None);
1419 assert_eq!(map.get(&1), Some(&1));
1420 assert_eq!(map.insert(1, 2), Some(1));
1421 assert_eq!(map.get(&1), Some(&2));
1422 assert_eq!(map.insert(2, 4), None);
1423 assert_eq!(map.get(&2), Some(&4));
1424 assert_eq!(map.remove(&1), Some(2));
1425 assert_eq!(map.remove(&2), Some(4));
1426 assert_eq!(map.remove(&1), None);
// Forward-iteration test fragment (the enclosing `fn test_iter` header and
// `#[test]` attribute are missing from this excerpt). Checks `iter`,
// `iter_mut`, and `into_iter` all yield keys in ascending order with an
// exact, decrementing size_hint.
1434 let mut map: BTreeMap<uint, uint> = Vec::from_fn(size, |i| (i, i)).into_iter().collect();
1437 let mut iter = map.iter();
1438 for i in range(0, size) {
1439 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1440 assert_eq!(iter.next().unwrap(), (&i, &i));
1442 assert_eq!(iter.size_hint(), (0, Some(0)));
1443 assert_eq!(iter.next(), None);
1447 let mut iter = map.iter_mut();
1448 for i in range(0, size) {
1449 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
// `i + 0` forces a temporary so a `&mut` can be taken for the comparison.
1450 assert_eq!(iter.next().unwrap(), (&i, &mut (i + 0)));
1452 assert_eq!(iter.size_hint(), (0, Some(0)));
1453 assert_eq!(iter.next(), None);
1457 let mut iter = map.into_iter();
1458 for i in range(0, size) {
1459 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1460 assert_eq!(iter.next().unwrap(), (i, i));
1462 assert_eq!(iter.size_hint(), (0, Some(0)));
1463 assert_eq!(iter.next(), None);
// Reverse-iteration counterpart of test_iter: `.rev()` over each iterator
// flavor must yield keys in descending order with the same exact size_hint.
// NOTE(review): `#[test]` attributes and some brace lines are missing from
// this excerpt.
1469 fn test_iter_rev() {
1473 let mut map: BTreeMap<uint, uint> = Vec::from_fn(size, |i| (i, i)).into_iter().collect();
1476 let mut iter = map.iter().rev();
1477 for i in range(0, size) {
1478 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1479 assert_eq!(iter.next().unwrap(), (&(size - i - 1), &(size - i - 1)));
1481 assert_eq!(iter.size_hint(), (0, Some(0)));
1482 assert_eq!(iter.next(), None);
1486 let mut iter = map.iter_mut().rev();
1487 for i in range(0, size) {
1488 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1489 assert_eq!(iter.next().unwrap(), (&(size - i - 1), &mut(size - i - 1)));
1491 assert_eq!(iter.size_hint(), (0, Some(0)));
1492 assert_eq!(iter.next(), None);
1496 let mut iter = map.into_iter().rev();
1497 for i in range(0, size) {
1498 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1499 assert_eq!(iter.next().unwrap(), (size - i - 1, size - i - 1));
1501 assert_eq!(iter.size_hint(), (0, Some(0)));
1502 assert_eq!(iter.next(), None);
// Entry-API test fragment (enclosing `fn test_entry` header missing from this
// excerpt). Exercises each Occupied/Vacant operation and checks the map state
// and length after every one.
1509 let xs = [(1i, 10i), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
1511 let mut map: BTreeMap<int, int> = xs.iter().map(|&x| x).collect();
1513 // Existing key (insert)
1514 match map.entry(1) {
1515 Vacant(_) => unreachable!(),
1516 Occupied(mut view) => {
1517 assert_eq!(view.get(), &10);
// `set` on an occupied entry returns the displaced old value.
1518 assert_eq!(view.set(100), 10);
1521 assert_eq!(map.get(&1).unwrap(), &100);
1522 assert_eq!(map.len(), 6);
1525 // Existing key (update)
1526 match map.entry(2) {
1527 Vacant(_) => unreachable!(),
1528 Occupied(mut view) => {
1529 let v = view.get_mut();
1533 assert_eq!(map.get(&2).unwrap(), &200);
1534 assert_eq!(map.len(), 6);
1536 // Existing key (take)
1537 match map.entry(3) {
1538 Vacant(_) => unreachable!(),
// `take` removes the entry from the map and yields the owned value.
1540 assert_eq!(view.take(), 30);
1543 assert_eq!(map.get(&3), None);
1544 assert_eq!(map.len(), 5);
1547 // Inexistent key (insert)
1548 match map.entry(10) {
1549 Occupied(_) => unreachable!(),
// `set` on a vacant entry inserts and returns a &mut to the new value.
1551 assert_eq!(*view.set(1000), 1000);
1554 assert_eq!(map.get(&10).unwrap(), &1000);
1555 assert_eq!(map.len(), 6);
// Benchmark-module fragment (the `#[cfg(test)] mod bench` header and the
// `#[bench]` attributes are missing from this excerpt). Each bench delegates
// to a shared harness (`insert_rand_n` etc.) with insert/remove or
// insert/lookup closures, at sizes 100 and 10_000.
1567 use std::rand::{weak_rng, Rng};
1568 use test::{Bencher, black_box};
1570 use super::BTreeMap;
1571 use bench::{insert_rand_n, insert_seq_n, find_rand_n, find_seq_n};
// Random-order insert/remove churn, 100 keys.
1574 pub fn insert_rand_100(b: &mut Bencher) {
1575 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1576 insert_rand_n(100, &mut m, b,
1577 |m, i| { m.insert(i, 1); },
1578 |m, i| { m.remove(&i); });
// Random-order insert/remove churn, 10_000 keys.
1582 pub fn insert_rand_10_000(b: &mut Bencher) {
1583 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1584 insert_rand_n(10_000, &mut m, b,
1585 |m, i| { m.insert(i, 1); },
1586 |m, i| { m.remove(&i); });
// Sequential-key insert/remove, 100 keys.
1591 pub fn insert_seq_100(b: &mut Bencher) {
1592 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1593 insert_seq_n(100, &mut m, b,
1594 |m, i| { m.insert(i, 1); },
1595 |m, i| { m.remove(&i); });
// Sequential-key insert/remove, 10_000 keys.
1599 pub fn insert_seq_10_000(b: &mut Bencher) {
1600 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1601 insert_seq_n(10_000, &mut m, b,
1602 |m, i| { m.insert(i, 1); },
1603 |m, i| { m.remove(&i); });
// Random-key lookup, 100 keys.
1608 pub fn find_rand_100(b: &mut Bencher) {
1609 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1610 find_rand_n(100, &mut m, b,
1611 |m, i| { m.insert(i, 1); },
1612 |m, i| { m.get(&i); });
// Random-key lookup, 10_000 keys.
1616 pub fn find_rand_10_000(b: &mut Bencher) {
1617 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1618 find_rand_n(10_000, &mut m, b,
1619 |m, i| { m.insert(i, 1); },
1620 |m, i| { m.get(&i); });
// Sequential-key lookup, 100 keys.
1625 pub fn find_seq_100(b: &mut Bencher) {
1626 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1627 find_seq_n(100, &mut m, b,
1628 |m, i| { m.insert(i, 1); },
1629 |m, i| { m.get(&i); });
// Sequential-key lookup, 10_000 keys.
1633 pub fn find_seq_10_000(b: &mut Bencher) {
1634 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1635 find_seq_n(10_000, &mut m, b,
1636 |m, i| { m.insert(i, 1); },
1637 |m, i| { m.get(&i); });
// Shared iteration benchmark: fill with `size` random pairs, then time a
// full `iter()` pass (body lines using `black_box` are missing from this
// excerpt).
1640 fn bench_iter(b: &mut Bencher, size: uint) {
1641 let mut map = BTreeMap::<uint, uint>::new();
1642 let mut rng = weak_rng();
1644 for _ in range(0, size) {
1645 map.insert(rng.gen(), rng.gen());
1649 for entry in map.iter() {
// Size variants of the iteration benchmark (bodies partially missing here).
1656 pub fn iter_20(b: &mut Bencher) {
1661 pub fn iter_1000(b: &mut Bencher) {
1662 bench_iter(b, 1000);
1666 pub fn iter_100000(b: &mut Bencher) {
1667 bench_iter(b, 100000);