1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // This implementation is largely based on the high-level description and analysis of B-Trees
12 // found in *Open Data Structures* (ODS). Although our implementation does not use any of
13 // the source found in ODS, if one wishes to review the high-level design of this structure, it
14 // can be freely downloaded at http://opendatastructures.org/. Its contents are as of this
15 // writing (August 2014) freely licensed under the following Creative Commons Attribution
16 // License: [CC BY 2.5 CA](http://creativecommons.org/licenses/by/2.5/ca/).
18 pub use self::Entry::*;
24 use core::borrow::BorrowFrom;
25 use std::hash::{Writer, Hash};
26 use core::default::Default;
27 use core::{iter, fmt, mem};
31 use ring_buf::RingBuf;
33 use self::Continuation::{Continue, Finished};
35 // FIXME(conventions): implement bounded iterators
37 /// A map based on a B-Tree.
39 /// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
40 /// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
41 /// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
42 /// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
43 /// is done is *very* inefficient for modern computer architectures. In particular, every element
44 /// is stored in its own individually heap-allocated node. This means that every single insertion
45 /// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
46 /// are both notably expensive things to do in practice, we are forced to at very least reconsider
49 /// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
50 /// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
51 /// searches. However, this does mean that searches will have to do *more* comparisons on average.
52 /// The precise number of comparisons depends on the node search strategy used. For optimal cache
53 /// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
54 /// the node using binary search. As a compromise, one could also perform a linear search
55 /// that initially only checks every i<sup>th</sup> element for some choice of i.
57 /// Currently, our implementation simply performs naive linear search. This provides excellent
58 /// performance on *small* nodes of elements which are cheap to compare. However in the future we
59 /// would like to further explore choosing the optimal search strategy based on the choice of B,
60 /// and possibly other factors. Using linear search, searching for a random element is expected
61 /// to take O(B log<sub>B</sub>n) comparisons, which is generally worse than a BST. In practice,
62 /// however, performance is excellent. `BTreeMap` is able to readily outperform `TreeMap` under
63 /// many workloads, and is competitive where it doesn't. BTreeMap also generally *scales* better
64 /// than TreeMap, making it more appropriate for large datasets.
66 /// However, `TreeMap` may still be more appropriate to use in many contexts. If elements are very
67 /// large or expensive to compare, `TreeMap` may be more appropriate. It won't allocate any
68 /// more space than is needed, and will perform the minimal number of comparisons necessary.
69 /// `TreeMap` also provides much better performance stability guarantees. Generally, very few
70 /// changes need to be made to update a BST, and two updates are expected to take about the same
71 /// amount of time on roughly equal sized BSTs. However a B-Tree's performance is much more
72 /// amortized. If a node is overfull, it must be split into two nodes. If a node is underfull, it
73 /// may be merged with another. Both of these operations are relatively expensive to perform, and
74 /// it's possible to force one to occur at every single level of the tree in a single insertion or
75 /// deletion. In fact, a malicious or otherwise unlucky sequence of insertions and deletions can
76 /// force this degenerate behaviour to occur on every operation. While the total amount of work
77 /// done on each operation isn't *catastrophic*, and *is* still bounded by O(B log<sub>B</sub>n),
78 /// it is certainly much slower when it does.
80 pub struct BTreeMap<K, V> {
88 /// An abstract base over which all other BTree iterators are built.
88 struct AbsEntries<T> {
95 /// An iterator over a BTreeMap's entries.
96 pub struct Entries<'a, K: 'a, V: 'a> {
97 inner: AbsEntries<Traversal<'a, K, V>>
100 /// A mutable iterator over a BTreeMap's entries.
101 pub struct MutEntries<'a, K: 'a, V: 'a> {
102 inner: AbsEntries<MutTraversal<'a, K, V>>
105 /// An owning iterator over a BTreeMap's entries.
106 pub struct MoveEntries<K, V> {
107 inner: AbsEntries<MoveTraversal<K, V>>
110 /// An iterator over a BTreeMap's keys.
111 pub struct Keys<'a, K: 'a, V: 'a> {
112 inner: Map<(&'a K, &'a V), &'a K, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
115 /// An iterator over a BTreeMap's values.
116 pub struct Values<'a, K: 'a, V: 'a> {
117 inner: Map<(&'a K, &'a V), &'a V, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
120 /// A view into a single entry in a map, which may either be vacant or occupied.
121 pub enum Entry<'a, K:'a, V:'a> {
123 Vacant(VacantEntry<'a, K, V>),
124 /// An occupied Entry
125 Occupied(OccupiedEntry<'a, K, V>),
129 pub struct VacantEntry<'a, K:'a, V:'a> {
131 stack: stack::VacantSearchStack<'a, K, V>,
134 /// An occupied Entry.
135 pub struct OccupiedEntry<'a, K:'a, V:'a> {
136 stack: stack::OccupiedSearchStack<'a, K, V>,
139 impl<K: Ord, V> BTreeMap<K, V> {
140 /// Makes a new empty BTreeMap with a reasonable choice for B.
141 #[unstable = "matches collection reform specification, waiting for dust to settle"]
142 pub fn new() -> BTreeMap<K, V> {
143 //FIXME(Gankro): Tune this as a function of size_of<K/V>?
147 /// Makes a new empty BTreeMap with the given B.
149 /// B cannot be less than 2.
150 pub fn with_b(b: uint) -> BTreeMap<K, V> {
151 assert!(b > 1, "B must be greater than 1");
155 root: Node::make_leaf_root(b),
160 /// Clears the map, removing all values.
165 /// use std::collections::BTreeMap;
167 /// let mut a = BTreeMap::new();
168 /// a.insert(1u, "a");
170 /// assert!(a.is_empty());
172 #[unstable = "matches collection reform specification, waiting for dust to settle"]
173 pub fn clear(&mut self) {
175 // avoid recursive destructors by manually traversing the tree
176 for _ in mem::replace(self, BTreeMap::with_b(b)).into_iter() {};
179 /// Deprecated: renamed to `get`.
180 #[deprecated = "renamed to `get`"]
181 pub fn find(&self, key: &K) -> Option<&V> {
185 // Searching in a B-Tree is pretty straightforward.
187 // Start at the root. Try to find the key in the current node. If we find it, return it.
188 // If it's not in there, follow the edge *before* the smallest key larger than
189 // the search key. If no such key exists (they're *all* smaller), then just take the last
190 // edge in the node. If we're in a leaf and we don't find our key, then it's not
193 /// Returns a reference to the value corresponding to the key.
195 /// The key may be any borrowed form of the map's key type, but the ordering
196 /// on the borrowed form *must* match the ordering on the key type.
201 /// use std::collections::BTreeMap;
203 /// let mut map = BTreeMap::new();
204 /// map.insert(1u, "a");
205 /// assert_eq!(map.get(&1), Some(&"a"));
206 /// assert_eq!(map.get(&2), None);
208 #[unstable = "matches collection reform specification, waiting for dust to settle"]
209 pub fn get<Sized? Q>(&self, key: &Q) -> Option<&V> where Q: BorrowFrom<K> + Ord {
210 let mut cur_node = &self.root;
212 match Node::search(cur_node, key) {
213 Found(handle) => return Some(handle.into_kv().1),
214 GoDown(handle) => match handle.into_edge() {
217 cur_node = next_node;
225 /// Returns true if the map contains a value for the specified key.
227 /// The key may be any borrowed form of the map's key type, but the ordering
228 /// on the borrowed form *must* match the ordering on the key type.
233 /// use std::collections::BTreeMap;
235 /// let mut map = BTreeMap::new();
236 /// map.insert(1u, "a");
237 /// assert_eq!(map.contains_key(&1), true);
238 /// assert_eq!(map.contains_key(&2), false);
240 #[unstable = "matches collection reform specification, waiting for dust to settle"]
241 pub fn contains_key<Sized? Q>(&self, key: &Q) -> bool where Q: BorrowFrom<K> + Ord {
242 self.get(key).is_some()
245 /// Deprecated: renamed to `get_mut`.
246 #[deprecated = "renamed to `get_mut`"]
247 pub fn find_mut(&mut self, key: &K) -> Option<&mut V> {
251 /// Returns a mutable reference to the value corresponding to the key.
253 /// The key may be any borrowed form of the map's key type, but the ordering
254 /// on the borrowed form *must* match the ordering on the key type.
259 /// use std::collections::BTreeMap;
261 /// let mut map = BTreeMap::new();
262 /// map.insert(1u, "a");
263 /// match map.get_mut(&1) {
264 /// Some(x) => *x = "b",
267 /// assert_eq!(map[1], "b");
269 // See `get` for implementation notes, this is basically a copy-paste with mut's added
270 #[unstable = "matches collection reform specification, waiting for dust to settle"]
271 pub fn get_mut<Sized? Q>(&mut self, key: &Q) -> Option<&mut V> where Q: BorrowFrom<K> + Ord {
272 // temp_node is a Borrowck hack for having a mutable value outlive a loop iteration
273 let mut temp_node = &mut self.root;
275 let cur_node = temp_node;
276 match Node::search(cur_node, key) {
277 Found(handle) => return Some(handle.into_kv_mut().1),
278 GoDown(handle) => match handle.into_edge_mut() {
281 temp_node = next_node;
289 /// Deprecated: renamed to `insert`.
290 #[deprecated = "renamed to `insert`"]
291 pub fn swap(&mut self, key: K, value: V) -> Option<V> {
292 self.insert(key, value)
295 // Insertion in a B-Tree is a bit complicated.
297 // First we do the same kind of search described in `find`. But we need to maintain a stack of
298 // all the nodes/edges in our search path. If we find a match for the key we're trying to
299 // insert, just swap the vals and return the old ones. However, when we bottom out in a leaf,
300 // we attempt to insert our key-value pair at the same location we would want to follow another
303 // If the node has room, then this is done in the obvious way by shifting elements. However,
304 // if the node itself is full, we split node into two, and give its median key-value
305 // pair to its parent to insert the new node with. Of course, the parent may also be
306 // full, and insertion can propagate until we reach the root. If we reach the root, and
307 // it is *also* full, then we split the root and place the two nodes under a newly made root.
309 // Note that we subtly deviate from Open Data Structures in our implementation of split.
310 // ODS describes inserting into the node *regardless* of its capacity, and then
311 // splitting *afterwards* if it happens to be overfull. However, this is inefficient.
312 // Instead, we split beforehand, and then insert the key-value pair into the appropriate
313 // result node. This has two consequences:
315 // 1) While ODS produces a left node of size B-1, and a right node of size B,
316 // we may potentially reverse this. However, this shouldn't affect the analysis.
318 // 2) While ODS may potentially return the pair we *just* inserted after
319 // the split, we will never do this. Again, this shouldn't affect the analysis.
321 /// Inserts a key-value pair into the map. If the key already had a value
322 /// present in the map, that value is returned. Otherwise, `None` is returned.
327 /// use std::collections::BTreeMap;
329 /// let mut map = BTreeMap::new();
330 /// assert_eq!(map.insert(37u, "a"), None);
331 /// assert_eq!(map.is_empty(), false);
333 /// map.insert(37, "b");
334 /// assert_eq!(map.insert(37, "c"), Some("b"));
335 /// assert_eq!(map[37], "c");
337 #[unstable = "matches collection reform specification, waiting for dust to settle"]
338 pub fn insert(&mut self, mut key: K, mut value: V) -> Option<V> {
339 // This is a stack of rawptrs to nodes paired with indices, respectively
340 // representing the nodes and edges of our search path. We have to store rawptrs
341 // because as far as Rust is concerned, we can mutate aliased data with such a
342 // stack. It is of course correct, but what it doesn't know is that we will only
343 // be popping and using these ptrs one at a time in child-to-parent order. The alternative
344 // to doing this is to take the Nodes from their parents. This actually makes
345 // borrowck *really* happy and everything is pretty smooth. However, this creates
346 // *tons* of pointless writes, and requires us to always walk all the way back to
347 // the root after an insertion, even if we only needed to change a leaf. Therefore,
348 // we accept this potential unsafety and complexity in the name of performance.
350 // Regardless, the actual dangerous logic is completely abstracted away from BTreeMap
351 // by the stack module. All it can do is immutably read nodes, and ask the search stack
352 // to proceed down some edge by index. This makes the search logic we'll be reusing in a
353 // few different methods much neater, and of course drastically improves safety.
354 let mut stack = stack::PartialSearchStack::new(self);
357 let result = stack.with(move |pusher, node| {
358 // Same basic logic as found in `find`, but with PartialSearchStack mediating the
359 // actual nodes for us
360 match Node::search(node, &key) {
361 Found(mut handle) => {
362 // Perfect match, swap the values and return the old one
363 mem::swap(handle.val_mut(), &mut value);
364 return Finished(Some(value));
367 // We need to keep searching, try to get the search stack
368 // to go down further
369 match pusher.push(handle) {
370 stack::Done(new_stack) => {
371 // We've reached a leaf, perform the insertion here
372 new_stack.insert(key, value);
373 return Finished(None);
375 stack::Grew(new_stack) => {
376 // We've found the subtree to insert this key/value pair in,
378 return Continue((new_stack, key, value));
385 Finished(ret) => { return ret; },
386 Continue((new_stack, renewed_key, renewed_val)) => {
395 // Deletion is the most complicated operation for a B-Tree.
397 // First we do the same kind of search described in
398 // `find`. But we need to maintain a stack of all the nodes/edges in our search path.
399 // If we don't find the key, then we just return `None` and do nothing. If we do find the
400 // key, we perform two operations: remove the item, and then possibly handle underflow.
402 // # removing the item
403 // If the node is a leaf, we just remove the item, and shift
404 // any items after it back to fill the hole.
406 // If the node is an internal node, we *swap* the item with the smallest item in
407 // its right subtree (which must reside in a leaf), and then revert to the leaf
410 // # handling underflow
411 // After removing an item, there may be too few items in the node. We want nodes
412 // to be mostly full for efficiency, although we make an exception for the root, which
413 // may have as few as one item. If this is the case, we may first try to steal
414 // an item from our left or right neighbour.
416 // To steal from the left (right) neighbour,
417 // we take the largest (smallest) item and child from it. We then swap the taken item
418 // with the item in their mutual parent that separates them, and then insert the
419 // parent's item and the taken child into the first (last) index of the underflowed node.
421 // However, stealing has the possibility of underflowing our neighbour. If this is the
422 // case, we instead *merge* with our neighbour. This of course reduces the number of
423 // children in the parent. Therefore, we also steal the item that separates the now
424 // merged nodes, and insert it into the merged node.
426 // Merging may cause the parent to underflow. If this is the case, then we must repeat
427 // the underflow handling process on the parent. If merging merges the last two children
428 // of the root, then we replace the root with the merged node.
430 /// Deprecated: renamed to `remove`.
431 #[deprecated = "renamed to `remove`"]
432 pub fn pop(&mut self, key: &K) -> Option<V> {
436 /// Removes a key from the map, returning the value at the key if the key
437 /// was previously in the map.
439 /// The key may be any borrowed form of the map's key type, but the ordering
440 /// on the borrowed form *must* match the ordering on the key type.
445 /// use std::collections::BTreeMap;
447 /// let mut map = BTreeMap::new();
448 /// map.insert(1u, "a");
449 /// assert_eq!(map.remove(&1), Some("a"));
450 /// assert_eq!(map.remove(&1), None);
452 #[unstable = "matches collection reform specification, waiting for dust to settle"]
453 pub fn remove<Sized? Q>(&mut self, key: &Q) -> Option<V> where Q: BorrowFrom<K> + Ord {
454 // See `swap` for a more thorough description of the stuff going on in here
455 let mut stack = stack::PartialSearchStack::new(self);
457 let result = stack.with(move |pusher, node| {
458 match Node::search(node, key) {
460 // Perfect match. Terminate the stack here, and remove the entry
461 return Finished(Some(pusher.seal(handle).remove()));
464 // We need to keep searching, try to go down the next edge
465 match pusher.push(handle) {
466 // We're at a leaf; the key isn't in here
467 stack::Done(_) => return Finished(None),
468 stack::Grew(new_stack) => return Continue(new_stack)
474 Finished(ret) => return ret,
475 Continue(new_stack) => stack = new_stack
481 /// A helper enum useful for deciding whether to continue a loop since we can't
482 /// return from a closure
483 enum Continuation<A, B> {
488 /// The stack module provides a safe interface for constructing and manipulating a stack of ptrs
489 /// to nodes. By using this module much better safety guarantees can be made, and more search
490 /// boilerplate gets cut out.
492 pub use self::PushResult::*;
493 use core::prelude::*;
494 use core::kinds::marker;
497 use super::super::node::*;
500 /// A generic mutable reference, identical to `&mut` except for the fact that its lifetime
501 /// parameter is invariant. This means that wherever an `IdRef` is expected, only an `IdRef`
502 /// with the exact requested lifetime can be used. This is in contrast to normal references,
503 /// where `&'static` can be used in any function expecting any lifetime reference.
504 pub struct IdRef<'id, T: 'id> {
506 marker: marker::InvariantLifetime<'id>
509 impl<'id, T> Deref<T> for IdRef<'id, T> {
510 fn deref(&self) -> &T {
515 impl<'id, T> DerefMut<T> for IdRef<'id, T> {
516 fn deref_mut(&mut self) -> &mut T {
521 type StackItem<K, V> = EdgeNodeHandle<*mut Node<K, V>>;
522 type Stack<K, V> = Vec<StackItem<K, V>>;
524 /// A PartialSearchStack handles the construction of a search stack.
525 pub struct PartialSearchStack<'a, K:'a, V:'a> {
526 map: &'a mut BTreeMap<K, V>,
528 next: *mut Node<K, V>,
531 /// An OccupiedSearchStack represents a full path to an element of interest. It provides methods
532 /// for manipulating the element at the top of its stack.
533 pub struct OccupiedSearchStack<'a, K:'a, V:'a> {
534 map: &'a mut BTreeMap<K, V>,
536 top: KVNodeHandle<*mut Node<K, V>>,
539 /// A VacantSearchStack represents a full path to a spot for a new element of interest. It
540 /// provides a method for inserting an element at the top of its stack.
541 pub struct VacantSearchStack<'a, K:'a, V:'a> {
542 map: &'a mut BTreeMap<K, V>,
544 top: EdgeNodeHandle<*mut Node<K, V>>,
547 /// A `PartialSearchStack` that doesn't hold a reference to the next node, and is just
548 /// waiting for a `Handle` to that next node to be pushed. See `PartialSearchStack::with`
549 /// for more details.
550 pub struct Pusher<'id, 'a, K:'a, V:'a> {
551 map: &'a mut BTreeMap<K, V>,
553 marker: marker::InvariantLifetime<'id>
556 /// The result of asking a PartialSearchStack to push another node onto itself. Either it
557 /// Grew, in which case it's still Partial, or it found its last node was actually a leaf, in
558 /// which case it seals itself and yields a complete SearchStack.
559 pub enum PushResult<'a, K:'a, V:'a> {
560 Grew(PartialSearchStack<'a, K, V>),
561 Done(VacantSearchStack<'a, K, V>),
564 impl<'a, K, V> PartialSearchStack<'a, K, V> {
565 /// Creates a new PartialSearchStack from a BTreeMap by initializing the stack with the
566 /// root of the tree.
567 pub fn new<'a>(map: &'a mut BTreeMap<K, V>) -> PartialSearchStack<'a, K, V> {
568 let depth = map.depth;
571 next: &mut map.root as *mut _,
573 stack: Vec::with_capacity(depth),
577 /// Breaks up the stack into a `Pusher` and the next `Node`, allowing the given closure
578 /// to interact with, search, and finally push the `Node` onto the stack. The passed in
579 /// closure must be polymorphic on the `'id` lifetime parameter, as this statically
580 /// ensures that only `Handle`s from the correct `Node` can be pushed.
582 /// The reason this works is that the `Pusher` has an `'id` parameter, and will only accept
583 /// handles with the same `'id`. The closure could only get references with that lifetime
584 /// through its arguments or through some other `IdRef` that it has lying around. However,
585 /// no other `IdRef` could possibly work - because the `'id` is held in an invariant
586 /// parameter, it would need to have precisely the correct lifetime, which would mean that
587 /// at least one of the calls to `with` wouldn't be properly polymorphic, wanting a
588 /// specific lifetime instead of the one that `with` chooses to give it.
590 /// See also Haskell's `ST` monad, which uses a similar trick.
591 pub fn with<T, F: for<'id> FnOnce(Pusher<'id, 'a, K, V>,
592 IdRef<'id, Node<K, V>>) -> T>(self, closure: F) -> T {
593 let pusher = Pusher {
596 marker: marker::InvariantLifetime
599 inner: unsafe { &mut *self.next },
600 marker: marker::InvariantLifetime
603 closure(pusher, node)
607 impl<'id, 'a, K, V> Pusher<'id, 'a, K, V> {
608 /// Pushes the requested child of the stack's current top on top of the stack. If the child
609 /// exists, then a new PartialSearchStack is yielded. Otherwise, a VacantSearchStack is
611 pub fn push(mut self, mut edge: EdgeNodeHandle<IdRef<'id, Node<K, V>>>)
612 -> PushResult<'a, K, V> {
613 let to_insert = edge.as_raw();
614 match edge.edge_mut() {
616 Done(VacantSearchStack {
623 self.stack.push(to_insert);
624 Grew(PartialSearchStack {
627 next: node as *mut _,
633 /// Converts the PartialSearchStack into an OccupiedSearchStack.
634 pub fn seal(self, mut node: KVNodeHandle<IdRef<'id, Node<K, V>>>)
635 -> OccupiedSearchStack<'a, K, V> {
636 OccupiedSearchStack {
644 impl<'a, K, V> OccupiedSearchStack<'a, K, V> {
645 /// Gets a reference to the value the stack points to.
646 pub fn peek(&self) -> &V {
647 unsafe { self.top.from_raw().into_kv().1 }
650 /// Gets a mutable reference to the value the stack points to.
651 pub fn peek_mut(&mut self) -> &mut V {
652 unsafe { self.top.from_raw_mut().into_kv_mut().1 }
655 /// Converts the stack into a mutable reference to the value it points to, with a lifetime
656 /// tied to the original tree.
657 pub fn into_top(mut self) -> &'a mut V {
659 mem::copy_mut_lifetime(
661 self.top.from_raw_mut().val_mut()
666 /// Removes the key and value in the top element of the stack, then handles underflows as
667 /// described in BTree's pop function.
668 pub fn remove(mut self) -> V {
669 // Ensure that the search stack goes to a leaf. This is necessary to perform deletion
670 // in a BTree. Note that this may put the tree in an inconsistent state (further
671 // described in leafify's comments), but this is immediately fixed by
672 // removing the value we want to remove
678 let mut stack = self.stack;
680 // Remove the key-value pair from the leaf that this search stack points to.
681 // Then, note if the leaf is underfull, and promptly forget the leaf and its ptr
682 // to avoid ownership issues.
683 let (value, mut underflow) = unsafe {
684 let (_, value) = self.top.from_raw_mut().remove_as_leaf();
685 let underflow = self.top.from_raw().node().is_underfull();
692 // We've reached the root, so no matter what, we're done. We manually
693 // access the root via the tree itself to avoid creating any dangling
695 if map.root.len() == 0 && !map.root.is_leaf() {
696 // We've emptied out the root, so make its only child the new root.
697 // If it's a leaf, we just let it become empty.
699 map.root.into_edge();
703 Some(mut handle) => {
705 // Underflow! Handle it!
707 handle.from_raw_mut().handle_underflow();
708 underflow = handle.from_raw().node().is_underfull();
719 /// Subroutine for removal. Takes a search stack for a key that might terminate at an
720 /// internal node, and mutates the tree and search stack to *make* it a search stack
721 /// for that same key that *does* terminate at a leaf. If the mutation occurs, then this
722 /// leaves the tree in an inconsistent state that must be repaired by the caller by
723 /// removing the entry in question. Specifically the key-value pair and its successor will
725 fn leafify(&mut self) {
727 let mut top_raw = self.top;
728 let mut top = top_raw.from_raw_mut();
730 let key_ptr = top.key_mut() as *mut _;
731 let val_ptr = top.val_mut() as *mut _;
733 // Try to go into the right subtree of the found key to find its successor
734 let mut right_edge = top.right_edge();
735 let right_edge_raw = right_edge.as_raw();
736 match right_edge.edge_mut() {
738 // We're a proper leaf stack, nothing to do
740 Some(mut temp_node) => {
741 //We're not a proper leaf stack, let's get to work.
742 self.stack.push(right_edge_raw);
744 // Walk into the smallest subtree of this node
745 let node = temp_node;
748 // This node is a leaf, do the swap and return
749 let mut handle = node.kv_handle(0);
750 self.top = handle.as_raw();
751 mem::swap(handle.key_mut(), &mut *key_ptr);
752 mem::swap(handle.val_mut(), &mut *val_ptr);
755 // This node is internal, go deeper
756 let mut handle = node.edge_handle(0);
757 self.stack.push(handle.as_raw());
758 temp_node = handle.into_edge_mut().unwrap();
767 impl<'a, K, V> VacantSearchStack<'a, K, V> {
768 /// Inserts the key and value into the top element in the stack, and if that node has to
769 /// split recursively inserts the split contents into the next element stack until
772 /// Assumes that the stack represents a search path from the root to a leaf.
774 /// An &mut V is returned to the inserted value, for callers that want a reference to this.
775 pub fn insert(mut self, key: K, val: V) -> &'a mut V {
777 self.map.length += 1;
779 // Insert the key and value into the leaf at the top of the stack
780 let (mut insertion, inserted_ptr) = self.top.from_raw_mut()
781 .insert_as_leaf(key, val);
786 // The last insertion went off without a hitch, no splits! We can stop
788 return &mut *inserted_ptr;
790 Split(key, val, right) => match self.stack.pop() {
791 // The last insertion triggered a split, so get the next element on the
792 // stack to recursively insert the split node into.
794 // The stack was empty; we've split the root, and need to make a
795 // a new one. This is done in-place because we can't move the
796 // root out of a reference to the tree.
797 Node::make_internal_root(&mut self.map.root, self.map.b,
801 return &mut *inserted_ptr;
803 Some(mut handle) => {
804 // The stack wasn't empty, do the insertion and recurse
805 insertion = handle.from_raw_mut()
806 .insert_as_internal(key, val, right);
817 impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
818 fn from_iter<T: Iterator<(K, V)>>(iter: T) -> BTreeMap<K, V> {
819 let mut map = BTreeMap::new();
825 impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
827 fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
834 impl<S: Writer, K: Hash<S>, V: Hash<S>> Hash<S> for BTreeMap<K, V> {
835 fn hash(&self, state: &mut S) {
836 for elt in self.iter() {
842 impl<K: Ord, V> Default for BTreeMap<K, V> {
843 fn default() -> BTreeMap<K, V> {
848 impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> {
849 fn eq(&self, other: &BTreeMap<K, V>) -> bool {
850 self.len() == other.len() &&
851 self.iter().zip(other.iter()).all(|(a, b)| a == b)
855 impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {}
857 impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> {
859 fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> {
860 iter::order::partial_cmp(self.iter(), other.iter())
864 impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> {
866 fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering {
867 iter::order::cmp(self.iter(), other.iter())
871 impl<K: Show, V: Show> Show for BTreeMap<K, V> {
872 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
873 try!(write!(f, "{{"));
875 for (i, (k, v)) in self.iter().enumerate() {
876 if i != 0 { try!(write!(f, ", ")); }
877 try!(write!(f, "{}: {}", *k, *v));
884 impl<K: Ord, Sized? Q, V> Index<Q, V> for BTreeMap<K, V>
885 where Q: BorrowFrom<K> + Ord
887 fn index(&self, key: &Q) -> &V {
888 self.get(key).expect("no entry found for key")
892 impl<K: Ord, Sized? Q, V> IndexMut<Q, V> for BTreeMap<K, V>
893 where Q: BorrowFrom<K> + Ord
895 fn index_mut(&mut self, key: &Q) -> &mut V {
896 self.get_mut(key).expect("no entry found for key")
900 /// Genericises over how to get the correct type of iterator from the correct type
901 /// of Node ownership.
903 fn traverse(node: N) -> Self;
906 impl<'a, K, V> Traverse<&'a Node<K, V>> for Traversal<'a, K, V> {
907 fn traverse(node: &'a Node<K, V>) -> Traversal<'a, K, V> {
912 impl<'a, K, V> Traverse<&'a mut Node<K, V>> for MutTraversal<'a, K, V> {
913 fn traverse(node: &'a mut Node<K, V>) -> MutTraversal<'a, K, V> {
918 impl<K, V> Traverse<Node<K, V>> for MoveTraversal<K, V> {
919 fn traverse(node: Node<K, V>) -> MoveTraversal<K, V> {
924 /// Represents an operation to perform inside the following iterator methods.
925 /// This is necessary to use in `next` because we want to modify self.left inside
926 /// a match that borrows it. Similarly, in `next_back` for self.right. Instead, we use this
927 /// enum to note what we want to do, and do it after the match.
// Forward iteration over the abstract entry iterator. `T` is any node
// traversal (shared/mut/owned), so this one impl backs Entries, MutEntries,
// and MoveEntries alike.
933 impl<K, V, E, T: Traverse<E> + DoubleEndedIterator<TraversalItem<K, V, E>>>
934 Iterator<(K, V)> for AbsEntries<T> {
935 // This function is pretty long, but only because there's a lot of cases to consider.
936 // Our iterator represents two search paths, left and right, to the smallest and largest
937 // elements we have yet to yield. lca represents the least common ancestor of these two paths,
938 // above-which we never walk, since everything outside it has already been consumed (or was
939 // never in the range to iterate).
941 // Note that the design of these iterators permits an *arbitrary* initial pair of min and max,
942 // making these arbitrary sub-range iterators. However the logic to construct these paths
943 // efficiently is fairly involved, so this is a FIXME. The sub-range iterators also wouldn't be
944 // able to accurately predict size, so those iterators can't implement ExactSizeIterator.
945 fn next(&mut self) -> Option<(K, V)> {
947 // We want the smallest element, so try to get the top of the left stack
948 let op = match self.left.back_mut() {
949 // The left stack is empty, so try to get the next element of the two paths
950 // LCAs (the left search path is currently a subpath of the right one)
951 None => match self.lca.next() {
952 // The lca has been exhausted, walk further down the right path
953 None => match self.right.pop_front() {
954 // The right path is exhausted, so we're done
956 // The right path had something, make that the new LCA
957 // and restart the whole process
963 // The lca yielded an edge, make that the new head of the left path
964 Some(Edge(next)) => Push(Traverse::traverse(next)),
965 // The lca yielded an entry, so yield that
966 Some(Elem(k, v)) => {
971 // The left stack wasn't empty, so continue along the node in its head
972 Some(iter) => match iter.next() {
973 // The head of the left path is empty, so Pop it off and restart the process
975 // The head of the left path yielded an edge, so make that the new head
977 Some(Edge(next)) => Push(Traverse::traverse(next)),
978 // The head of the left path yielded entry, so yield that
979 Some(Elem(k, v)) => {
986 // Handle any operation on the left stack as necessary
// Deferred stack mutation: the match above only *decided* what to do,
// because `self.left` was borrowed inside it (see the Continuation-style
// enum's doc comment). Apply the decision here, after the borrow ends.
988 Push(item) => { self.left.push_back(item); },
989 Pop => { self.left.pop_back(); },
// Exact size is known up front, so the hint is tight on both ends;
// this is what lets the concrete iterators implement ExactSizeIterator.
994 fn size_hint(&self) -> (uint, Option<uint>) {
995 (self.size, Some(self.size))
// Backward iteration: the mirror image of `next` with left/right swapped
// and `next`/`pop_front` replaced by `next_back`/`pop_front` on the
// opposite stacks. See `next` above for the detailed case analysis.
999 impl<K, V, E, T: Traverse<E> + DoubleEndedIterator<TraversalItem<K, V, E>>>
1000 DoubleEndedIterator<(K, V)> for AbsEntries<T> {
1001 // next_back is totally symmetric to next
1002 fn next_back(&mut self) -> Option<(K, V)> {
// Largest remaining element lives at the top of the right stack.
1004 let op = match self.right.back_mut() {
1005 None => match self.lca.next_back() {
1006 None => match self.left.pop_front() {
1007 None => return None,
1013 Some(Edge(next)) => Push(Traverse::traverse(next)),
1014 Some(Elem(k, v)) => {
1019 Some(iter) => match iter.next_back() {
1021 Some(Edge(next)) => Push(Traverse::traverse(next)),
1022 Some(Elem(k, v)) => {
// Apply the deferred operation to the right stack (cf. `next`).
1030 Push(item) => { self.right.push_back(item); },
1031 Pop => { self.right.pop_back(); }
// `Entries`: by-shared-reference iterator over (&K, &V). All methods are
// thin delegations to the inner AbsEntries, which does the real tree walk.
1037 impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
1038 fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
1039 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1041 impl<'a, K, V> DoubleEndedIterator<(&'a K, &'a V)> for Entries<'a, K, V> {
1042 fn next_back(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next_back() }
// Marker impl: the size hint above is exact, so this is sound.
1044 impl<'a, K, V> ExactSizeIterator<(&'a K, &'a V)> for Entries<'a, K, V> {}
// `MutEntries`: iterator over (&K, &mut V) — keys stay immutable so the
// tree's ordering invariant cannot be broken. Pure delegation again.
1047 impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
1048 fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next() }
1049 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1051 impl<'a, K, V> DoubleEndedIterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
1052 fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next_back() }
1054 impl<'a, K, V> ExactSizeIterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {}
// `MoveEntries`: owning iterator over (K, V); consumes the map's nodes as
// it goes. Delegates to the inner AbsEntries over a MoveTraversal.
1057 impl<K, V> Iterator<(K, V)> for MoveEntries<K, V> {
1058 fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
1059 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1061 impl<K, V> DoubleEndedIterator<(K, V)> for MoveEntries<K, V> {
1062 fn next_back(&mut self) -> Option<(K, V)> { self.inner.next_back() }
1064 impl<K, V> ExactSizeIterator<(K, V)> for MoveEntries<K, V> {}
// `Keys`: projects the entries iterator down to &K (the inner iterator is
// a Map adaptor built in `BTreeMap::keys`).
1067 impl<'a, K, V> Iterator<&'a K> for Keys<'a, K, V> {
1068 fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
1069 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1071 impl<'a, K, V> DoubleEndedIterator<&'a K> for Keys<'a, K, V> {
1072 fn next_back(&mut self) -> Option<(&'a K)> { self.inner.next_back() }
1074 impl<'a, K, V> ExactSizeIterator<&'a K> for Keys<'a, K, V> {}
// `Values`: projects the entries iterator down to &V (the inner iterator
// is a Map adaptor built in `BTreeMap::values`).
1077 impl<'a, K, V> Iterator<&'a V> for Values<'a, K, V> {
1078 fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
1079 fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
1081 impl<'a, K, V> DoubleEndedIterator<&'a V> for Values<'a, K, V> {
1082 fn next_back(&mut self) -> Option<(&'a V)> { self.inner.next_back() }
1084 impl<'a, K, V> ExactSizeIterator<&'a V> for Values<'a, K, V> {}
// A vacant slot found by `BTreeMap::entry`: holds the key and the sealed
// search stack recording where in the tree the key belongs.
1087 impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
1088 /// Sets the value of the entry with the VacantEntry's key,
1089 /// and returns a mutable reference to it.
1090 pub fn set(self, value: V) -> &'a mut V {
// The stored search stack already points at the insertion site, so this
// insert needs no second search of the tree.
1091 self.stack.insert(self.key, value)
// An occupied slot found by `BTreeMap::entry`: the search stack's top
// points directly at the existing key/value pair, so every accessor below
// is O(1) with no re-search.
1095 impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
1096 /// Gets a reference to the value in the entry.
1097 pub fn get(&self) -> &V {
1101 /// Gets a mutable reference to the value in the entry.
1102 pub fn get_mut(&mut self) -> &mut V {
1103 self.stack.peek_mut()
1106 /// Converts the entry into a mutable reference to its value.
// Consumes the entry so the returned borrow can live for 'a.
1107 pub fn into_mut(self) -> &'a mut V {
1108 self.stack.into_top()
1111 /// Sets the value of the entry with the OccupiedEntry's key,
1112 /// and returns the entry's old value.
1113 pub fn set(&mut self, mut value: V) -> V {
// Swap the new value into the tree; `value` now holds the old one,
// which is returned (presumably on the elided line below).
1114 mem::swap(self.stack.peek_mut(), &mut value);
1118 /// Takes the value of the entry out of the map, and returns it.
// NOTE(review): removal is delegated to the sealed stack — likely
// `self.stack.remove()` on the elided body line; verify against the
// stack module.
1119 pub fn take(self) -> V {
// Iterator constructors and size accessors. Each iterator starts with the
// root node as the lca and empty left/right side paths; AbsEntries then
// grows those paths lazily as elements are demanded.
1124 impl<K, V> BTreeMap<K, V> {
1125 /// Gets an iterator over the entries of the map.
1130 /// use std::collections::BTreeMap;
1132 /// let mut map = BTreeMap::new();
1133 /// map.insert(1u, "a");
1134 /// map.insert(2u, "b");
1135 /// map.insert(3u, "c");
1137 /// for (key, value) in map.iter() {
1138 /// println!("{}: {}", key, value);
1141 /// let (first_key, first_value) = map.iter().next().unwrap();
1142 /// assert_eq!((*first_key, *first_value), (1u, "a"));
1144 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1145 pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
// Capture the length up front so the iterator's size_hint is exact.
1146 let len = self.len();
1149 lca: Traverse::traverse(&self.root),
1150 left: RingBuf::new(),
1151 right: RingBuf::new(),
1157 /// Gets a mutable iterator over the entries of the map.
1162 /// use std::collections::BTreeMap;
1164 /// let mut map = BTreeMap::new();
1165 /// map.insert("a", 1u);
1166 /// map.insert("b", 2u);
1167 /// map.insert("c", 3u);
1169 /// // add 10 to the value if the key isn't "a"
1170 /// for (key, value) in map.iter_mut() {
1171 /// if key != &"a" {
1176 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1177 pub fn iter_mut<'a>(&'a mut self) -> MutEntries<'a, K, V> {
1178 let len = self.len();
// Same construction as `iter`, but over a mutable root traversal.
1181 lca: Traverse::traverse(&mut self.root),
1182 left: RingBuf::new(),
1183 right: RingBuf::new(),
1189 /// Gets an owning iterator over the entries of the map.
1194 /// use std::collections::BTreeMap;
1196 /// let mut map = BTreeMap::new();
1197 /// map.insert(1u, "a");
1198 /// map.insert(2u, "b");
1199 /// map.insert(3u, "c");
1201 /// for (key, value) in map.into_iter() {
1202 /// println!("{}: {}", key, value);
1205 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1206 pub fn into_iter(self) -> MoveEntries<K, V> {
1207 let len = self.len();
// Consumes the map: the root is moved into an owning traversal.
1210 lca: Traverse::traverse(self.root),
1211 left: RingBuf::new(),
1212 right: RingBuf::new(),
1218 /// Gets an iterator over the keys of the map.
1223 /// use std::collections::BTreeMap;
1225 /// let mut a = BTreeMap::new();
1226 /// a.insert(1u, "a");
1227 /// a.insert(2u, "b");
1229 /// let keys: Vec<uint> = a.keys().cloned().collect();
1230 /// assert_eq!(keys, vec![1u,2,]);
1232 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1233 pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
// A named fn (not a closure) so the adaptor type is nameable in Keys.
1234 fn first<A, B>((a, _): (A, B)) -> A { a }
1236 Keys { inner: self.iter().map(first) }
1239 /// Gets an iterator over the values of the map.
1244 /// use std::collections::BTreeMap;
1246 /// let mut a = BTreeMap::new();
1247 /// a.insert(1u, "a");
1248 /// a.insert(2u, "b");
1250 /// let values: Vec<&str> = a.values().cloned().collect();
1251 /// assert_eq!(values, vec!["a","b"]);
1253 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1254 pub fn values<'a>(&'a self) -> Values<'a, K, V> {
1255 fn second<A, B>((_, b): (A, B)) -> B { b }
1257 Values { inner: self.iter().map(second) }
1260 /// Return the number of elements in the map.
1265 /// use std::collections::BTreeMap;
1267 /// let mut a = BTreeMap::new();
1268 /// assert_eq!(a.len(), 0);
1269 /// a.insert(1u, "a");
1270 /// assert_eq!(a.len(), 1);
1272 #[unstable = "matches collection reform specification, waiting for dust to settle"]
// O(1): the element count is cached in the `length` field.
1273 pub fn len(&self) -> uint { self.length }
1275 /// Return true if the map contains no elements.
1280 /// use std::collections::BTreeMap;
1282 /// let mut a = BTreeMap::new();
1283 /// assert!(a.is_empty());
1284 /// a.insert(1u, "a");
1285 /// assert!(!a.is_empty());
1287 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1288 pub fn is_empty(&self) -> bool { self.len() == 0 }
1291 impl<K: Ord, V> BTreeMap<K, V> {
1292 /// Gets the given key's corresponding entry in the map for in-place manipulation.
1293 pub fn entry<'a>(&'a mut self, mut key: K) -> Entry<'a, K, V> {
1294 // same basic logic of `swap` and `pop`, blended together
// Walk down the tree one node at a time, recording the search path so
// the returned Entry can insert/remove without re-searching.
1295 let mut stack = stack::PartialSearchStack::new(self);
// `with` runs the closure on the current node; Continue/Finished is the
// loop-vs-return signal (see the Continuation enum).
1297 let result = stack.with(move |pusher, node| {
1298 match Node::search(node, &key) {
// Found the key: seal the stack into an occupied entry.
1301 return Finished(Occupied(OccupiedEntry {
1302 stack: pusher.seal(handle)
1306 match pusher.push(handle) {
1307 stack::Done(new_stack) => {
1308 // Not in the tree, but we've found where it goes
1309 return Finished(Vacant(VacantEntry {
1314 stack::Grew(new_stack) => {
1315 // We've found the subtree this key must go in
// Hand the key back out so the next iteration can reuse it
// (the closure consumed it by move).
1316 return Continue((new_stack, key));
1323 Finished(finished) => return finished,
1324 Continue((new_stack, renewed_key)) => {
1339 use std::prelude::*;
1341 use super::{BTreeMap, Occupied, Vacant};
// Exercises insert, lookup, overwrite, and removal across a large key
// range, checking `len` after every mutation.
1344 fn test_basic_large() {
1345 let mut map = BTreeMap::new();
1347 assert_eq!(map.len(), 0);
// Fresh inserts return None (no previous value).
1349 for i in range(0, size) {
1350 assert_eq!(map.insert(i, 10*i), None);
1351 assert_eq!(map.len(), i + 1);
1354 for i in range(0, size) {
1355 assert_eq!(map.get(&i).unwrap(), &(i*10));
// Keys outside the inserted range must be absent.
1358 for i in range(size, size*2) {
1359 assert_eq!(map.get(&i), None);
// Overwriting returns the old value and leaves len unchanged.
1362 for i in range(0, size) {
1363 assert_eq!(map.insert(i, 100*i), Some(10*i));
1364 assert_eq!(map.len(), size);
1367 for i in range(0, size) {
1368 assert_eq!(map.get(&i).unwrap(), &(i*100));
// Remove the even keys, then verify odds survive and evens are gone.
1371 for i in range(0, size/2) {
1372 assert_eq!(map.remove(&(i*2)), Some(i*200));
1373 assert_eq!(map.len(), size - i - 1);
1376 for i in range(0, size/2) {
1377 assert_eq!(map.get(&(2*i)), None);
1378 assert_eq!(map.get(&(2*i+1)).unwrap(), &(i*200 + 100));
// Removing an already-removed key is a no-op returning None.
1381 for i in range(0, size/2) {
1382 assert_eq!(map.remove(&(2*i)), None);
1383 assert_eq!(map.remove(&(2*i+1)), Some(i*200 + 100));
1384 assert_eq!(map.len(), size/2 - i - 1);
// Minimal smoke test of the basic map operations on two keys.
1389 fn test_basic_small() {
1390 let mut map = BTreeMap::new();
1391 assert_eq!(map.remove(&1), None);
1392 assert_eq!(map.get(&1), None);
1393 assert_eq!(map.insert(1u, 1u), None);
1394 assert_eq!(map.get(&1), Some(&1));
1395 assert_eq!(map.insert(1, 2), Some(1));
1396 assert_eq!(map.get(&1), Some(&2));
1397 assert_eq!(map.insert(2, 4), None);
1398 assert_eq!(map.get(&2), Some(&4));
1399 assert_eq!(map.remove(&1), Some(2));
1400 assert_eq!(map.remove(&2), Some(4));
1401 assert_eq!(map.remove(&1), None);
// Forward iteration (shared, mutable, owning): each step checks both the
// yielded pair and the exact size_hint countdown.
1409 let mut map: BTreeMap<uint, uint> = Vec::from_fn(size, |i| (i, i)).into_iter().collect();
1412 let mut iter = map.iter();
1413 for i in range(0, size) {
1414 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1415 assert_eq!(iter.next().unwrap(), (&i, &i));
1417 assert_eq!(iter.size_hint(), (0, Some(0)));
1418 assert_eq!(iter.next(), None);
1422 let mut iter = map.iter_mut();
1423 for i in range(0, size) {
1424 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1425 assert_eq!(iter.next().unwrap(), (&i, &mut (i + 0)));
1427 assert_eq!(iter.size_hint(), (0, Some(0)));
1428 assert_eq!(iter.next(), None);
1432 let mut iter = map.into_iter();
1433 for i in range(0, size) {
1434 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1435 assert_eq!(iter.next().unwrap(), (i, i));
1437 assert_eq!(iter.size_hint(), (0, Some(0)));
1438 assert_eq!(iter.next(), None);
// Reverse iteration: same three iterator flavors via .rev(), checking
// descending order and size hints.
1444 fn test_iter_rev() {
1448 let mut map: BTreeMap<uint, uint> = Vec::from_fn(size, |i| (i, i)).into_iter().collect();
1451 let mut iter = map.iter().rev();
1452 for i in range(0, size) {
1453 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1454 assert_eq!(iter.next().unwrap(), (&(size - i - 1), &(size - i - 1)));
1456 assert_eq!(iter.size_hint(), (0, Some(0)));
1457 assert_eq!(iter.next(), None);
1461 let mut iter = map.iter_mut().rev();
1462 for i in range(0, size) {
1463 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1464 assert_eq!(iter.next().unwrap(), (&(size - i - 1), &mut(size - i - 1)));
1466 assert_eq!(iter.size_hint(), (0, Some(0)));
1467 assert_eq!(iter.next(), None);
1471 let mut iter = map.into_iter().rev();
1472 for i in range(0, size) {
1473 assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
1474 assert_eq!(iter.next().unwrap(), (size - i - 1, size - i - 1));
1476 assert_eq!(iter.size_hint(), (0, Some(0)));
1477 assert_eq!(iter.next(), None);
// Entry API: covers Occupied get/set/get_mut/take and Vacant set, and
// that each path leaves map contents and len consistent.
1484 let xs = [(1i, 10i), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
1486 let mut map: BTreeMap<int, int> = xs.iter().map(|&x| x).collect();
1488 // Existing key (insert)
1489 match map.entry(1) {
1490 Vacant(_) => unreachable!(),
1491 Occupied(mut view) => {
1492 assert_eq!(view.get(), &10);
1493 assert_eq!(view.set(100), 10);
1496 assert_eq!(map.get(&1).unwrap(), &100);
1497 assert_eq!(map.len(), 6);
1500 // Existing key (update)
1501 match map.entry(2) {
1502 Vacant(_) => unreachable!(),
1503 Occupied(mut view) => {
1504 let v = view.get_mut();
1508 assert_eq!(map.get(&2).unwrap(), &200);
1509 assert_eq!(map.len(), 6);
1511 // Existing key (take)
1512 match map.entry(3) {
1513 Vacant(_) => unreachable!(),
1515 assert_eq!(view.take(), 30);
1518 assert_eq!(map.get(&3), None);
1519 assert_eq!(map.len(), 5);
1522 // Inexistent key (insert)
1523 match map.entry(10) {
1524 Occupied(_) => unreachable!(),
1526 assert_eq!(*view.set(1000), 1000);
1529 assert_eq!(map.get(&10).unwrap(), &1000);
1530 assert_eq!(map.len(), 6);
1541 use std::prelude::*;
1542 use std::rand::{weak_rng, Rng};
1543 use test::{Bencher, black_box};
1545 use super::BTreeMap;
// Shared benchmark harnesses: each takes an element count, the map, the
// Bencher, and insert/teardown (or insert/lookup) closures.
1546 use bench::{insert_rand_n, insert_seq_n, find_rand_n, find_seq_n};
// Insertion benchmarks, random key order, at two sizes.
1549 pub fn insert_rand_100(b: &mut Bencher) {
1550 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1551 insert_rand_n(100, &mut m, b,
1552 |m, i| { m.insert(i, 1); },
1553 |m, i| { m.remove(&i); });
1557 pub fn insert_rand_10_000(b: &mut Bencher) {
1558 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1559 insert_rand_n(10_000, &mut m, b,
1560 |m, i| { m.insert(i, 1); },
1561 |m, i| { m.remove(&i); });
// Insertion benchmarks, sequential key order (B-tree's best case for
// cache locality), at two sizes.
1566 pub fn insert_seq_100(b: &mut Bencher) {
1567 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1568 insert_seq_n(100, &mut m, b,
1569 |m, i| { m.insert(i, 1); },
1570 |m, i| { m.remove(&i); });
1574 pub fn insert_seq_10_000(b: &mut Bencher) {
1575 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1576 insert_seq_n(10_000, &mut m, b,
1577 |m, i| { m.insert(i, 1); },
1578 |m, i| { m.remove(&i); });
// Lookup benchmarks: random and sequential access patterns.
1583 pub fn find_rand_100(b: &mut Bencher) {
1584 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1585 find_rand_n(100, &mut m, b,
1586 |m, i| { m.insert(i, 1); },
1587 |m, i| { m.get(&i); });
1591 pub fn find_rand_10_000(b: &mut Bencher) {
1592 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1593 find_rand_n(10_000, &mut m, b,
1594 |m, i| { m.insert(i, 1); },
1595 |m, i| { m.get(&i); });
1600 pub fn find_seq_100(b: &mut Bencher) {
1601 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1602 find_seq_n(100, &mut m, b,
1603 |m, i| { m.insert(i, 1); },
1604 |m, i| { m.get(&i); });
1608 pub fn find_seq_10_000(b: &mut Bencher) {
1609 let mut m : BTreeMap<uint,uint> = BTreeMap::new();
1610 find_seq_n(10_000, &mut m, b,
1611 |m, i| { m.insert(i, 1); },
1612 |m, i| { m.get(&i); });
// Full-iteration benchmark: fills a map with `size` random entries, then
// (in the elided b.iter closure) walks every entry; `black_box` prevents
// the walk from being optimized away.
1615 fn bench_iter(b: &mut Bencher, size: uint) {
1616 let mut map = BTreeMap::<uint, uint>::new();
1617 let mut rng = weak_rng();
1619 for _ in range(0, size) {
1620 map.insert(rng.gen(), rng.gen());
1624 for entry in map.iter() {
1631 pub fn iter_20(b: &mut Bencher) {
1636 pub fn iter_1000(b: &mut Bencher) {
1637 bench_iter(b, 1000);
1641 pub fn iter_100000(b: &mut Bencher) {
1642 bench_iter(b, 100000);