1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
14 use cmp::{max, Eq, Equiv, PartialEq};
15 use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
19 use hash::{Hash, Hasher, sip};
20 use iter::{Iterator, FilterMap, Chain, Repeat, Zip, Extendable};
21 use iter::{range, range_inclusive, FromIterator};
25 use option::{Some, None, Option};
28 use result::{Ok, Err};
33 use hash::{Hash, Hasher};
34 use iter::range_step_inclusive;
35 use iter::{Iterator, range};
37 use mem::{min_align_of, size_of};
38 use mem::{overwrite, transmute};
39 use num::{CheckedMul, is_power_of_two};
41 use option::{Some, None, Option};
45 use rt::heap::{allocate, deallocate};
47 static EMPTY_BUCKET: u64 = 0u64;
49 /// The raw hashtable, providing safe-ish access to the unzipped and highly
50 /// optimized arrays of hashes, keys, and values.
52 /// This design uses less memory and is a lot faster than the naive
53 /// `Vec<Option<u64, K, V>>`, because we don't pay for the overhead of an
54 /// option on every element, and we get a generally more cache-aware design.
56 /// Key invariants of this structure:
58 /// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
59 /// 'undefined' contents. Don't read from them. This invariant is
60 /// enforced outside this module with the `EmptyIndex`, `FullIndex`,
61 /// and `SafeHash` types.
63 /// - An `EmptyIndex` is only constructed for a bucket at an index with
64 /// a hash of EMPTY_BUCKET.
66 /// - A `FullIndex` is only constructed for a bucket at an index with a
67 /// non-EMPTY_BUCKET hash.
69 /// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
70 /// around hashes of zero by changing them to 0x8000_0000_0000_0000,
71 /// which will likely map to the same bucket, while not being confused
74 /// - All three "arrays represented by pointers" are the same length:
75 /// `capacity`. This is set at creation and never changes. The arrays
76 /// are unzipped to save space (we don't have to pay for the padding
77 /// between odd sized elements, such as in a map from u64 to u8), and
78 /// be more cache aware (scanning through 8 hashes brings in 2 cache
79 /// lines, since they're all right beside each other).
81 /// You can kind of think of this module/data structure as a safe wrapper
82 /// around just the "table" part of the hashtable. It enforces some
83 /// invariants at the type level and employs some performance trickery,
84 /// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
88 /// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
89 /// isn't yet totally safe. There's a "known exploit" that you can create
90 /// multiple FullIndexes for a bucket, `take` one, and then still `take`
91 /// the other causing undefined behavior. Currently, there's no story
92 /// for how to protect against this statically. Therefore, there are asserts
93 /// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
94 /// With time, and when we're confident this works correctly, they should
95 /// be removed. Also, the bounds check in `peek` is especially painful,
96 /// as that's called in the innermost loops of the hashtable and has the
97 /// potential to be a major performance drain. Remove this too.
99 /// Or, better than remove, only enable these checks for debug builds.
100 /// There's currently no "debug-only" asserts in rust, so if you're reading
101 /// this and going "what? of course there are debug-only asserts!", then
102 /// please make this use them!
// NOTE(review): RawTable's field list is elided in this excerpt; the `Clone`
// and `Drop` impls below reference `hashes`, `keys`, `vals`, `capacity`,
// and `size` — confirm against the full file.
#[unsafe_no_drop_flag]
pub struct RawTable<K, V> {
/// Represents an index into a `RawTable` with no key or value in it.
pub struct EmptyIndex {
    // Non-copyable marker: an `EmptyIndex` is moved into `put`, so the same
    // empty slot cannot be filled twice through one index.
    nocopy: marker::NoCopy,
/// Represents an index into a `RawTable` with a key, value, and hash
pub struct FullIndex {
    // Non-copyable marker: a `FullIndex` is consumed by `take`, preventing
    // the same full bucket from being taken twice through one index.
    nocopy: marker::NoCopy,
127 /// Since we get the hash for free whenever we check the bucket state,
128 /// this function is provided for fast access, letting us avoid
129 /// redundant trips back to the hashtable.
131 pub fn hash(&self) -> SafeHash { self.hash }
133 /// Same comment as with `hash`.
135 pub fn raw_index(&self) -> uint { self.idx as uint }
138 /// Represents the state of a bucket: it can either have a key/value
139 /// pair (be full) or not (be empty). You cannot `take` empty buckets,
140 /// and you cannot `put` into full buckets.
141 pub enum BucketState {
146 /// A hash that is not zero, since we use a hash of zero to represent empty
// Newtype over the raw `u64` hash. Constructed only via `make_hash`, which
// guarantees the wrapped value is never `EMPTY_BUCKET` (0).
#[deriving(PartialEq)]
pub struct SafeHash {
154 /// Peek at the hash value, which is guaranteed to be non-zero.
156 pub fn inspect(&self) -> u64 { self.hash }
159 /// We need to remove hashes of 0. That's reserved for empty buckets.
160 /// This function wraps up `hash_keyed` to be the only way outside this
161 /// module to generate a SafeHash.
pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
    match hasher.hash(t) {
        // This constant is exceedingly likely to hash to the same
        // bucket, but it won't be counted as empty!
        EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
        // Any other hash is already non-zero and can be used verbatim.
        h => SafeHash { hash: h },
// Rounds `unrounded` up to the next multiple of `target_alignment`, which
// must be a power of two (standard align-up bit trick).
fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
    assert!(is_power_of_two(target_alignment));
    (unrounded + target_alignment - 1) & !(target_alignment - 1)

    // NOTE(review): the asserts below are the body of a unit test whose
    // `#[test] fn` header is elided in this excerpt.
    assert_eq!(round_up_to_next(0, 4), 0);
    assert_eq!(round_up_to_next(1, 4), 4);
    assert_eq!(round_up_to_next(2, 4), 4);
    assert_eq!(round_up_to_next(3, 4), 4);
    assert_eq!(round_up_to_next(4, 4), 4);
    assert_eq!(round_up_to_next(5, 4), 8);
// Returns a tuple of (minimum required malloc alignment, hash_offset,
// key_offset, val_offset, array_size), from the start of a mallocated array.
fn calculate_offsets(
    hash_size: uint, hash_align: uint,
    keys_size: uint, keys_align: uint,
    vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {

    // NOTE(review): the binding of `hash_offset` (0, per the tests below)
    // is elided in this excerpt.
    let end_of_hashes = hash_offset + hash_size;

    // Each array starts at the next offset after the previous one ends
    // that satisfies its alignment.
    let keys_offset = round_up_to_next(end_of_hashes, keys_align);
    let end_of_keys = keys_offset + keys_size;

    let vals_offset = round_up_to_next(end_of_keys, vals_align);
    let end_of_vals = vals_offset + vals_size;

    // The single backing buffer must satisfy the strictest of the three
    // alignments.
    let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));

    (min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
// Spot-checks the offset/padding math, including a mixed-alignment case.
// NOTE(review): the `#[test]` attribute is elided in this excerpt.
fn test_offset_calculation() {
    assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
    assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
    assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
impl<K, V> RawTable<K, V> {

    /// Does not initialize the buckets. The caller should ensure they,
    /// at the very least, set every hash to EMPTY_BUCKET.
    unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
        // Guard against `capacity * element_size` overflowing for each of
        // the three arrays.
        let hashes_size = capacity.checked_mul(&size_of::<u64>())
            .expect("capacity overflow");
        let keys_size = capacity.checked_mul(&size_of::< K >())
            .expect("capacity overflow");
        let vals_size = capacity.checked_mul(&size_of::< V >())
            .expect("capacity overflow");

        // Allocating hashmaps is a little tricky. We need to allocate three
        // arrays, but since we know their sizes and alignments up front,
        // we just allocate a single array, and then have the subarrays

        // This is great in theory, but in practice getting the alignment
        // right is a little subtle. Therefore, calculating offsets has been
        // factored out into a different function.
        let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
            // NOTE(review): the `calculate_offsets(` call line is elided in
            // this excerpt.
                hashes_size, min_align_of::<u64>(),
                keys_size, min_align_of::< K >(),
                vals_size, min_align_of::< V >());

        let buffer = allocate(size, malloc_alignment);

        // Carve the three sub-arrays out of the one allocation.
        let hashes = buffer.offset(hash_offset as int) as *mut u64;
        let keys = buffer.offset(keys_offset as int) as *mut K;
        let vals = buffer.offset(vals_offset as int) as *mut V;
    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    #[allow(experimental)]
    pub fn new(capacity: uint) -> RawTable<K, V> {
            let ret = RawTable::new_uninitialized(capacity);
            // Zeroing the hash array marks every bucket EMPTY_BUCKET (0).
            set_memory(ret.hashes, 0u8, capacity);
    /// Reads a bucket at a given index, returning an enum indicating whether
    /// there's anything there or not. You need to match on this enum to get
    /// the appropriate types to pass on to most of the other functions in
    pub fn peek(&self, index: uint) -> BucketState {
        debug_assert!(index < self.capacity);

        let idx = index as int;
        let hash = unsafe { *self.hashes.offset(idx) };

        let nocopy = marker::NoCopy;

        // NOTE(review): the match producing Empty/Full is elided in this
        // excerpt; a non-EMPTY hash yields a Full index carrying the hash.
                hash: SafeHash { hash: full_hash },
    /// Gets references to the key and value at a given index.
    pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
            // A FullIndex statically guarantees the bucket is occupied;
            // assert it anyway (see the module docs on the known exploit).
            // NOTE(review): the `let idx = index.idx;` / `unsafe {` lines
            // are elided in this excerpt.
            debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
            (&'a *self.keys.offset(idx),
             &'a *self.vals.offset(idx))
    /// Gets references to the key and value at a given index, with the
    /// value's reference being mutable.
    pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
            // Key stays shared (it must not be mutated in place — that would
            // change its hash); only the value borrow is mutable.
            debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
            (&'a *self.keys.offset(idx),
             &'a mut *self.vals.offset(idx))
    /// Read everything, mutably.
    pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
        -> (&'a mut SafeHash, &'a mut K, &'a mut V) {
            debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
            // Aliasing a `*mut u64` as `&mut SafeHash`; soundness of this
            // cast is checked by the `can_alias_safehash_as_u64` test below.
            (transmute(self.hashes.offset(idx)),
             &'a mut *self.keys.offset(idx),
             &'a mut *self.vals.offset(idx))
    /// Puts a key and value pair, along with the key's hash, into a given
    /// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
    /// function, because that slot will no longer be empty when we return!
    /// A FullIndex is returned for later use, pointing to the newly-filled
    /// slot in the hashtable.
    ///
    /// Use `make_hash` to construct a `SafeHash` to pass to this function.
    pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
            debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
            *self.hashes.offset(idx) = hash.inspect();
            // `overwrite` writes without dropping the (uninitialized)
            // destination slots.
            overwrite(&mut *self.keys.offset(idx), k);
            overwrite(&mut *self.vals.offset(idx), v);

        FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
    /// Removes a key and value from the hashtable.
    ///
    /// This works similarly to `put`, building an `EmptyIndex` out of the
    pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
            debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);

            // Mark the bucket empty before moving the contents out.
            *self.hashes.offset(idx) = EMPTY_BUCKET;

            // Drop the mutable constraint.
            let keys = self.keys as *K;
            let vals = self.vals as *V;

            // Move the key and value out by raw read; the slots are now
            // logically uninitialized (their hash is EMPTY_BUCKET).
            let k = ptr::read(keys.offset(idx));
            let v = ptr::read(vals.offset(idx));

        (EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
    /// The hashtable's capacity, similar to a vector's.
    pub fn capacity(&self) -> uint {

    /// The number of elements ever `put` in the hashtable, minus the number
    /// of elements ever `take`n.
    pub fn size(&self) -> uint {

    /// Iterator over shared references to the entries.
    pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
        Entries { table: self, idx: 0, elems_seen: 0 }

    /// Iterator with mutable access to the values.
    pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
        MutEntries { table: self, idx: 0, elems_seen: 0 }

    /// Consuming iterator; yields (hash, key, value) and drains the table.
    pub fn move_iter(self) -> MoveEntries<K, V> {
        MoveEntries { table: self, idx: 0 }
// `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
// ensure that a `FullIndex` points to an index with a non-zero hash,
// and a `SafeHash` is just a `u64` with a different name, this is

// This test ensures that a `SafeHash` really IS the same size as a
// `u64`. If you need to change the size of `SafeHash` (and
// consequently made this test fail), `read_all_mut` needs to be
// modified to no longer assume this.
// NOTE(review): the `#[test]` attribute is elided in this excerpt.
fn can_alias_safehash_as_u64() {
    assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
/// Iterator over shared references to entries in a table.
pub struct Entries<'a, K, V> {
    table: &'a RawTable<K, V>,
    // NOTE(review): the `idx` / `elems_seen` fields are elided in this
    // excerpt (they are initialized in `RawTable::iter`).

/// Iterator over mutable references to entries in a table.
pub struct MutEntries<'a, K, V> {
    table: &'a mut RawTable<K, V>,

/// Iterator over the entries in a table, consuming the table.
pub struct MoveEntries<K, V> {
    table: RawTable<K, V>,
impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        // Linear scan over the buckets, skipping empty ones.
        // NOTE(review): the binding of `i` (presumably from `self.idx`,
        // which is then advanced) is elided in this excerpt.
        while self.idx < self.table.capacity() {
            match self.table.peek(i) {
                    self.elems_seen += 1;
                    return Some(self.table.read(&idx));

    // Exact size is known: total live elements minus those already yielded.
    fn size_hint(&self) -> (uint, Option<uint>) {
        let size = self.table.size() - self.elems_seen;
impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        // NOTE(review): the binding of `i` is elided in this excerpt.
        while self.idx < self.table.capacity() {
            match self.table.peek(i) {
                    // the transmute here fixes:
                    // error: lifetime of `self` is too short to guarantee its contents
                    // can be safely reborrowed
                    Full(idx) => unsafe {
                        self.elems_seen += 1;
                        return Some(transmute(self.table.read_mut(&idx)));

    fn size_hint(&self) -> (uint, Option<uint>) {
        let size = self.table.size() - self.elems_seen;
impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        while self.idx < self.table.capacity() {
            match self.table.peek(i) {
                    // `take` drains the bucket; NOTE(review): the bindings
                    // of `i` and `h` (the bucket's hash) are elided here.
                    let (_, k, v) = self.table.take(idx);
                    return Some((h, k, v));

    // `size` never shrinks as we iterate because `take` already decrements
    // it — the hint is recomputed from the table each call.
    fn size_hint(&self) -> (uint, Option<uint>) {
        let size = self.table.size();
impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
    fn clone(&self) -> RawTable<K, V> {
            // Start from uninitialized storage of the same capacity and fill
            // it bucket-by-bucket from `self`.
            let mut new_ht = RawTable::new_uninitialized(self.capacity());

            for i in range(0, self.capacity()) {
                    // Empty source bucket: only the hash slot needs writing.
                    *new_ht.hashes.offset(i as int) = EMPTY_BUCKET;

                    // Full source bucket: copy hash, clone key and value.
                    // `overwrite` writes without dropping the uninitialized
                    // destination slots.
                    let hash = idx.hash().inspect();
                    let (k, v) = self.read(&idx);
                    *new_ht.hashes.offset(i as int) = hash;
                    overwrite(&mut *new_ht.keys.offset(i as int), (*k).clone());
                    overwrite(&mut *new_ht.vals.offset(i as int), (*v).clone());

            new_ht.size = self.size();
impl<K, V> Drop for RawTable<K, V> {
        // NOTE(review): the `fn drop(&mut self)` header is elided in this
        // excerpt. Take (and thereby drop) every remaining full bucket,
        // then free the single backing allocation.

        // This is in reverse because we're likely to have partially taken
        // some elements out with `.move_iter()` from the front.
        for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
            // Check if the size is 0, so we don't do a useless scan when
            // dropping empty tables such as on resize.
            if self.size == 0 { break }

            match self.peek(i as uint) {
                Full(idx) => { self.take(idx); }

        assert_eq!(self.size, 0);

        if self.hashes.is_not_null() {
            // Recompute the allocation's size/alignment exactly as
            // `new_uninitialized` did, so the free matches the alloc.
            let hashes_size = self.capacity * size_of::<u64>();
            let keys_size = self.capacity * size_of::<K>();
            let vals_size = self.capacity * size_of::<V>();
            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
                                                           keys_size, min_align_of::<K>(),
                                                           vals_size, min_align_of::<V>());

            deallocate(self.hashes as *mut u8, size, align);
            // Remember how everything was allocated out of one buffer
            // during initialization? We only need one call to free here.

            // Null the pointer so a second drop (see `unsafe_no_drop_flag`
            // on the struct) won't double-free.
            self.hashes = RawPtr::null();
// Default number of buckets for a fresh map: 2^5 = 32.
static INITIAL_LOG2_CAP: uint = 5;
static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
/// The default behavior of HashMap implements a load factor of 90.9%.
/// This behavior is characterized by the following conditions:
///
/// - if `size * 1.1 < cap < size * 4` then shouldn't resize
/// - if `cap < minimum_capacity * 2` then shouldn't shrink
struct DefaultResizePolicy {
    /// Doubled minimal capacity. The capacity must never drop below
    /// the minimum capacity. (The check happens before the capacity
    /// is potentially halved.)
    minimum_capacity2: uint
impl DefaultResizePolicy {
    fn new(new_capacity: uint) -> DefaultResizePolicy {
        DefaultResizePolicy {
            // Stored pre-doubled so `capacity_range` can compare directly.
            minimum_capacity2: new_capacity << 1

    // Returns `(grow_at, shrink_at)` thresholds for a given size: grow once
    // capacity falls below ~1.1 * size (the 90.9% load factor), never shrink
    // below max(size * 8, doubled minimum capacity).
    fn capacity_range(&self, new_size: uint) -> (uint, uint) {
        ((new_size * 11) / 10, max(new_size << 3, self.minimum_capacity2))

    // Raise the floor the table may never shrink beneath.
    fn reserve(&mut self, new_capacity: uint) {
        self.minimum_capacity2 = new_capacity << 1;
609 // The main performance trick in this hashmap is called Robin Hood Hashing.
610 // It gains its excellent performance from one key invariant:
// If an insertion collides with an existing element, and that element's
// "probe distance" (how far away the element is from its ideal location)
614 // is higher than how far we've already probed, swap the elements.
616 // This massively lowers variance in probe distance, and allows us to get very
617 // high load factors with good performance. The 90% load factor I use is rather
620 // > Why a load factor of approximately 90%?
622 // In general, all the distances to initial buckets will converge on the mean.
623 // At a load factor of α, the odds of finding the target bucket after k
624 // probes is approximately 1-α^k. If we set this equal to 50% (since we converge
625 // on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
626 // this down to make the math easier on the CPU and avoid its FPU.
627 // Since on average we start the probing in the middle of a cache line, this
628 // strategy pulls in two cache lines of hashes on every lookup. I think that's
629 // pretty good, but if you want to trade off some space, it could go down to one
630 // cache line on average with an α of 0.84.
632 // > Wait, what? Where did you get 1-α^k from?
634 // On the first probe, your odds of a collision with an existing element is α.
635 // The odds of doing this twice in a row is approximately α^2. For three times,
636 // α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
637 // colliding after k tries is 1-α^k.
639 // Future Improvements (FIXME!)
640 // ============================
642 // Allow the load factor to be changed dynamically and/or at initialization.
644 // Also, would it be possible for us to reuse storage when growing the
645 // underlying table? This is exactly the use case for 'realloc', and may
646 // be worth exploring.
648 // Future Optimizations (FIXME!)
649 // =============================
651 // The paper cited below mentions an implementation which keeps track of the
652 // distance-to-initial-bucket histogram. I'm suspicious of this approach because
653 // it requires maintaining an internal map. If this map were replaced with a
654 // hashmap, it would be faster, but now our data structure is self-referential
655 // and blows up. Also, this allows very good first guesses, but array accesses
656 // are no longer linear and in one direction, as we have now. There is also
657 // memory and cache pressure that this map would entail that would be very
658 // difficult to properly see in a microbenchmark.
660 // Another possible design choice that I made without any real reason is
661 // parameterizing the raw table over keys and values. Technically, all we need
662 // is the size and alignment of keys and values, and the code should be just as
663 // efficient (well, we might need one for power-of-two size and one for not...).
664 // This has the potential to reduce code bloat in rust executables, without
665 // really losing anything except 4 words (key size, key alignment, val size,
666 // val alignment) which can be passed in to every call of a `RawTable` function.
667 // This would definitely be an avenue worth exploring if people start complaining
668 // about the size of rust executables.
670 // There's also an "optimization" that has been omitted regarding how the
671 // hashtable allocates. The vector type has set the expectation that a hashtable
672 // which never has an element inserted should not allocate. I'm suspicious of
673 // implementing this for hashtables, because supporting it has no performance
674 // benefit over using an `Option<HashMap<K, V>>`, and is significantly more
677 /// A hash map implementation which uses linear probing with Robin
678 /// Hood bucket stealing.
680 /// The hashes are all keyed by the task-local random number generator
681 /// on creation by default, this means the ordering of the keys is
682 /// randomized, but makes the tables more resistant to
683 /// denial-of-service attacks (Hash DoS). This behaviour can be
684 /// overridden with one of the constructors.
686 /// It is required that the keys implement the `Eq` and `Hash` traits, although
687 /// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
689 /// Relevant papers/articles:
691 /// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
692 /// 2. Emmanuel Goossaert. ["Robin Hood
693 /// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
694 /// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
695 /// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
700 /// use std::collections::HashMap;
702 /// // type inference lets us omit an explicit type signature (which
703 /// // would be `HashMap<&str, &str>` in this example).
704 /// let mut book_reviews = HashMap::new();
706 /// // review some books.
707 /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
708 /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
709 /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
710 /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
712 /// // check for a specific one.
713 /// if !book_reviews.contains_key(&("Les Misérables")) {
714 /// println!("We've got {} reviews, but Les Misérables ain't one.",
715 /// book_reviews.len());
718 /// // oops, this review has a lot of spelling mistakes, let's delete it.
719 /// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
721 /// // look up the values associated with some keys.
722 /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
723 /// for book in to_find.iter() {
724 /// match book_reviews.find(book) {
725 /// Some(review) => println!("{}: {}", *book, *review),
726 /// None => println!("{} is unreviewed.", *book)
730 /// // iterate over everything.
731 /// for (book, review) in book_reviews.iter() {
732 /// println!("{}: \"{}\"", *book, *review);
pub struct HashMap<K, V, H = sip::SipHasher> {
    // All hashes are keyed on these values, to prevent hash collision attacks.
    // NOTE(review): the `hasher: H` field is elided in this excerpt (see
    // `make_hash`, which reads `self.hasher`).

    table: table::RawTable<K, V>,

    // We keep this at the end since it might as well have tail padding.
    resize_policy: DefaultResizePolicy,
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
    // Probe the `idx`th bucket for a given hash, returning the index of the
    //
    // This exploits the power-of-two size of the hashtable. As long as this
    // is always true, we can use a bitmask of cap-1 to do modular arithmetic.
    //
    // Prefer using this with increasing values of `idx` rather than repeatedly
    // calling `probe_next`. This reduces data-dependencies between loops, which
    // can help the optimizer, and certainly won't hurt it. `probe_next` is
    // simply for convenience, and is no more efficient than `probe`.
    fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
        let hash_mask = self.table.capacity() - 1;

        // So I heard a rumor that unsigned overflow is safe in rust..
        ((hash.inspect() as uint) + idx) & hash_mask

    // Generate the next probe in a sequence. Prefer using 'probe' by itself,
    // but this can sometimes be useful.
    fn probe_next(&self, probe: uint) -> uint {
        let hash_mask = self.table.capacity() - 1;
        (probe + 1) & hash_mask
    // Hash `x` with this map's keyed hasher, producing a non-zero SafeHash.
    fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
        table::make_hash(&self.hasher, x)

    /// Get the distance of the bucket at the given index that it lies
    /// from its 'ideal' location.
    ///
    /// In the cited blog posts above, this is called the "distance to
    /// initial bucket", or DIB.
    fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
        // where the hash of the element that happens to reside at
        // `index_of_elem` tried to place itself first.
        let first_probe_index = self.probe(&index_of_elem.hash(), 0);

        let raw_index = index_of_elem.raw_index();

        // NOTE(review): the `} else {` between the two arms below is elided
        // in this excerpt.
        if first_probe_index <= raw_index {
            // probe just went forward
            raw_index - first_probe_index
            // probe wrapped around the hashtable
            raw_index + (self.table.capacity() - first_probe_index)
    /// Search for a pre-hashed key.
    fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
        -> Option<table::FullIndex> {
        // Probe at most `size` buckets; the Robin Hood invariant lets us
        // terminate early below once we pass a bucket that is closer to its
        // ideal slot than we are deep into our own probe sequence.
        for num_probes in range(0u, self.table.size()) {
            let probe = self.probe(hash, num_probes);

            let idx = match self.table.peek(probe) {
                table::Empty(_) => return None, // hit an empty bucket
                table::Full(idx) => idx

            // We can finish the search early if we hit any bucket
            // with a lower distance to initial bucket than we've probed.
            if self.bucket_distance(&idx) < num_probes { return None }

            // If the hash doesn't match, it can't be this one..
            if *hash != idx.hash() { continue }

            let (k, _) = self.table.read(&idx);

            // If the key doesn't match, it can't be this one..
            if !is_match(k) { continue }

    // Search using plain key equality.
    fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
        self.search_hashed_generic(hash, |k_| *k == *k_)

    // Search using `Equiv`, so callers can look up without owning a `K`.
    fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
        self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))

    /// Search for a key, yielding the index if it's found in the hashtable.
    /// If you already have the hash for the key lying around, use
    fn search(&self, k: &K) -> Option<table::FullIndex> {
        self.search_hashed(&self.make_hash(k), k)
    // Removes the element at `starting_index` and performs backward-shift
    // deletion: elements after it in the probe chain are shifted back one
    // slot until a bucket with probe-distance zero (or an empty bucket) is
    // reached, preserving the Robin Hood invariant without tombstones.
    fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
        let starting_probe = starting_index.raw_index();

        // Phase 1: find where the shifting must stop.
        // NOTE(review): the binding of `ending_probe` (used below) is elided
        // in this excerpt.
        let mut probe = self.probe_next(starting_probe);
        for _ in range(0u, self.table.size()) {
            match self.table.peek(probe) {
                table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
                table::Full(idx) => {
                    // Bucket that isn't us, which has a non-zero probe distance.
                    // This isn't the ending index, so keep searching.
                    if self.bucket_distance(&idx) != 0 {
                        probe = self.probe_next(probe);

                    // if we do have a bucket_distance of zero, we're at the end
                    // of what we need to shift.

        // Phase 2: actually remove the target element…
        let (_, _, retval) = self.table.take(starting_index);

        let mut probe = starting_probe;
        let mut next_probe = self.probe_next(probe);

        // backwards-shift all the elements after our newly-deleted one.
        while next_probe != ending_probe {
            match self.table.peek(next_probe) {
                    // nothing to shift in. just empty it out.
                    match self.table.peek(probe) {
                        table::Empty(_) => {},
                        table::Full(idx) => { self.table.take(idx); }

                table::Full(next_idx) => {
                    // something to shift. move it over!
                    let next_hash = next_idx.hash();
                    let (_, next_key, next_val) = self.table.take(next_idx);
                    match self.table.peek(probe) {
                        table::Empty(idx) => {
                            self.table.put(idx, next_hash, next_key, next_val);

                        table::Full(idx) => {
                            let (emptyidx, _, _) = self.table.take(idx);
                            self.table.put(emptyidx, next_hash, next_key, next_val);

            next_probe = self.probe_next(next_probe);

        // Done the backwards shift, but there's still an element left!
        match self.table.peek(probe) {
            table::Empty(_) => {},
            table::Full(idx) => { self.table.take(idx); }

        // Now we're done all our shifting. Return the value we grabbed

    /// Like `pop`, but can operate on any type that is equivalent to a key.
    pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
        if self.table.size() == 0 {

        // Removal may allow the table to shrink (see the resize policy).
        let potential_new_size = self.table.size() - 1;
        self.make_some_room(potential_new_size);

        let starting_index = match self.search_equiv(k) {

        self.pop_internal(starting_index)
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
    /// Return the number of elements in the map
    fn len(&self) -> uint { self.table.size() }
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
    /// Clear the map, removing all key-value pairs. Keeps the allocated memory
    fn clear(&mut self) {
        // Prevent reallocations from happening from now on. Makes it possible
        // for the map to be reused but has a downside: reserves permanently.
        self.resize_policy.reserve(self.table.size());

        // Take (and thereby drop) every full bucket; capacity is untouched.
        for i in range(0, self.table.capacity()) {
            match self.table.peek(i) {
                table::Empty(_) => {},
                table::Full(idx) => { self.table.take(idx); }
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
    fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
        // Map the found index to a borrow of its value.
        self.search(k).map(|idx| {
            let (_, v) = self.table.read(&idx);

    fn contains_key(&self, k: &K) -> bool {
        self.search(k).is_some()
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
    fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
        match self.search(k) {
                // Only the value borrow is returned; the key stays immutable.
                let (_, v) = self.table.read_mut(&idx);

    fn swap(&mut self, k: K, v: V) -> Option<V> {
        let hash = self.make_hash(&k);
        // Grow up-front so the probe loop below cannot run out of room.
        let potential_new_size = self.table.size() + 1;
        self.make_some_room(potential_new_size);

        // `dib` is how far the current probe is from the hash's ideal bucket.
        for dib in range_inclusive(0u, self.table.size()) {
            let probe = self.probe(&hash, dib);

            let idx = match self.table.peek(probe) {
                table::Empty(idx) => {
                    // Free slot: claim it directly.
                    self.table.put(idx, hash, k, v);

                table::Full(idx) => idx

            // Cheap hash comparison before touching the key.
            // NOTE(review): the key-equality check between this hash match
            // and the `replace` below is elided in this excerpt.
            if idx.hash() == hash {
                let (bucket_k, bucket_v) = self.table.read_mut(&idx);

                // Found an existing value.
                return Some(replace(bucket_v, v));

            let probe_dib = self.bucket_distance(&idx);

            if probe_dib < dib {
                // Found a luckier bucket. This implies that the key does not
                // already exist in the hashtable. Just do a robin hood
                self.robin_hood(idx, probe_dib, hash, k, v);

        // We really shouldn't be here.
        fail!("Internal HashMap error: Out of space.");

    fn pop(&mut self, k: &K) -> Option<V> {
        if self.table.size() == 0 {

        // Removal may allow the table to shrink (see the resize policy).
        let potential_new_size = self.table.size() - 1;
        self.make_some_room(potential_new_size);

        let starting_index = match self.search(k) {
            None => return None,

        self.pop_internal(starting_index)
impl<K: Hash + Eq, V> HashMap<K, V, sip::SipHasher> {
    /// Create an empty HashMap.
    pub fn new() -> HashMap<K, V, sip::SipHasher> {
        HashMap::with_capacity(INITIAL_CAPACITY)

    /// Creates an empty hash map with the given initial capacity.
    pub fn with_capacity(capacity: uint) -> HashMap<K, V, sip::SipHasher> {
        // Randomly keyed SipHasher for hash-flooding (DoS) resistance.
        // NOTE(review): the lines drawing `r0`/`r1` from `r` are elided
        // in this excerpt.
        let mut r = rand::task_rng();
        let hasher = sip::SipHasher::new_with_keys(r0, r1);
        HashMap::with_capacity_and_hasher(capacity, hasher)
impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
    /// Creates an empty hashmap which will use the given hasher to hash keys.
    ///
    /// The created map has the default initial capacity.
    pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
        HashMap::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)

    /// Create an empty HashMap with space for at least `capacity`
    /// elements, using `hasher` to hash the keys.
    ///
    /// Warning: `hasher` is normally randomly generated, and
    /// is designed to allow HashMaps to be resistant to attacks that
    /// cause many collisions and very poor performance. Setting it
    /// manually using this function can expose a DoS attack vector.
    pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
        // Capacity is always a power of two — required by the masking
        // arithmetic in `probe`/`probe_next`.
        // NOTE(review): the struct-literal opening line is elided in this
        // excerpt.
        let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
            resize_policy: DefaultResizePolicy::new(cap),
            table: table::RawTable::new(cap),
1076 /// The hashtable will never try to shrink below this size. You can use
1077 /// this function to reduce reallocations if your hashtable frequently
1078 /// grows and shrinks by large amounts.
1080 /// This function has no effect on the operational semantics of the
1081 /// hashtable, only on performance.
1082 pub fn reserve(&mut self, new_minimum_capacity: uint) {
1083 let cap = num::next_power_of_two(
1084 max(INITIAL_CAPACITY, new_minimum_capacity));
1086 self.resize_policy.reserve(cap);
1088 if self.table.capacity() < cap {
1093 /// Resizes the internal vectors to a new capacity. It's your responsibility to:
1094 /// 1) Make sure the new capacity is enough for all the elements, accounting
1095 /// for the load factor.
1096 /// 2) Ensure new_capacity is a power of two.
1097 fn resize(&mut self, new_capacity: uint) {
1098 assert!(self.table.size() <= new_capacity);
1099 assert!(num::is_power_of_two(new_capacity));
// Swap in a fresh table, then rehash every element of the old one into it.
1101 let old_table = replace(&mut self.table, table::RawTable::new(new_capacity));
1102 let old_size = old_table.size();
// Hashes are cached alongside entries, so no key is re-hashed here.
1104 for (h, k, v) in old_table.move_iter() {
1105 self.insert_hashed_nocheck(h, k, v);
// Sanity check: rehashing must neither lose nor duplicate entries.
1108 assert_eq!(self.table.size(), old_size);
1111 /// Performs any necessary resize operations, such that there's space for
1112 /// new_size elements.
1113 fn make_some_room(&mut self, new_size: uint) {
1114 let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
1115 let cap = self.table.capacity();
1117 // An invalid value shouldn't make us run out of space.
1118 debug_assert!(grow_at >= new_size);
// NOTE(review): the grow-branch condition (original line ~1120) is elided in
// this dump; capacity is doubled to grow and halved to shrink (power of two).
1121 let new_capacity = cap << 1;
1122 self.resize(new_capacity);
1123 } else if shrink_at <= cap {
1124 let new_capacity = cap >> 1;
1125 self.resize(new_capacity);
1129 /// Perform robin hood bucket stealing at the given 'index'. You must
1130 /// also pass that probe's "distance to initial bucket" so we don't have
1131 /// to recalculate it, as well as the total number of probes already done
1132 /// so we have some sort of upper bound on the number of probes to do.
1134 /// 'hash', 'k', and 'v' are the elements to robin hood into the hashtable.
1135 fn robin_hood(&mut self, mut index: table::FullIndex, mut dib_param: uint,
1136 mut hash: table::SafeHash, mut k: K, mut v: V) {
// Swap the incoming (hash, k, v) into the stolen bucket, taking its previous
// occupant, which must now be re-inserted further along the probe sequence.
1138 let (old_hash, old_key, old_val) = {
1139 let (old_hash_ref, old_key_ref, old_val_ref) =
1140 self.table.read_all_mut(&index);
1142 let old_hash = replace(old_hash_ref, hash);
1143 let old_key = replace(old_key_ref, k);
1144 let old_val = replace(old_val_ref, v);
1146 (old_hash, old_key, old_val)
1149 let mut probe = self.probe_next(index.raw_index());
// Continue probing for a home for the displaced element. The loop bound
// (table size) guarantees termination even in pathological cases.
1151 for dib in range(dib_param + 1, self.table.size()) {
1152 let full_index = match self.table.peek(probe) {
1153 table::Empty(idx) => {
// Empty slot found: the displaced element lands here and we are done.
1155 self.table.put(idx, old_hash, old_key, old_val);
1158 table::Full(idx) => idx
1161 let probe_dib = self.bucket_distance(&full_index);
1163 // Robin hood! Steal the spot.
1164 if probe_dib < dib {
// NOTE(review): the elided lines here presumably swap the displaced element
// with this luckier occupant and keep walking — confirm in full source.
1166 dib_param = probe_dib;
1173 probe = self.probe_next(probe);
// Reaching here means every bucket was full — the load factor invariant broke.
1176 fail!("HashMap fatal error: 100% load factor?");
1180 /// Insert a pre-hashed key-value pair, without first checking
1181 /// that there's enough room in the buckets. Returns a reference to the
1182 /// newly insert value.
1184 /// If the key already exists, the hashtable will be returned untouched
1185 /// and a reference to the existing element will be returned.
1186 fn insert_hashed_nocheck<'a>(
1187 &'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
// Linear probe; `dib` is this probe's distance-to-initial-bucket. The
// inclusive bound of table.size() is a hard upper limit on probe length.
1189 for dib in range_inclusive(0u, self.table.size()) {
1190 let probe = self.probe(&hash, dib);
1192 let idx = match self.table.peek(probe) {
1193 table::Empty(idx) => {
// Empty bucket: claim it and hand back a reference to the stored value.
1195 let fullidx = self.table.put(idx, hash, k, v);
1196 let (_, val) = self.table.read_mut(&fullidx);
1199 table::Full(idx) => idx
// Only compare keys when the cached hashes match (cheap filter first).
1202 if idx.hash() == hash {
1203 let (bucket_k, bucket_v) = self.table.read_mut(&idx);
1204 // FIXME #12147 the conditional return confuses
1205 // borrowck if we return bucket_v directly
1206 let bv: *mut V = bucket_v;
1208 // Key already exists. Get its reference.
// SAFETY: bv was just derived from a live &mut borrow of this bucket.
1209 return unsafe {&mut *bv};
1213 let probe_dib = self.bucket_distance(&idx);
1215 if probe_dib < dib {
1216 // Found a luckier bucket than me. Better steal his spot.
1217 self.robin_hood(idx, probe_dib, hash, k, v);
1219 // Now that it's stolen, just read the value's pointer
1220 // right out of the table!
1221 match self.table.peek(probe) {
1222 table::Empty(_) => fail!("Just stole a spot, but now that spot's empty."),
1223 table::Full(idx) => {
1224 let (_, v) = self.table.read_mut(&idx);
1231 // We really shouldn't be here.
1232 fail!("Internal HashMap error: Out of space.");
1235 /// Inserts an element which has already been hashed, returning a reference
1236 /// to that element inside the hashtable. This is more efficient that using
1237 /// `insert`, since the key will not be rehashed.
1238 fn insert_hashed<'a>(&'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
// Ensure capacity *before* the nocheck insert — that's the only difference.
1239 let potential_new_size = self.table.size() + 1;
1240 self.make_some_room(potential_new_size);
1241 self.insert_hashed_nocheck(hash, k, v)
1244 /// Return the value corresponding to the key in the map, or insert
1245 /// and return the value if it doesn't exist.
1246 pub fn find_or_insert<'a>(&'a mut self, k: K, v: V) -> &'a mut V {
// Thin wrapper: no-op on hit, insert the supplied value on miss.
1247 self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
1250 /// Return the value corresponding to the key in the map, or create,
1251 /// insert, and return a new value if it doesn't exist.
1252 pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
// Lazily builds the value from the key only when the key is absent.
1254 self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
1257 /// Insert a key-value pair into the map if the key is not already present.
1258 /// Otherwise, modify the existing value for the key.
1259 /// Returns the new or modified value for the key.
1260 pub fn insert_or_update_with<'a>(
1266 self.find_with_or_insert_with(k, v, |k, v, _a| f(k, v), |_k, a| a)
1269 /// Modify and return the value corresponding to the key in the map, or
1270 /// insert and return a new value if it doesn't exist.
1272 /// This method allows for all insertion behaviours of a hashmap;
1273 /// see methods like `insert`, `find_or_insert` and
1274 /// `insert_or_update_with` for less general and more friendly
1275 /// variations of this.
1280 /// use std::collections::HashMap;
1282 /// // map some strings to vectors of strings
1283 /// let mut map = HashMap::new();
1284 /// map.insert("a key", vec!["value"]);
1285 /// map.insert("z key", vec!["value"]);
1287 /// let new = vec!["a key", "b key", "z key"];
1289 /// for k in new.move_iter() {
1290 /// map.find_with_or_insert_with(
1292 /// // if the key does exist either prepend or append this
1293 /// // new value based on the first letter of the key.
1294 /// |key, already, new| {
1295 /// if key.as_slice().starts_with("z") {
1296 /// already.unshift(new);
1298 /// already.push(new);
1301 /// // if the key doesn't exist in the map yet, add it in
1302 /// // the obvious way.
1303 /// |_k, v| vec![v]);
1306 /// assert_eq!(map.len(), 3);
1307 /// assert_eq!(map.get(&"a key"), &vec!["value", "new value"]);
1308 /// assert_eq!(map.get(&"b key"), &vec!["new value"]);
1309 /// assert_eq!(map.get(&"z key"), &vec!["new value", "value"]);
1311 pub fn find_with_or_insert_with<'a, A>(&'a mut self,
1314 found: |&K, &mut V, A|,
1315 not_found: |&K, A| -> V)
// The key is hashed exactly once and reused by both branches.
1317 let hash = self.make_hash(&k);
1318 match self.search_hashed(&hash, &k) {
// Miss: build the value from (key, argument) and insert, moving `k` in.
1320 let v = not_found(&k, a);
1321 self.insert_hashed(hash, k, v)
// Hit: let `found` mutate the existing value in place.
1324 let (_, v_ref) = self.table.read_mut(&idx);
1325 found(&k, v_ref, a);
1331 /// Retrieves a value for the given key, failing if the key is not present.
1332 pub fn get<'a>(&'a self, k: &K) -> &'a V {
1333 match self.find(k) {
1335 None => fail!("no entry found for key")
1339 /// Retrieves a (mutable) value for the given key, failing if the key is not present.
1340 pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
1341 match self.find_mut(k) {
1343 None => fail!("no entry found for key")
1347 /// Return true if the map contains a value for the specified key,
1348 /// using equivalence.
// `Equiv` allows lookup by a borrowed form of the key (e.g. &str for String)
// without constructing an owned key.
1349 pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
1350 self.search_equiv(key).is_some()
1353 /// Return the value corresponding to the key in the map, using
1355 pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
1356 match self.search_equiv(k) {
1359 let (_, v_ref) = self.table.read(&idx);
1365 /// An iterator visiting all keys in arbitrary order.
1366 /// Iterator element type is &'a K.
1367 pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
1368 self.iter().map(|(k, _v)| k)
1371 /// An iterator visiting all values in arbitrary order.
1372 /// Iterator element type is &'a V.
1373 pub fn values<'a>(&'a self) -> Values<'a, K, V> {
1374 self.iter().map(|(_k, v)| v)
1377 /// An iterator visiting all key-value pairs in arbitrary order.
1378 /// Iterator element type is (&'a K, &'a V).
1379 pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
1383 /// An iterator visiting all key-value pairs in arbitrary order,
1384 /// with mutable references to the values.
1385 /// Iterator element type is (&'a K, &'a mut V).
1386 pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
1387 self.table.mut_iter()
1390 /// Creates a consuming iterator, that is, one that moves each key-value
1391 /// pair out of the map in arbitrary order. The map cannot be used after
1393 pub fn move_iter(self) -> MoveEntries<K, V> {
// The raw-table iterator also yields the cached hash; drop it here.
1394 self.table.move_iter().map(|(_, k, v)| (k, v))
// By-value accessors, available only when the value type is Clone.
1398 impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
1399 /// Like `find`, but returns a copy of the value.
1400 pub fn find_copy(&self, k: &K) -> Option<V> {
1401 self.find(k).map(|v| (*v).clone())
1404 /// Like `get`, but returns a copy of the value.
// Inherits `get`'s failure on a missing key.
1405 pub fn get_copy(&self, k: &K) -> V {
1406 (*self.get(k)).clone()
// Two maps are equal iff they have the same length and every key of `self`
// maps to an equal value in `other` (order is irrelevant).
1410 impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
1411 fn eq(&self, other: &HashMap<K, V, H>) -> bool {
1412 if self.len() != other.len() { return false; }
1415 .all(|(key, value)| {
1416 match other.find(key) {
1418 Some(v) => *value == *v
1424 impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
// Renders the map as `{k1: v1, k2: v2}` in iteration (arbitrary) order.
1426 impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
1427 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1428 try!(write!(f, "{{"));
1430 for (i, (k, v)) in self.iter().enumerate() {
1431 if i != 0 { try!(write!(f, ", ")); }
1432 try!(write!(f, "{}: {}", *k, *v));
// Default map uses the hasher type's Default (randomly keyed for SipHasher).
1439 impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
1440 fn default() -> HashMap<K, V, H> {
1441 HashMap::with_hasher(Default::default())
// Iterator type aliases. Pre-1.0 Rust named closure-carrying adaptor types
// explicitly; `'static` is the closure bound, not the data lifetime.
1445 /// HashMap iterator
1446 pub type Entries<'a, K, V> = table::Entries<'a, K, V>;
1448 /// HashMap mutable values iterator
1449 pub type MutEntries<'a, K, V> = table::MutEntries<'a, K, V>;
1451 /// HashMap move iterator
1452 pub type MoveEntries<K, V> =
1453 iter::Map<'static, (table::SafeHash, K, V), (K, V), table::MoveEntries<K, V>>;
1455 /// HashMap keys iterator
1456 pub type Keys<'a, K, V> =
1457 iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
1459 /// HashMap values iterator
1460 pub type Values<'a, K, V> =
1461 iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
// Collect (K, V) pairs into a map, preallocating from the size hint's lower
// bound to avoid repeated growth during collection.
1463 impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
1464 fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
1465 let (lower, _) = iter.size_hint();
1466 let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
// Drain an iterator into an existing map; later duplicates overwrite earlier.
1472 impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
1473 fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
1474 for (k, v) in iter {
1480 /// HashSet iterator
1481 pub type SetItems<'a, K> =
1482 iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
1484 /// HashSet move iterator
1485 pub type SetMoveItems<K> =
1486 iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
1488 /// An implementation of a hash set using the underlying representation of a
1489 /// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
1490 /// requires that the elements implement the `Eq` and `Hash` traits.
// Newtype over HashMap<T, ()>: every set operation delegates to the map.
1492 pub struct HashSet<T, H = sip::SipHasher> {
1493 map: HashMap<T, (), H>
// Set equality: equal length plus mutual containment of self's elements.
1496 impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
1497 fn eq(&self, other: &HashSet<T, H>) -> bool {
1498 if self.len() != other.len() { return false; }
1500 self.iter().all(|key| other.contains(key))
1504 impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
// Collection/Mutable/Set/MutableSet: all straight delegation to the inner map.
1506 impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
1507 fn len(&self) -> uint { self.map.len() }
1510 impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
1511 fn clear(&mut self) { self.map.clear() }
1514 impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
1515 fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
1517 fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
1518 self.iter().all(|v| !other.contains(v))
1521 fn is_subset(&self, other: &HashSet<T, H>) -> bool {
1522 self.iter().all(|v| other.contains(v))
1526 impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
// insert/remove report whether the set actually changed (map's bool result).
1527 fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
1529 fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
// Constructors for the default (SipHash-keyed) HashSet.
1532 impl<T: Hash + Eq> HashSet<T, sip::SipHasher> {
1533 /// Create an empty HashSet
1534 pub fn new() -> HashSet<T, sip::SipHasher> {
1535 HashSet::with_capacity(INITIAL_CAPACITY)
1538 /// Create an empty HashSet with space for at least `n` elements in
1540 pub fn with_capacity(capacity: uint) -> HashSet<T, sip::SipHasher> {
1541 HashSet { map: HashMap::with_capacity(capacity) }
1545 impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
1546 /// Creates a new empty hash set which will use the given hasher to hash
1549 /// The hash set is also created with the default initial capacity.
1550 pub fn with_hasher(hasher: H) -> HashSet<T, H> {
1551 HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
1554 /// Create an empty HashSet with space for at least `capacity`
1555 /// elements in the hash table, using `hasher` to hash the keys.
1557 /// Warning: `hasher` is normally randomly generated, and
1558 /// is designed to allow `HashSet`s to be resistant to attacks that
1559 /// cause many collisions and very poor performance. Setting it
1560 /// manually using this function can expose a DoS attack vector.
1561 pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
1562 HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
1565 /// Reserve space for at least `n` elements in the hash table.
1566 pub fn reserve(&mut self, n: uint) {
1570 /// Returns true if the hash set contains a value equivalent to the
1571 /// given query value.
1572 pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
1573 self.map.contains_key_equiv(value)
1576 /// An iterator visiting all elements in arbitrary order.
1577 /// Iterator element type is &'a T.
1578 pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
1582 /// Creates a consuming iterator, that is, one that moves each value out
1583 /// of the set in arbitrary order. The set cannot be used after calling
1585 pub fn move_iter(self) -> SetMoveItems<T> {
1586 self.map.move_iter().map(|(k, _)| k)
1589 /// Visit the values representing the difference
// Repeat::new(other).zip(...) smuggles a capture of `other` into the
// filter closure — pre-1.0 closures could not capture environment here.
1590 pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
1591 Repeat::new(other).zip(self.iter())
1592 .filter_map(|(other, elt)| {
1593 if !other.contains(elt) { Some(elt) } else { None }
1597 /// Visit the values representing the symmetric difference
1598 pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
1599 -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
// (self - other) ++ (other - self)
1600 self.difference(other).chain(other.difference(self))
1603 /// Visit the values representing the intersection
1604 pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
1605 -> SetAlgebraItems<'a, T, H> {
1606 Repeat::new(other).zip(self.iter())
1607 .filter_map(|(other, elt)| {
1608 if other.contains(elt) { Some(elt) } else { None }
1612 /// Visit the values representing the union
1613 pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
1614 -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
// All of self, then everything in other that self lacks — no duplicates.
1615 self.iter().chain(other.difference(self))
// Renders the set as `{a, b}` in iteration (arbitrary) order.
1619 impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
1620 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1621 try!(write!(f, "{{"));
1623 for (i, x) in self.iter().enumerate() {
1624 if i != 0 { try!(write!(f, ", ")); }
1625 try!(write!(f, "{}", *x));
// Collect values into a set, preallocating from the size hint's lower bound.
1632 impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
1633 fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
1634 let (lower, _) = iter.size_hint();
1635 let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
1641 impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
1642 fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
1649 impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
1650 fn default() -> HashSet<T, H> {
1651 HashSet::with_hasher(Default::default())
1655 // `Repeat` is used to feed the filter closure an explicit capture
1656 // of a reference to the other set
1657 /// Set operations iterator
1658 pub type SetAlgebraItems<'a, T, H> =
1659 FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
1660 Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
// --- test_map module interior (module header above this dump's view). ---
// NOTE(review): many lines (#[test] attributes, fn signatures, braces) are
// elided by the line-sampling; code below is kept byte-identical.
1669 use iter::{Iterator,range_inclusive,range_step_inclusive};
// Helper type for the *_equiv tests: hashes like an int and is Equiv to int.
1672 struct KindaIntLike(int);
1674 impl Equiv<int> for KindaIntLike {
1675 fn equiv(&self, other: &int) -> bool {
1676 let KindaIntLike(this) = *self;
1680 impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
1681 fn hash(&self, state: &mut S) {
1682 let KindaIntLike(this) = *self;
// A zero-capacity map must still accept inserts and answer lookups.
1688 fn test_create_capacity_zero() {
1689 let mut m = HashMap::with_capacity(0);
1691 assert!(m.insert(1i, 1i));
1693 assert!(m.contains_key(&1));
1694 assert!(!m.contains_key(&0));
// Basic insert/len/find behavior.
1699 let mut m = HashMap::new();
1700 assert_eq!(m.len(), 0);
1701 assert!(m.insert(1i, 2i));
1702 assert_eq!(m.len(), 1);
1703 assert!(m.insert(2i, 4i));
1704 assert_eq!(m.len(), 2);
1705 assert_eq!(*m.find(&1).unwrap(), 2);
1706 assert_eq!(*m.find(&2).unwrap(), 4);
// Drop-counting harness: drop_vector[k] tracks live Dropable instances with
// id k; the test asserts the map drops exactly what it owns, exactly once.
1709 local_data_key!(drop_vector: RefCell<Vec<int>>)
1711 #[deriving(Hash, PartialEq, Eq)]
1718 fn new(k: uint) -> Dropable {
1719 let v = drop_vector.get().unwrap();
1720 v.borrow_mut().as_mut_slice()[k] += 1;
1726 impl Drop for Dropable {
1727 fn drop(&mut self) {
1728 let v = drop_vector.get().unwrap();
1729 v.borrow_mut().as_mut_slice()[self.k] -= 1;
1735 drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
1738 let mut m = HashMap::new();
1740 let v = drop_vector.get().unwrap();
1741 for i in range(0u, 200) {
1742 assert_eq!(v.borrow().as_slice()[i], 0);
1746 for i in range(0u, 100) {
1747 let d1 = Dropable::new(i);
1748 let d2 = Dropable::new(i+100);
1752 let v = drop_vector.get().unwrap();
1753 for i in range(0u, 200) {
1754 assert_eq!(v.borrow().as_slice()[i], 1);
// Popping half the keys must drop both the key and its value.
1758 for i in range(0u, 50) {
1759 let k = Dropable::new(i);
1762 assert!(v.is_some());
1764 let v = drop_vector.get().unwrap();
1765 assert_eq!(v.borrow().as_slice()[i], 1);
1766 assert_eq!(v.borrow().as_slice()[i+100], 1);
1769 let v = drop_vector.get().unwrap();
1770 for i in range(0u, 50) {
1771 assert_eq!(v.borrow().as_slice()[i], 0);
1772 assert_eq!(v.borrow().as_slice()[i+100], 0);
1775 for i in range(50u, 100) {
1776 assert_eq!(v.borrow().as_slice()[i], 1);
1777 assert_eq!(v.borrow().as_slice()[i+100], 1);
// After the map itself is dropped, every counter must return to zero.
1781 let v = drop_vector.get().unwrap();
1782 for i in range(0u, 200) {
1783 assert_eq!(v.borrow().as_slice()[i], 0);
// pop on an empty map must return None without failing.
1788 fn test_empty_pop() {
1789 let mut m: HashMap<int, bool> = HashMap::new();
1790 assert_eq!(m.pop(&0), None);
// Stress test: repeated grow/shrink cycles, checking full membership after
// every single insert and remove to catch robin-hood displacement bugs.
1794 fn test_lots_of_insertions() {
1795 let mut m = HashMap::new();
1797 // Try this a few times to make sure we never screw up the hashmap's
1799 for _ in range(0i, 10) {
1800 assert!(m.is_empty());
1802 for i in range_inclusive(1i, 1000) {
1803 assert!(m.insert(i, i));
1805 for j in range_inclusive(1, i) {
1807 assert_eq!(r, Some(&j));
1810 for j in range_inclusive(i+1, 1000) {
1812 assert_eq!(r, None);
1816 for i in range_inclusive(1001i, 2000) {
1817 assert!(!m.contains_key(&i));
1821 for i in range_inclusive(1i, 1000) {
1822 assert!(m.remove(&i));
1824 for j in range_inclusive(1, i) {
1825 assert!(!m.contains_key(&j));
1828 for j in range_inclusive(i+1, 1000) {
1829 assert!(m.contains_key(&j));
1833 for i in range_inclusive(1i, 1000) {
1834 assert!(!m.contains_key(&i));
1837 for i in range_inclusive(1i, 1000) {
1838 assert!(m.insert(i, i));
// Remove in descending order as well, to exercise the other probe direction.
1842 for i in range_step_inclusive(1000i, 1, -1) {
1843 assert!(m.remove(&i));
1845 for j in range_inclusive(i, 1000) {
1846 assert!(!m.contains_key(&j));
1849 for j in range_inclusive(1, i-1) {
1850 assert!(m.contains_key(&j));
// find_mut must hand back a reference through which the value is writable.
1857 fn test_find_mut() {
1858 let mut m = HashMap::new();
1859 assert!(m.insert(1i, 12i));
1860 assert!(m.insert(2i, 8i));
1861 assert!(m.insert(5i, 14i));
1863 match m.find_mut(&5) {
1864 None => fail!(), Some(x) => *x = new
1866 assert_eq!(m.find(&5), Some(&new));
// insert on an existing key overwrites and returns false.
1870 fn test_insert_overwrite() {
1871 let mut m = HashMap::new();
1872 assert!(m.insert(1i, 2i));
1873 assert_eq!(*m.find(&1).unwrap(), 2);
1874 assert!(!m.insert(1i, 3i));
1875 assert_eq!(*m.find(&1).unwrap(), 3);
// Keys 1, 5, 9 collide in a capacity-4 table, forcing probe chains.
1879 fn test_insert_conflicts() {
1880 let mut m = HashMap::with_capacity(4);
1881 assert!(m.insert(1i, 2i));
1882 assert!(m.insert(5i, 3i));
1883 assert!(m.insert(9i, 4i));
1884 assert_eq!(*m.find(&9).unwrap(), 4);
1885 assert_eq!(*m.find(&5).unwrap(), 3);
1886 assert_eq!(*m.find(&1).unwrap(), 2);
// Removing the head of a collision chain must not orphan the others.
1890 fn test_conflict_remove() {
1891 let mut m = HashMap::with_capacity(4);
1892 assert!(m.insert(1i, 2i));
1893 assert_eq!(*m.find(&1).unwrap(), 2);
1894 assert!(m.insert(5, 3));
1895 assert_eq!(*m.find(&1).unwrap(), 2);
1896 assert_eq!(*m.find(&5).unwrap(), 3);
1897 assert!(m.insert(9, 4));
1898 assert_eq!(*m.find(&1).unwrap(), 2);
1899 assert_eq!(*m.find(&5).unwrap(), 3);
1900 assert_eq!(*m.find(&9).unwrap(), 4);
1901 assert!(m.remove(&1));
1902 assert_eq!(*m.find(&9).unwrap(), 4);
1903 assert_eq!(*m.find(&5).unwrap(), 3);
1907 fn test_is_empty() {
1908 let mut m = HashMap::with_capacity(4);
1909 assert!(m.insert(1i, 2i));
1910 assert!(!m.is_empty());
1911 assert!(m.remove(&1));
1912 assert!(m.is_empty());
// pop returns the removed value once, then None.
1917 let mut m = HashMap::new();
1919 assert_eq!(m.pop(&1), Some(2));
1920 assert_eq!(m.pop(&1), None);
// pop_equiv: remove via an Equiv key without building the owned key type.
1924 #[allow(experimental)]
1925 fn test_pop_equiv() {
1926 let mut m = HashMap::new();
1928 assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
1929 assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
// swap returns the previous value for the key, or None on first insert.
1934 let mut m = HashMap::new();
1935 assert_eq!(m.swap(1i, 2i), None);
1936 assert_eq!(m.swap(1i, 3i), Some(2));
1937 assert_eq!(m.swap(1i, 4i), Some(3));
// move_iter yields every pair exactly once, in unspecified order.
1941 fn test_move_iter() {
1943 let mut hm = HashMap::new();
1951 let v = hm.move_iter().collect::<Vec<(char, int)>>();
1952 assert!([('a', 1), ('b', 2)] == v.as_slice() || [('b', 2), ('a', 1)] == v.as_slice());
// Iteration coverage: a 32-entry bitmask proves each key appears exactly once.
1957 let mut m = HashMap::with_capacity(4);
1958 for i in range(0u, 32) {
1959 assert!(m.insert(i, i*2));
1961 assert_eq!(m.len(), 32);
1963 let mut observed: u32 = 0;
1965 for (k, v) in m.iter() {
1966 assert_eq!(*v, *k * 2);
1967 observed |= 1 << *k;
1969 assert_eq!(observed, 0xFFFF_FFFF);
// keys() yields every key of a collected map.
1974 let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
1975 let map = vec.move_iter().collect::<HashMap<int, char>>();
1976 let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
1977 assert_eq!(keys.len(), 3);
1978 assert!(keys.contains(&1));
1979 assert!(keys.contains(&2));
1980 assert!(keys.contains(&3));
// values() yields every value of a collected map.
1985 let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
1986 let map = vec.move_iter().collect::<HashMap<int, char>>();
1987 let values = map.values().map(|&v| v).collect::<Vec<char>>();
1988 assert_eq!(values.len(), 3);
1989 assert!(values.contains(&'a'));
1990 assert!(values.contains(&'b'));
1991 assert!(values.contains(&'c'));
// find: None when absent, Some(&v) when present.
1996 let mut m = HashMap::new();
1997 assert!(m.find(&1i).is_none());
2001 Some(v) => assert_eq!(*v, 2)
// Map equality is order-independent.
2007 let mut m1 = HashMap::new();
2012 let mut m2 = HashMap::new();
// Show formatting: either iteration order is acceptable; empty map is "{}".
2025 let mut map: HashMap<int, int> = HashMap::new();
2026 let empty: HashMap<int, int> = HashMap::new();
2031 let map_str = format!("{}", map);
2033 assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
2034 assert_eq!(format!("{}", empty), "{}".to_string());
// Inserting until capacity changes exercises the grow path.
2039 let mut m = HashMap::new();
2041 assert_eq!(m.len(), 0);
2042 assert!(m.is_empty());
2045 let old_cap = m.table.capacity();
2046 while old_cap == m.table.capacity() {
2051 assert_eq!(m.len(), i);
2052 assert!(!m.is_empty());
// Checks DefaultResizePolicy thresholds: grow at ~3/4 full, shrink below the
// reserved minimum is never performed.
2056 fn test_resize_policy() {
2057 let mut m = HashMap::new();
2059 assert_eq!(m.len(), 0);
2060 assert!(m.is_empty());
2062 let initial_cap = m.table.capacity();
2063 m.reserve(initial_cap * 2);
2064 let cap = m.table.capacity();
2066 assert_eq!(cap, initial_cap * 2);
2069 for _ in range(0, cap * 3 / 4) {
2074 assert_eq!(m.len(), i);
2075 assert_eq!(m.table.capacity(), cap);
2077 for _ in range(0, cap / 4) {
2082 let new_cap = m.table.capacity();
2083 assert_eq!(new_cap, cap * 2);
2085 for _ in range(0, cap / 2) {
2088 assert_eq!(m.table.capacity(), new_cap);
2091 for _ in range(0, cap / 2 - 1) {
2096 assert_eq!(m.table.capacity(), cap);
2097 assert_eq!(m.len(), i);
2098 assert!(!m.is_empty());
// find_equiv: look up String keys by &str without allocating.
2102 fn test_find_equiv() {
2103 let mut m = HashMap::new();
2105 let (foo, bar, baz) = (1i,2i,3i);
2106 m.insert("foo".to_string(), foo);
2107 m.insert("bar".to_string(), bar);
2108 m.insert("baz".to_string(), baz);
2111 assert_eq!(m.find_equiv(&("foo")), Some(&foo));
2112 assert_eq!(m.find_equiv(&("bar")), Some(&bar));
2113 assert_eq!(m.find_equiv(&("baz")), Some(&baz));
2115 assert_eq!(m.find_equiv(&("qux")), None);
2119 fn test_from_iter() {
2120 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2122 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2124 for &(k, v) in xs.iter() {
2125 assert_eq!(map.find(&k), Some(&v));
// size_hint must stay exact while iterating (6 entries, 3 consumed → 3 left).
2130 fn test_size_hint() {
2131 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2133 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2135 let mut iter = map.iter();
2137 for _ in iter.by_ref().take(3) {}
2139 assert_eq!(iter.size_hint(), (3, Some(3)));
2143 fn test_mut_size_hint() {
2144 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2146 let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2148 let mut iter = map.mut_iter();
2150 for _ in iter.by_ref().take(3) {}
2152 assert_eq!(iter.size_hint(), (3, Some(3)));
// --- test_set module interior (module header above this dump's view). ---
2161 use slice::ImmutableEqVector;
2162 use collections::Collection;
// is_disjoint must be symmetric and flip as soon as one element is shared.
2165 fn test_disjoint() {
2166 let mut xs = HashSet::new();
2167 let mut ys = HashSet::new();
2168 assert!(xs.is_disjoint(&ys));
2169 assert!(ys.is_disjoint(&xs));
2170 assert!(xs.insert(5i));
2171 assert!(ys.insert(11i));
2172 assert!(xs.is_disjoint(&ys));
2173 assert!(ys.is_disjoint(&xs));
2174 assert!(xs.insert(7));
2175 assert!(xs.insert(19));
2176 assert!(xs.insert(4));
2177 assert!(ys.insert(2));
2178 assert!(ys.insert(-11));
2179 assert!(xs.is_disjoint(&ys));
2180 assert!(ys.is_disjoint(&xs));
2181 assert!(ys.insert(7));
2182 assert!(!xs.is_disjoint(&ys));
2183 assert!(!ys.is_disjoint(&xs));
// subset/superset relations in both directions, before and after b ⊇ a holds.
2187 fn test_subset_and_superset() {
2188 let mut a = HashSet::new();
2189 assert!(a.insert(0i));
2190 assert!(a.insert(5));
2191 assert!(a.insert(11));
2192 assert!(a.insert(7));
2194 let mut b = HashSet::new();
2195 assert!(b.insert(0i));
2196 assert!(b.insert(7));
2197 assert!(b.insert(19));
2198 assert!(b.insert(250));
2199 assert!(b.insert(11));
2200 assert!(b.insert(200));
2202 assert!(!a.is_subset(&b));
2203 assert!(!a.is_superset(&b));
2204 assert!(!b.is_subset(&a));
2205 assert!(!b.is_superset(&a));
2207 assert!(b.insert(5));
2209 assert!(a.is_subset(&b));
2210 assert!(!a.is_superset(&b));
2211 assert!(!b.is_subset(&a));
2212 assert!(b.is_superset(&a));
// Set iteration: 32-bit mask proves each element is visited exactly once.
2217 let mut a = HashSet::new();
2218 for i in range(0u, 32) {
2219 assert!(a.insert(i));
2221 let mut observed: u32 = 0;
2223 observed |= 1 << *k;
2225 assert_eq!(observed, 0xFFFF_FFFF);
2230 let mut a = HashSet::new();
2231 let mut b = HashSet::new();
2233 assert!(a.insert(11i));
2234 assert!(a.insert(1));
2235 assert!(a.insert(3));
2236 assert!(a.insert(77));
2237 assert!(a.insert(103));
2238 assert!(a.insert(5));
2239 assert!(a.insert(-5));
2241 assert!(b.insert(2i));
2242 assert!(b.insert(11));
2243 assert!(b.insert(77));
2244 assert!(b.insert(-9));
2245 assert!(b.insert(-42));
2246 assert!(b.insert(5));
2247 assert!(b.insert(3));
2250 let expected = [3, 5, 11, 77];
2251 for x in a.intersection(&b) {
2252 assert!(expected.contains(x));
2255 assert_eq!(i, expected.len());
2259 fn test_difference() {
2260 let mut a = HashSet::new();
2261 let mut b = HashSet::new();
2263 assert!(a.insert(1i));
2264 assert!(a.insert(3));
2265 assert!(a.insert(5));
2266 assert!(a.insert(9));
2267 assert!(a.insert(11));
2269 assert!(b.insert(3i));
2270 assert!(b.insert(9));
2273 let expected = [1, 5, 11];
2274 for x in a.difference(&b) {
2275 assert!(expected.contains(x));
2278 assert_eq!(i, expected.len());
2282 fn test_symmetric_difference() {
2283 let mut a = HashSet::new();
2284 let mut b = HashSet::new();
2286 assert!(a.insert(1i));
2287 assert!(a.insert(3));
2288 assert!(a.insert(5));
2289 assert!(a.insert(9));
2290 assert!(a.insert(11));
2292 assert!(b.insert(-2i));
2293 assert!(b.insert(3));
2294 assert!(b.insert(9));
2295 assert!(b.insert(14));
2296 assert!(b.insert(22));
2299 let expected = [-2, 1, 5, 11, 14, 22];
2300 for x in a.symmetric_difference(&b) {
2301 assert!(expected.contains(x));
2304 assert_eq!(i, expected.len());
2309 let mut a = HashSet::new();
2310 let mut b = HashSet::new();
2312 assert!(a.insert(1i));
2313 assert!(a.insert(3));
2314 assert!(a.insert(5));
2315 assert!(a.insert(9));
2316 assert!(a.insert(11));
2317 assert!(a.insert(16));
2318 assert!(a.insert(19));
2319 assert!(a.insert(24));
2321 assert!(b.insert(-2i));
2322 assert!(b.insert(1));
2323 assert!(b.insert(5));
2324 assert!(b.insert(9));
2325 assert!(b.insert(13));
2326 assert!(b.insert(19));
2329 let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
2330 for x in a.union(&b) {
2331 assert!(expected.contains(x));
2334 assert_eq!(i, expected.len());
2338 fn test_from_iter() {
2339 let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
2341 let set: HashSet<int> = xs.iter().map(|&x| x).collect();
2343 for x in xs.iter() {
2344 assert!(set.contains(x));
2349 fn test_move_iter() {
2351 let mut hs = HashSet::new();
2359 let v = hs.move_iter().collect::<Vec<char>>();
2360 assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
2365 // These constants once happened to expose a bug in insert().
2366 // I'm keeping them around to prevent a regression.
2367 let mut s1 = HashSet::new();
2373 let mut s2 = HashSet::new();
2387 let mut set: HashSet<int> = HashSet::new();
2388 let empty: HashSet<int> = HashSet::new();
2393 let set_str = format!("{}", set);
2395 assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
2396 assert_eq!(format!("{}", empty), "{}".to_string());
// --- bench module interior (module header above this dump's view). ---
// Benchmarks use the libtest Bencher; setup happens outside b.iter closures
// (elided here) so only the measured operation is timed.
2405 use self::test::Bencher;
2406 use iter::{range_inclusive};
// Cost of constructing and dropping an empty map.
2409 fn new_drop(b : &mut Bencher) {
2413 let m : HashMap<int, int> = HashMap::new();
2414 assert_eq!(m.len(), 0);
// Cost of one insert into a fresh map, including construction and drop.
2419 fn new_insert_drop(b : &mut Bencher) {
2423 let mut m = HashMap::new();
2425 assert_eq!(m.len(), 1);
// Steady-state insert cost on a 1000-entry map.
2430 fn insert(b: &mut Bencher) {
2433 let mut m = HashMap::new();
2435 for i in range_inclusive(1i, 1000) {
// Lookup cost for keys that are present.
2448 fn find_existing(b: &mut Bencher) {
2451 let mut m = HashMap::new();
2453 for i in range_inclusive(1i, 1000) {
2458 for i in range_inclusive(1i, 1000) {
// Lookup cost for keys that are absent (full probe chain traversal).
2465 fn find_nonexisting(b: &mut Bencher) {
2468 let mut m = HashMap::new();
2470 for i in range_inclusive(1i, 1000) {
2475 for i in range_inclusive(1001i, 2000) {
// FIFO-style churn: remove oldest, insert newest, at constant size.
2482 fn hashmap_as_queue(b: &mut Bencher) {
2485 let mut m = HashMap::new();
2487 for i in range_inclusive(1i, 1000) {
2495 m.insert(k + 1000, k + 1000);
2501 fn find_pop_insert(b: &mut Bencher) {
2504 let mut m = HashMap::new();
2506 for i in range_inclusive(1i, 1000) {
2514 m.find(&(k + 2000));
2516 m.insert(k + 1000, k + 1000);