1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 // ignore-lexer-test FIXME #15883
14 use self::SearchResult::*;
15 use self::VacantEntryState::*;
17 use borrow::BorrowFrom;
19 use cmp::{max, Eq, PartialEq};
21 use fmt::{self, Show};
22 use hash::{self, Hash, SipHasher};
23 use iter::{self, Iterator, ExactSizeIterator, IteratorExt, FromIterator, Extend, Map};
25 use mem::{self, replace};
26 use num::{Int, UnsignedInt};
27 use ops::{Deref, FnMut, Index, IndexMut};
28 use option::Option::{self, Some, None};
29 use rand::{self, Rng};
30 use result::Result::{self, Ok, Err};
42 use super::table::BucketState::{
46 use super::state::HashState;
// log2 of the bucket count every freshly allocated table starts with.
48 const INITIAL_LOG2_CAP: uint = 5;
// Initial bucket count of a non-empty map: 1 << 5 == 32.
49 pub const INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
51 /// The default behavior of HashMap implements a load factor of 90.9%.
52 /// This behavior is characterized by the following condition:
54 /// - if size > 0.909 * capacity: grow the map
// Zero-sized marker type: the policy carries no state, only methods.
56 struct DefaultResizePolicy;
58 impl DefaultResizePolicy {
// Trivial constructor for the zero-sized policy value.
59 fn new() -> DefaultResizePolicy {
// Smallest internal capacity that can hold `usable_size` entries without
// violating the load factor.
// NOTE(review): the body is elided in this excerpt — confirm the exact
// rounding behavior against the full source.
64 fn min_capacity(&self, usable_size: uint) -> uint {
65 // Here, we are rephrasing the logic by specifying the lower limit
68 // - if `cap < size * 1.1`: grow the map
72 /// An inverse of `min_capacity`, approximately.
// Maximum number of entries an internal capacity of `cap` may hold.
74 fn usable_capacity(&self, cap: uint) -> uint {
75 // As the number of entries approaches usable capacity,
76 // min_capacity(size) must be smaller than the internal capacity,
77 // so that the map is not resized:
78 // `min_capacity(usable_capacity(x)) <= x`.
79 // The left-hand side can only be smaller due to flooring by integer
82 // This doesn't have to be checked for overflow since allocation size
83 // in bytes will overflow earlier than multiplication by 10.
// Sanity-check that the two capacity conversions round-trip without ever
// over-promising space: each composition must stay <= the identity.
89 fn test_resize_policy() {
91 let rp = DefaultResizePolicy;
92 for n in range(0u, 1000) {
93 assert!(rp.min_capacity(rp.usable_capacity(n)) <= n);
94 assert!(rp.usable_capacity(rp.min_capacity(n)) <= n);
98 // The main performance trick in this hashmap is called Robin Hood Hashing.
99 // It gains its excellent performance from one essential operation:
101 // If an insertion collides with an existing element, and that element's
102 // "probe distance" (how far away the element is from its ideal location)
103 // is higher than how far we've already probed, swap the elements.
105 // This massively lowers variance in probe distance, and allows us to get very
106 // high load factors with good performance. The 90% load factor I use is rather
109 // > Why a load factor of approximately 90%?
111 // In general, all the distances to initial buckets will converge on the mean.
112 // At a load factor of α, the odds of finding the target bucket after k
113 // probes is approximately 1-α^k. If we set this equal to 50% (since we converge
114 // on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
115 // this down to make the math easier on the CPU and avoid its FPU.
116 // Since on average we start the probing in the middle of a cache line, this
117 // strategy pulls in two cache lines of hashes on every lookup. I think that's
118 // pretty good, but if you want to trade off some space, it could go down to one
119 // cache line on average with an α of 0.84.
121 // > Wait, what? Where did you get 1-α^k from?
123 // On the first probe, your odds of a collision with an existing element is α.
124 // The odds of doing this twice in a row is approximately α^2. For three times,
125 // α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
126 // colliding after k tries is 1-α^k.
128 // The paper from 1986 cited below mentions an implementation which keeps track
129 // of the distance-to-initial-bucket histogram. This approach is not suitable
130 // for modern architectures because it requires maintaining an internal data
131 // structure. This allows very good first guesses, but we are most concerned
132 // with guessing entire cache lines, not individual indexes. Furthermore, array
133 // accesses are no longer linear and in one direction, as we have now. There
134 // is also memory and cache pressure that this would entail that would be very
135 // difficult to properly see in a microbenchmark.
137 // ## Future Improvements (FIXME!)
139 // Allow the load factor to be changed dynamically and/or at initialization.
141 // Also, would it be possible for us to reuse storage when growing the
142 // underlying table? This is exactly the use case for 'realloc', and may
143 // be worth exploring.
145 // ## Future Optimizations (FIXME!)
147 // Another possible design choice that I made without any real reason is
148 // parameterizing the raw table over keys and values. Technically, all we need
149 // is the size and alignment of keys and values, and the code should be just as
150 // efficient (well, we might need one for power-of-two size and one for not...).
151 // This has the potential to reduce code bloat in rust executables, without
152 // really losing anything except 4 words (key size, key alignment, val size,
153 // val alignment) which can be passed in to every call of a `RawTable` function.
154 // This would definitely be an avenue worth exploring if people start complaining
155 // about the size of rust executables.
157 // Annotate exceedingly likely branches in `table::make_hash`
158 // and `search_hashed` to reduce instruction cache pressure
159 // and mispredictions once it becomes possible (blocked on issue #11092).
161 // Shrinking the table could simply reallocate in place after moving buckets
162 // to the first half.
164 // The growth algorithm (fragment of the Proof of Correctness)
165 // --------------------
167 // The growth algorithm is basically a fast path of the naive reinsertion-
168 // during-resize algorithm. Other paths should never be taken.
170 // Consider growing a robin hood hashtable of capacity n. Normally, we do this
171 // by allocating a new table of capacity `2n`, and then individually reinsert
172 // each element in the old table into the new one. This guarantees that the
173 // new table is a valid robin hood hashtable with all the desired statistical
174 // properties. Remark that the order we reinsert the elements in should not
175 // matter. For simplicity and efficiency, we will consider only linear
176 // reinsertions, which consist of reinserting all elements in the old table
177 // into the new one by increasing order of index. However we will not be
178 // starting our reinsertions from index 0 in general. If we start from index
179 // i, for the purpose of reinsertion we will consider all elements with real
180 // index j < i to have virtual index n + j.
182 // Our hash generation scheme consists of generating a 64-bit hash and
183 // truncating the most significant bits. When moving to the new table, we
184 // simply introduce a new bit to the front of the hash. Therefore, if an
185 // elements has ideal index i in the old table, it can have one of two ideal
186 // locations in the new table. If the new bit is 0, then the new ideal index
187 // is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
188 // we are producing two independent tables of size n, and for each element we
189 // independently choose which table to insert it into with equal probability.
190 // However, rather than wrapping around themselves on overflowing their
191 // indexes, the first table overflows into the second, and the second into
192 // the first. Visually, our new table will look something like:
194 // [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
196 // Where x's are elements inserted into the first table, y's are elements
197 // inserted into the second, and _'s are empty sections. We now define a few
198 // key concepts that we will use later. Note that this is a very abstract
199 // perspective of the table. A real resized table would be at least half
202 // Theorem: A linear robin hood reinsertion from the first ideal element
203 // produces identical results to a linear naive reinsertion from the same
206 // FIXME(Gankro, pczarn): review the proof and put it all in a separate doc.rs
208 /// A hash map implementation which uses linear probing with Robin
209 /// Hood bucket stealing.
211 /// The hashes are all keyed by the task-local random number generator
212 /// on creation by default. This means that the ordering of the keys is
213 /// randomized, but makes the tables more resistant to
214 /// denial-of-service attacks (Hash DoS). This behaviour can be
215 /// overridden with one of the constructors.
217 /// It is required that the keys implement the `Eq` and `Hash` traits, although
218 /// this can frequently be achieved by using `#[derive(Eq, Hash)]`.
220 /// Relevant papers/articles:
222 /// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
223 /// 2. Emmanuel Goossaert. ["Robin Hood
224 /// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
225 /// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
226 /// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
231 /// use std::collections::HashMap;
233 /// // type inference lets us omit an explicit type signature (which
234 /// // would be `HashMap<&str, &str>` in this example).
235 /// let mut book_reviews = HashMap::new();
237 /// // review some books.
238 /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
239 /// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
240 /// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
241 /// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
243 /// // check for a specific one.
244 /// if !book_reviews.contains_key(&("Les Misérables")) {
245 /// println!("We've got {} reviews, but Les Misérables ain't one.",
246 /// book_reviews.len());
249 /// // oops, this review has a lot of spelling mistakes, let's delete it.
250 /// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
252 /// // look up the values associated with some keys.
253 /// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
254 /// for book in to_find.iter() {
255 /// match book_reviews.get(book) {
256 /// Some(review) => println!("{}: {}", *book, *review),
257 /// None => println!("{} is unreviewed.", *book)
261 /// // iterate over everything.
262 /// for (book, review) in book_reviews.iter() {
263 /// println!("{}: \"{}\"", *book, *review);
267 /// The easiest way to use `HashMap` with a custom type as key is to derive `Eq` and `Hash`.
268 /// We must also derive `PartialEq`.
271 /// use std::collections::HashMap;
273 /// #[derive(Hash, Eq, PartialEq, Show)]
280 /// /// Create a new Viking.
281 /// fn new(name: &str, country: &str) -> Viking {
282 /// Viking { name: name.to_string(), country: country.to_string() }
286 /// // Use a HashMap to store the vikings' health points.
287 /// let mut vikings = HashMap::new();
289 /// vikings.insert(Viking::new("Einar", "Norway"), 25u);
290 /// vikings.insert(Viking::new("Olaf", "Denmark"), 24u);
291 /// vikings.insert(Viking::new("Harald", "Iceland"), 12u);
293 /// // Use derived implementation to print the status of the vikings.
294 /// for (viking, health) in vikings.iter() {
295 /// println!("{:?} has {} hp", viking, health);
300 pub struct HashMap<K, V, S = RandomState> {
301 // All hashes are keyed on these values, to prevent hash collision attacks.
// (The `hash_state: S` field itself is elided in this excerpt.)
// Raw bucket array storing (hash, key, value) triples.
304 table: RawTable<K, V>,
// Decides when the table grows or shrinks; see DefaultResizePolicy above.
306 resize_policy: DefaultResizePolicy,
309 /// Search for a pre-hashed key.
///
/// Probes linearly from `hash`'s ideal bucket, calling `is_match` on each
/// candidate key whose stored hash equals `hash`. Returns `FoundExisting`
/// on a match; otherwise hands the table reference back via `TableRef`.
310 fn search_hashed<K, V, M, F>(table: M,
313 -> SearchResult<K, V, M> where
314 M: Deref<Target=RawTable<K, V>>,
315 F: FnMut(&K) -> bool,
317 let size = table.size();
318 let mut probe = Bucket::new(table, hash);
// Ideal (initial) bucket index for this hash.
319 let ib = probe.index();
// At most `size` probes: beyond that every element has been visited.
321 while probe.index() != ib + size {
322 let full = match probe.peek() {
323 Empty(b) => return TableRef(b.into_table()), // hit an empty bucket
// Robin Hood invariant: elements on a probe chain are ordered by their
// distance to the initial bucket. Hitting a "richer" element (closer to
// home than we have probed) proves our key cannot appear further on.
327 if full.distance() + ib < full.index() {
328 // We can finish the search early if we hit any bucket
329 // with a lower distance to initial bucket than we've probed.
330 return TableRef(full.into_table());
333 // If the hash doesn't match, it can't be this one..
334 if hash == full.hash() {
335 // If the key doesn't match, it can't be this one..
336 if is_match(full.read().0) {
337 return FoundExisting(full);
// Probed the whole chain without a match: give the table back.
344 TableRef(probe.into_table())
// Removes the entry at `starting_bucket` and returns its (key, value).
// Afterwards performs backward-shift deletion: successive entries with a
// nonzero distance-to-initial-bucket are shifted back one slot to fill the
// gap, preserving the Robin Hood invariant without using tombstones.
347 fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> (K, V) {
348 let (empty, retkey, retval) = starting_bucket.take();
349 let mut gap = match empty.gap_peek() {
// Next bucket is empty: nothing needs shifting.
351 None => return (retkey, retval)
// An element already in its ideal slot (distance 0) must not move back.
354 while gap.full().distance() != 0 {
355 gap = match gap.shift() {
361 // Now we've done all our shifting. Return the value we grabbed earlier.
365 /// Perform robin hood bucket stealing at the given `bucket`. You must
366 /// also pass the position of that bucket's initial bucket so we don't have
367 /// to recalculate it.
369 /// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
///
/// Displaces the richer occupant of `bucket`, then keeps re-inserting the
/// displaced element further down the chain until an empty bucket absorbs
/// it, finally returning a reference to the value written at the start.
370 fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>,
376 let starting_index = bucket.index();
378 let table = bucket.table(); // FIXME "lifetime too short".
381 // There can be at most `size - dib` buckets to displace, because
382 // in the worst case, there are `size` elements and we already are
383 // `distance` buckets away from the initial one.
384 let idx_end = starting_index + size - bucket.distance();
// Swap the new element in; the old occupant must now be re-homed below.
387 let (old_hash, old_key, old_val) = bucket.replace(hash, k, v);
389 let probe = bucket.next();
// By the displacement bound above, an empty slot must appear first.
390 assert!(probe.index() != idx_end);
392 let full_bucket = match probe.peek() {
395 let b = bucket.put(old_hash, old_key, old_val);
396 // Now that it's stolen, just read the value's pointer
397 // right out of the table!
398 return Bucket::at_index(b.into_table(), starting_index)
404 Full(bucket) => bucket
// Ideal bucket index of the element we are probing past.
407 let probe_ib = full_bucket.index() - full_bucket.distance();
409 bucket = full_bucket;
411 // Robin hood! Steal the spot.
423 /// A result that works like Option<FullBucket<..>> but preserves
424 /// the reference that grants us access to the table in any case.
425 enum SearchResult<K, V, M> {
426 // This is an entry that holds the given key:
427 FoundExisting(FullBucket<K, V, M>),
429 // There was no such entry. The reference is given back:
// NOTE(review): the `TableRef(M)` variant declaration is elided in this
// excerpt; it is constructed throughout `search_hashed` above.
433 impl<K, V, M> SearchResult<K, V, M> {
// Collapse into a plain Option, discarding the table reference on a miss.
434 fn into_option(self) -> Option<FullBucket<K, V, M>> {
436 FoundExisting(bucket) => Some(bucket),
442 impl<K, V, S, H> HashMap<K, V, S>
443 where K: Eq + Hash<H>,
444 S: HashState<Hasher=H>,
445 H: hash::Hasher<Output=u64>
// Hash an arbitrary borrowable value with this map's keyed hasher.
447 fn make_hash<X: ?Sized>(&self, x: &X) -> SafeHash where X: Hash<H> {
448 table::make_hash(&self.hash_state, x)
451 /// Search for a key, yielding the index if it's found in the hashtable.
452 /// If you already have the hash for the key lying around, use
454 fn search<'a, Q: ?Sized>(&'a self, q: &Q) -> Option<FullBucketImm<'a, K, V>>
455 where Q: BorrowFrom<K> + Eq + Hash<H>
457 let hash = self.make_hash(q);
458 search_hashed(&self.table, hash, |k| q.eq(BorrowFrom::borrow_from(k)))
// Mutable twin of `search`: yields a bucket with write access.
462 fn search_mut<'a, Q: ?Sized>(&'a mut self, q: &Q) -> Option<FullBucketMut<'a, K, V>>
463 where Q: BorrowFrom<K> + Eq + Hash<H>
465 let hash = self.make_hash(q);
466 search_hashed(&mut self.table, hash, |k| q.eq(BorrowFrom::borrow_from(k)))
470 // The caller should ensure that invariants by Robin Hood Hashing hold.
// Fast-path insertion used during resize: elements arrive in an order
// that never requires displacement, so the first empty bucket wins and
// no hash or DIB comparisons are needed.
471 fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
472 let cap = self.table.capacity();
473 let mut buckets = Bucket::new(&mut self.table, hash);
474 let ib = buckets.index();
476 while buckets.index() != ib + cap {
477 // We don't need to compare hashes for value swap.
478 // Not even DIBs for Robin Hood.
479 buckets = match buckets.peek() {
481 empty.put(hash, k, v);
484 Full(b) => b.into_bucket()
// Unreachable while the caller respects the capacity invariant.
488 panic!("Internal HashMap error: Out of space.");
492 impl<K: Hash<Hasher> + Eq, V> HashMap<K, V, RandomState> {
493 /// Create an empty HashMap.
///
/// Uses a randomly keyed `RandomState` hasher by default.
498 /// use std::collections::HashMap;
499 /// let mut map: HashMap<&str, int> = HashMap::new();
503 pub fn new() -> HashMap<K, V, RandomState> {
507 /// Creates an empty hash map with the given initial capacity.
512 /// use std::collections::HashMap;
513 /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
517 pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomState> {
// Delegate to the general constructor with the default hash state.
518 HashMap::with_capacity_and_hash_state(capacity, Default::default())
522 impl<K, V, S, H> HashMap<K, V, S>
523 where K: Eq + Hash<H>,
524 S: HashState<Hasher=H>,
525 H: hash::Hasher<Output=u64>
527 /// Creates an empty hashmap which will use the given hasher to hash keys.
529 /// The created map has the default initial capacity.
534 /// use std::collections::HashMap;
535 /// use std::collections::hash_map::RandomState;
537 /// let s = RandomState::new();
538 /// let mut map = HashMap::with_hash_state(s);
539 /// map.insert(1i, 2u);
542 #[unstable = "hasher stuff is unclear"]
543 pub fn with_hash_state(hash_state: S) -> HashMap<K, V, S> {
// Start with a zero-capacity table; growth happens lazily on insert.
545 hash_state: hash_state,
546 resize_policy: DefaultResizePolicy::new(),
547 table: RawTable::new(0),
551 /// Create an empty HashMap with space for at least `capacity`
552 /// elements, using `hasher` to hash the keys.
554 /// Warning: `hasher` is normally randomly generated, and
555 /// is designed to allow HashMaps to be resistant to attacks that
556 /// cause many collisions and very poor performance. Setting it
557 /// manually using this function can expose a DoS attack vector.
562 /// use std::collections::HashMap;
563 /// use std::collections::hash_map::RandomState;
565 /// let s = RandomState::new();
566 /// let mut map = HashMap::with_capacity_and_hash_state(10, s);
567 /// map.insert(1i, 2u);
570 #[unstable = "hasher stuff is unclear"]
571 pub fn with_capacity_and_hash_state(capacity: uint, hash_state: S)
572 -> HashMap<K, V, S> {
573 let resize_policy = DefaultResizePolicy::new();
// Round the requested capacity up through the load factor, then to a
// power of two (the table's capacity is asserted power-of-two in resize).
574 let min_cap = max(INITIAL_CAPACITY, resize_policy.min_capacity(capacity));
575 let internal_cap = min_cap.checked_next_power_of_two().expect("capacity overflow");
576 assert!(internal_cap >= capacity, "capacity overflow");
578 hash_state: hash_state,
579 resize_policy: resize_policy,
580 table: RawTable::new(internal_cap),
584 /// Returns the number of elements the map can hold without reallocating.
589 /// use std::collections::HashMap;
590 /// let map: HashMap<int, int> = HashMap::with_capacity(100);
591 /// assert!(map.capacity() >= 100);
595 pub fn capacity(&self) -> uint {
// Report usable (load-factor-adjusted) capacity, not raw bucket count.
596 self.resize_policy.usable_capacity(self.table.capacity())
599 /// Reserves capacity for at least `additional` more elements to be inserted
600 /// in the `HashMap`. The collection may reserve more space to avoid
601 /// frequent reallocations.
605 /// Panics if the new allocation size overflows `uint`.
610 /// use std::collections::HashMap;
611 /// let mut map: HashMap<&str, int> = HashMap::new();
615 pub fn reserve(&mut self, additional: uint) {
616 let new_size = self.len().checked_add(additional).expect("capacity overflow");
617 let min_cap = self.resize_policy.min_capacity(new_size);
619 // An invalid value shouldn't make us run out of space. This includes
620 // an overflow check.
621 assert!(new_size <= min_cap);
623 if self.table.capacity() < min_cap {
// Grow to the next power of two, but never below the initial capacity.
624 let new_capacity = max(min_cap.next_power_of_two(), INITIAL_CAPACITY);
625 self.resize(new_capacity);
629 /// Resizes the internal vectors to a new capacity. It's your responsibility to:
630 /// 1) Make sure the new capacity is enough for all the elements, accounting
631 /// for the load factor.
632 /// 2) Ensure new_capacity is a power of two or zero.
633 fn resize(&mut self, new_capacity: uint) {
634 assert!(self.table.size() <= new_capacity);
635 assert!(new_capacity.is_power_of_two() || new_capacity == 0);
// Swap in a fresh table; `old_table` now owns all existing entries.
637 let mut old_table = replace(&mut self.table, RawTable::new(new_capacity));
638 let old_size = old_table.size();
640 if old_table.capacity() == 0 || old_table.size() == 0 {
645 // Specialization of the other branch.
646 let mut bucket = Bucket::first(&mut old_table);
648 // "So a few of the first shall be last: for many be called,
651 // We'll most likely encounter a few buckets at the beginning that
652 // have their initial buckets near the end of the table. They were
653 // placed at the beginning as the probe wrapped around the table
654 // during insertion. We must skip forward to a bucket that won't
655 // get reinserted too early and won't unfairly steal others' spot.
656 // This eliminates the need for robin hood.
658 bucket = match bucket.peek() {
660 if full.distance() == 0 {
661 // This bucket occupies its ideal spot.
662 // It indicates the start of another "cluster".
663 bucket = full.into_bucket();
666 // Leaving this bucket in the last cluster for later.
670 // Encountered a hole between clusters.
677 // This is how the buckets might be laid out in memory:
678 // ($ marks an initialized bucket)
680 // |$$$_$$$$$$_$$$$$|
682 // But we've skipped the entire initial cluster of buckets
683 // and will continue iteration in this order:
686 // ^ wrap around once end is reached
689 // ^ exit once table.size == 0
// Reinsert every remaining element in (virtual) index order; see the
// growth-algorithm proof sketch in the comments near the top of this file.
691 bucket = match bucket.peek() {
693 let h = bucket.hash();
694 let (b, k, v) = bucket.take();
695 self.insert_hashed_ordered(h, k, v);
697 let t = b.table(); // FIXME "lifetime too short".
698 if t.size() == 0 { break }
702 Empty(b) => b.into_bucket()
// Every element must have survived the move.
707 assert_eq!(self.table.size(), old_size);
710 /// Shrinks the capacity of the map as much as possible. It will drop
711 /// down as much as possible while maintaining the internal rules
712 /// and possibly leaving some space in accordance with the resize policy.
717 /// use std::collections::HashMap;
719 /// let mut map: HashMap<int, int> = HashMap::with_capacity(100);
720 /// map.insert(1, 2);
721 /// map.insert(3, 4);
722 /// assert!(map.capacity() >= 100);
723 /// map.shrink_to_fit();
724 /// assert!(map.capacity() >= 2);
727 pub fn shrink_to_fit(&mut self) {
// Never shrink below the policy's minimum or the initial capacity.
728 let min_capacity = self.resize_policy.min_capacity(self.len());
729 let min_capacity = max(min_capacity.next_power_of_two(), INITIAL_CAPACITY);
731 // An invalid value shouldn't make us run out of space.
732 debug_assert!(self.len() <= min_capacity);
734 if self.table.capacity() != min_capacity {
// Swap in the smaller table and move every entry across.
735 let old_table = replace(&mut self.table, RawTable::new(min_capacity));
736 let old_size = old_table.size();
738 // Shrink the table. Naive algorithm for resizing:
739 for (h, k, v) in old_table.into_iter() {
740 self.insert_hashed_nocheck(h, k, v);
743 debug_assert_eq!(self.table.size(), old_size);
747 /// Insert a pre-hashed key-value pair, without first checking
748 /// that there's enough room in the buckets. Returns a reference to the
749 /// newly inserted value.
751 /// If the key already exists, the hashtable will be returned untouched
752 /// and a reference to the existing element will be returned.
753 fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
// The no-op closure simply drops the displaced value on replacement.
754 self.insert_or_replace_with(hash, k, v, |_, _, _| ())
// Core insertion routine: probes from `hash`'s ideal bucket and either
// fills an empty slot, replaces an equal key (notifying `found_existing`
// with the old key/value and the incoming value), or Robin-Hood-steals a
// luckier bucket's spot.
757 fn insert_or_replace_with<'a, F>(&'a mut self,
761 mut found_existing: F)
763 F: FnMut(&mut K, &mut V, V),
765 // Worst case, we'll find one empty bucket among `size + 1` buckets.
766 let size = self.table.size();
767 let mut probe = Bucket::new(&mut self.table, hash);
768 let ib = probe.index();
771 let mut bucket = match probe.peek() {
// Found an empty slot on the chain: claim it outright.
774 return bucket.put(hash, k, v).into_mut_refs().1;
776 Full(bucket) => bucket
780 if bucket.hash() == hash {
782 if k == *bucket.read_mut().0 {
783 let (bucket_k, bucket_v) = bucket.into_mut_refs();
784 debug_assert!(k == *bucket_k);
785 // Key already exists. Get its reference.
786 found_existing(bucket_k, bucket_v, v);
// Ideal bucket index of the current occupant; if it is "luckier"
// (closer to home) than we are, Robin Hood says we take its slot.
791 let robin_ib = bucket.index() as int - bucket.distance() as int;
793 if (ib as int) < robin_ib {
794 // Found a luckier bucket than me. Better steal his spot.
795 return robin_hood(bucket, robin_ib as uint, hash, k, v);
798 probe = bucket.next();
// There must be an empty bucket within `size + 1` probes.
799 assert!(probe.index() != ib + size + 1);
803 /// An iterator visiting all keys in arbitrary order.
804 /// Iterator element type is `&'a K`.
809 /// use std::collections::HashMap;
811 /// let mut map = HashMap::new();
812 /// map.insert("a", 1i);
813 /// map.insert("b", 2);
814 /// map.insert("c", 3);
816 /// for key in map.keys() {
817 /// println!("{}", key);
821 pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
// Named fn (not a closure) so the Map adapter type stays nameable.
822 fn first<A, B>((a, _): (A, B)) -> A { a }
823 let first: fn((&'a K,&'a V)) -> &'a K = first; // coerce to fn ptr
825 Keys { inner: self.iter().map(first) }
828 /// An iterator visiting all values in arbitrary order.
829 /// Iterator element type is `&'a V`.
834 /// use std::collections::HashMap;
836 /// let mut map = HashMap::new();
837 /// map.insert("a", 1i);
838 /// map.insert("b", 2);
839 /// map.insert("c", 3);
841 /// for key in map.values() {
842 /// println!("{}", key);
846 pub fn values<'a>(&'a self) -> Values<'a, K, V> {
// Same fn-pointer trick as `keys`, projecting the value instead.
847 fn second<A, B>((_, b): (A, B)) -> B { b }
848 let second: fn((&'a K,&'a V)) -> &'a V = second; // coerce to fn ptr
850 Values { inner: self.iter().map(second) }
853 /// An iterator visiting all key-value pairs in arbitrary order.
854 /// Iterator element type is `(&'a K, &'a V)`.
859 /// use std::collections::HashMap;
861 /// let mut map = HashMap::new();
862 /// map.insert("a", 1i);
863 /// map.insert("b", 2);
864 /// map.insert("c", 3);
866 /// for (key, val) in map.iter() {
867 /// println!("key: {} val: {}", key, val);
871 pub fn iter(&self) -> Iter<K, V> {
872 Iter { inner: self.table.iter() }
875 /// An iterator visiting all key-value pairs in arbitrary order,
876 /// with mutable references to the values.
877 /// Iterator element type is `(&'a K, &'a mut V)`.
882 /// use std::collections::HashMap;
884 /// let mut map = HashMap::new();
885 /// map.insert("a", 1i);
886 /// map.insert("b", 2);
887 /// map.insert("c", 3);
889 /// // Update all values
890 /// for (_, val) in map.iter_mut() {
894 /// for (key, val) in map.iter() {
895 /// println!("key: {} val: {}", key, val);
899 pub fn iter_mut(&mut self) -> IterMut<K, V> {
900 IterMut { inner: self.table.iter_mut() }
903 /// Creates a consuming iterator, that is, one that moves each key-value
904 /// pair out of the map in arbitrary order. The map cannot be used after
910 /// use std::collections::HashMap;
912 /// let mut map = HashMap::new();
913 /// map.insert("a", 1i);
914 /// map.insert("b", 2);
915 /// map.insert("c", 3);
917 /// // Not possible with .iter()
918 /// let vec: Vec<(&str, int)> = map.into_iter().collect();
921 pub fn into_iter(self) -> IntoIter<K, V> {
// Drop the cached hash; yield only the (key, value) pair.
922 fn last_two<A, B, C>((_, b, c): (A, B, C)) -> (B, C) { (b, c) }
923 let last_two: fn((SafeHash, K, V)) -> (K, V) = last_two;
926 inner: self.table.into_iter().map(last_two)
930 /// Gets the given key's corresponding entry in the map for in-place manipulation.
931 #[unstable = "precise API still being fleshed out"]
932 pub fn entry<'a>(&'a mut self, key: K) -> Entry<'a, K, V>
// Hash once up front; the entry search reuses the pre-computed hash.
937 let hash = self.make_hash(&key);
938 search_entry_hashed(&mut self.table, hash, key)
941 /// Return the number of elements in the map.
946 /// use std::collections::HashMap;
948 /// let mut a = HashMap::new();
949 /// assert_eq!(a.len(), 0);
950 /// a.insert(1u, "a");
951 /// assert_eq!(a.len(), 1);
954 pub fn len(&self) -> uint { self.table.size() }
956 /// Return true if the map contains no elements.
961 /// use std::collections::HashMap;
963 /// let mut a = HashMap::new();
964 /// assert!(a.is_empty());
965 /// a.insert(1u, "a");
966 /// assert!(!a.is_empty());
970 pub fn is_empty(&self) -> bool { self.len() == 0 }
972 /// Clears the map, returning all key-value pairs as an iterator. Keeps the
973 /// allocated memory for reuse.
978 /// use std::collections::HashMap;
980 /// let mut a = HashMap::new();
981 /// a.insert(1u, "a");
982 /// a.insert(2u, "b");
984 /// for (k, v) in a.drain().take(1) {
985 /// assert!(k == 1 || k == 2);
986 /// assert!(v == "a" || v == "b");
989 /// assert!(a.is_empty());
992 #[unstable = "matches collection reform specification, waiting for dust to settle"]
993 pub fn drain(&mut self) -> Drain<K, V> {
// Same fn-pointer projection as `into_iter`: strip the cached hash.
994 fn last_two<A, B, C>((_, b, c): (A, B, C)) -> (B, C) { (b, c) }
995 let last_two: fn((SafeHash, K, V)) -> (K, V) = last_two; // coerce to fn pointer
998 inner: self.table.drain().map(last_two),
1002 /// Clears the map, removing all key-value pairs. Keeps the allocated memory
1008 /// use std::collections::HashMap;
1010 /// let mut a = HashMap::new();
1011 /// a.insert(1u, "a");
1013 /// assert!(a.is_empty());
1017 pub fn clear(&mut self) {
1021 /// Returns a reference to the value corresponding to the key.
1023 /// The key may be any borrowed form of the map's key type, but
1024 /// `Hash` and `Eq` on the borrowed form *must* match those for
1030 /// use std::collections::HashMap;
1032 /// let mut map = HashMap::new();
1033 /// map.insert(1u, "a");
1034 /// assert_eq!(map.get(&1), Some(&"a"));
1035 /// assert_eq!(map.get(&2), None);
1038 pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
1039 where Q: Hash<H> + Eq + BorrowFrom<K>
// On a hit, convert the found bucket into a borrow of its value.
1041 self.search(k).map(|bucket| bucket.into_refs().1)
1044 /// Returns true if the map contains a value for the specified key.
1046 /// The key may be any borrowed form of the map's key type, but
1047 /// `Hash` and `Eq` on the borrowed form *must* match those for
1053 /// use std::collections::HashMap;
1055 /// let mut map = HashMap::new();
1056 /// map.insert(1u, "a");
1057 /// assert_eq!(map.contains_key(&1), true);
1058 /// assert_eq!(map.contains_key(&2), false);
1061 pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
1062 where Q: Hash<H> + Eq + BorrowFrom<K>
1064 self.search(k).is_some()
1067 /// Returns a mutable reference to the value corresponding to the key.
1069 /// The key may be any borrowed form of the map's key type, but
1070 /// `Hash` and `Eq` on the borrowed form *must* match those for
1076 /// use std::collections::HashMap;
1078 /// let mut map = HashMap::new();
1079 /// map.insert(1u, "a");
1080 /// match map.get_mut(&1) {
1081 /// Some(x) => *x = "b",
1084 /// assert_eq!(map[1], "b");
1087 pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
1088 where Q: Hash<H> + Eq + BorrowFrom<K>
1090 self.search_mut(k).map(|bucket| bucket.into_mut_refs().1)
1093 /// Inserts a key-value pair into the map. If the key already had a value
1094 /// present in the map, that value is returned. Otherwise, `None` is returned.
1099 /// use std::collections::HashMap;
1101 /// let mut map = HashMap::new();
1102 /// assert_eq!(map.insert(37u, "a"), None);
1103 /// assert_eq!(map.is_empty(), false);
1105 /// map.insert(37, "b");
1106 /// assert_eq!(map.insert(37, "c"), Some("b"));
1107 /// assert_eq!(map[37], "c");
1110 pub fn insert(&mut self, k: K, v: V) -> Option<V> {
1111 let hash = self.make_hash(&k);
// Capture the displaced value (if any) via the replacement callback.
1114 let mut retval = None;
1115 self.insert_or_replace_with(hash, k, v, |_, val_ref, val| {
1116 retval = Some(replace(val_ref, val));
1121 /// Removes a key from the map, returning the value at the key if the key
1122 /// was previously in the map.
1124 /// The key may be any borrowed form of the map's key type, but
1125 /// `Hash` and `Eq` on the borrowed form *must* match those for
1131 /// use std::collections::HashMap;
1133 /// let mut map = HashMap::new();
1134 /// map.insert(1u, "a");
1135 /// assert_eq!(map.remove(&1), Some("a"));
1136 /// assert_eq!(map.remove(&1), None);
1139 pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
1140 where Q: Hash<H> + Eq + BorrowFrom<K>
// Fast path: an empty table cannot contain the key.
1142 if self.table.size() == 0 {
// `pop_internal` also repairs displacement via backward shifting.
1146 self.search_mut(k).map(|bucket| pop_internal(bucket).1)
// Probes the table for `k` (with precomputed `hash`) and classifies the
// landing spot for the entry API: `Occupied` for an existing equal key,
// `Vacant(NoElem)` for a genuinely empty bucket, or `Vacant(NeqElem)` when
// Robin Hood displacement says the new key should take over a bucket whose
// resident is closer to its own ideal position.
1150 fn search_entry_hashed<'a, K: Eq, V>(table: &'a mut RawTable<K,V>, hash: SafeHash, k: K)
1153 // Worst case, we'll find one empty bucket among `size + 1` buckets.
1154 let size = table.size();
1155 let mut probe = Bucket::new(table, hash);
// `ib` is the ideal (initial) bucket index for this hash.
1156 let ib = probe.index();
1159 let bucket = match probe.peek() {
1162 return Vacant(VacantEntry {
1165 elem: NoElem(bucket),
1168 Full(bucket) => bucket
// Compare hashes first: cheap, and filters out almost all non-matches
// before the potentially expensive key-equality check.
1172 if bucket.hash() == hash {
1174 if k == *bucket.read().0 {
1175 return Occupied(OccupiedEntry{
// Robin Hood criterion: `robin_ib` is the resident entry's ideal index
// (its index minus its probe distance).
1181 let robin_ib = bucket.index() as int - bucket.distance() as int;
1183 if (ib as int) < robin_ib {
1184 // Found a luckier bucket than me. Better steal his spot.
1185 return Vacant(VacantEntry {
1188 elem: NeqElem(bucket, robin_ib as uint),
1192 probe = bucket.next();
// The probe sequence must terminate within `size + 1` steps; tripping this
// assert means the table's invariants are broken.
1193 assert!(probe.index() != ib + size + 1);
// Two maps are equal when they have the same length and every key in `self`
// maps to an equal value in `other`; iteration order is irrelevant.
1197 impl<K, V, S, H> PartialEq for HashMap<K, V, S>
1198 where K: Eq + Hash<H>, V: PartialEq,
1199 S: HashState<Hasher=H>,
1200 H: hash::Hasher<Output=u64>
1202 fn eq(&self, other: &HashMap<K, V, S>) -> bool {
// Length check first: cheap, and necessary so extra keys in `other`
// cannot go unnoticed by the one-sided containment check below.
1203 if self.len() != other.len() { return false; }
1205 self.iter().all(|(key, value)|
1206 other.get(key).map_or(false, |v| *value == *v)
// Marker impl: map equality is a total equivalence relation whenever the
// value type's equality is (`V: Eq`).
1212 impl<K, V, S, H> Eq for HashMap<K, V, S>
1213 where K: Eq + Hash<H>, V: Eq,
1214 S: HashState<Hasher=H>,
1215 H: hash::Hasher<Output=u64>
// Debug-style formatting: renders as `HashMap {k1: v1, k2: v2}` in the map's
// (unspecified) iteration order.
1219 impl<K, V, S, H> Show for HashMap<K, V, S>
1220 where K: Eq + Hash<H> + Show, V: Show,
1221 S: HashState<Hasher=H>,
1222 H: hash::Hasher<Output=u64>
1224 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1225 try!(write!(f, "HashMap {{"));
1227 for (i, (k, v)) in self.iter().enumerate() {
// Comma-separate every pair after the first.
1228 if i != 0 { try!(write!(f, ", ")); }
1229 try!(write!(f, "{:?}: {:?}", *k, *v));
// `Default` builds an empty map using the hash state's own `Default` impl.
1237 impl<K, V, S, H> Default for HashMap<K, V, S>
1238 where K: Eq + Hash<H>,
1239 S: HashState<Hasher=H> + Default,
1240 H: hash::Hasher<Output=u64>
1242 fn default() -> HashMap<K, V, S> {
1243 HashMap::with_hash_state(Default::default())
// `map[key]` sugar. Panics with "no entry found for key" when the key is
// absent, mirroring `get(..).expect(..)`.
1248 impl<K, Q: ?Sized, V, S, H> Index<Q> for HashMap<K, V, S>
1249 where K: Eq + Hash<H>,
1250 Q: Eq + Hash<H> + BorrowFrom<K>,
1251 S: HashState<Hasher=H>,
1252 H: hash::Hasher<Output=u64>
1257 fn index<'a>(&'a self, index: &Q) -> &'a V {
1258 self.get(index).expect("no entry found for key")
// Mutable `map[key]` sugar. Same panic behavior as `Index` when the key is
// absent; only existing entries can be indexed.
1263 impl<K, V, S, H, Q: ?Sized> IndexMut<Q> for HashMap<K, V, S>
1264 where K: Eq + Hash<H>,
1265 Q: Eq + Hash<H> + BorrowFrom<K>,
1266 S: HashState<Hasher=H>,
1267 H: hash::Hasher<Output=u64>
1272 fn index_mut<'a>(&'a mut self, index: &Q) -> &'a mut V {
1273 self.get_mut(index).expect("no entry found for key")
1277 /// HashMap iterator
1279 pub struct Iter<'a, K: 'a, V: 'a> {
// Thin wrapper over the raw table's shared-reference iterator.
1280 inner: table::Iter<'a, K, V>
1283 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
// Manual impl because `derive(Clone)` would add unwanted `K: Clone`/`V: Clone`
// bounds (see FIXME above) even though only the inner iterator is cloned.
1284 impl<'a, K, V> Clone for Iter<'a, K, V> {
1285 fn clone(&self) -> Iter<'a, K, V> {
1287 inner: self.inner.clone()
1292 /// HashMap mutable values iterator
1294 pub struct IterMut<'a, K: 'a, V: 'a> {
// Thin wrapper over the raw table's mutable iterator.
1295 inner: table::IterMut<'a, K, V>
1298 /// HashMap move iterator
1300 pub struct IntoIter<K, V> {
// The table iterator yields (SafeHash, K, V) triples; the fn pointer
// projects away the hash so callers see plain (K, V) pairs.
1304 table::IntoIter<K, V>,
1305 fn((SafeHash, K, V)) -> (K, V),
1309 /// HashMap keys iterator
1311 pub struct Keys<'a, K: 'a, V: 'a> {
// A `Map` adaptor over the pair iterator projecting each (&K, &V) to &K.
1312 inner: Map<(&'a K, &'a V), &'a K, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
1315 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
// Manual impl to avoid the unwanted `K: Clone`/`V: Clone` bounds a derive
// would introduce (see FIXME above).
1316 impl<'a, K, V> Clone for Keys<'a, K, V> {
1317 fn clone(&self) -> Keys<'a, K, V> {
1319 inner: self.inner.clone()
1324 /// HashMap values iterator
1326 pub struct Values<'a, K: 'a, V: 'a> {
// A `Map` adaptor over the pair iterator projecting each (&K, &V) to &V.
1327 inner: Map<(&'a K, &'a V), &'a V, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
1330 // FIXME(#19839) Remove in favor of `#[derive(Clone)]`
// Manual impl for the same bounds reason as `Keys` (see FIXME above).
1331 impl<'a, K, V> Clone for Values<'a, K, V> {
1332 fn clone(&self) -> Values<'a, K, V> {
1334 inner: self.inner.clone()
1339 /// HashMap drain iterator
1340 #[unstable = "matches collection reform specification, waiting for dust to settle"]
1341 pub struct Drain<'a, K: 'a, V: 'a> {
// Drains the table in place while iterating; the fn pointer drops the
// SafeHash component so callers see plain (K, V) pairs.
1345 table::Drain<'a, K, V>,
1346 fn((SafeHash, K, V)) -> (K, V),
1350 /// A view into a single occupied location in a HashMap
1351 #[unstable = "precise API still being fleshed out"]
1352 pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
// Holds the found bucket together with a mutable borrow of the table for
// the map's lifetime, so the entry can read, mutate, or remove in place.
1353 elem: FullBucket<K, V, &'a mut RawTable<K, V>>,
1356 /// A view into a single empty location in a HashMap
1357 #[unstable = "precise API still being fleshed out"]
1358 pub struct VacantEntry<'a, K: 'a, V: 'a> {
// Remembers where the probe ended (see `VacantEntryState`) so `insert` can
// place the value without re-searching.
1361 elem: VacantEntryState<K, V, &'a mut RawTable<K, V>>,
1364 /// A view into a single location in a map, which may be vacant or occupied
1365 #[unstable = "precise API still being fleshed out"]
1366 pub enum Entry<'a, K: 'a, V: 'a> {
1367 /// An occupied Entry
1368 Occupied(OccupiedEntry<'a, K, V>),
/// A vacant Entry
1370 Vacant(VacantEntry<'a, K, V>),
1373 /// Possible states of a VacantEntry
1374 enum VacantEntryState<K, V, M> {
1375 /// The index is occupied, but the key to insert has precedence,
1376 /// and will kick the current one out on insertion
// The `uint` is the displaced resident's ideal bucket index, forwarded to
// `robin_hood` on insertion (see `VacantEntry::insert`).
1377 NeqElem(FullBucket<K, V, M>, uint),
1378 /// The index is genuinely vacant
1379 NoElem(EmptyBucket<K, V, M>),
// Pure forwarding impls: all iteration work is done by the table iterator.
1383 impl<'a, K, V> Iterator for Iter<'a, K, V> {
1384 type Item = (&'a K, &'a V);
1386 #[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
1387 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1390 impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
1391 #[inline] fn len(&self) -> usize { self.inner.len() }
// Forwarding impls for the mutable-values iterator.
1395 impl<'a, K, V> Iterator for IterMut<'a, K, V> {
1396 type Item = (&'a K, &'a mut V);
1398 #[inline] fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next() }
1399 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1402 impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
1403 #[inline] fn len(&self) -> usize { self.inner.len() }
// Forwarding impls for the by-value (consuming) iterator.
1407 impl<K, V> Iterator for IntoIter<K, V> {
1410 #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
1411 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1414 impl<K, V> ExactSizeIterator for IntoIter<K, V> {
1415 #[inline] fn len(&self) -> usize { self.inner.len() }
// Forwarding impls for the keys-only iterator.
1419 impl<'a, K, V> Iterator for Keys<'a, K, V> {
1422 #[inline] fn next(&mut self) -> Option<(&'a K)> { self.inner.next() }
1423 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1426 impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
1427 #[inline] fn len(&self) -> usize { self.inner.len() }
// Forwarding impls for the values-only iterator.
1431 impl<'a, K, V> Iterator for Values<'a, K, V> {
1434 #[inline] fn next(&mut self) -> Option<(&'a V)> { self.inner.next() }
1435 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1438 impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
1439 #[inline] fn len(&self) -> usize { self.inner.len() }
// Forwarding impls for the draining iterator (yields owned pairs).
1443 impl<'a, K, V> Iterator for Drain<'a, K, V> {
1446 #[inline] fn next(&mut self) -> Option<(K, V)> { self.inner.next() }
1447 #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
1450 impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
1451 #[inline] fn len(&self) -> usize { self.inner.len() }
1454 #[unstable = "matches collection reform v2 specification, waiting for dust to settle"]
1455 impl<'a, K, V> Entry<'a, K, V> {
1456 /// Returns a mutable reference to the entry if occupied, or the VacantEntry if vacant
// Consumes the entry: `Ok` carries a value reference living as long as the
// map borrow, `Err` hands the vacant slot back for a later `insert`.
1457 pub fn get(self) -> Result<&'a mut V, VacantEntry<'a, K, V>> {
1459 Occupied(entry) => Ok(entry.into_mut()),
1460 Vacant(entry) => Err(entry),
1465 #[unstable = "matches collection reform v2 specification, waiting for dust to settle"]
1466 impl<'a, K, V> OccupiedEntry<'a, K, V> {
1467 /// Gets a reference to the value in the entry
1468 pub fn get(&self) -> &V {
1472 /// Gets a mutable reference to the value in the entry
1473 pub fn get_mut(&mut self) -> &mut V {
1474 self.elem.read_mut().1
1477 /// Converts the OccupiedEntry into a mutable reference to the value in the entry
1478 /// with a lifetime bound to the map itself
1479 pub fn into_mut(self) -> &'a mut V {
1480 self.elem.into_mut_refs().1
1483 /// Sets the value of the entry, and returns the entry's old value
1484 pub fn insert(&mut self, mut value: V) -> V {
// Swap the new value into the bucket; after the swap, `value` holds the
// previous value and is returned to the caller.
1485 let old_value = self.get_mut();
1486 mem::swap(&mut value, old_value);
1490 /// Takes the value out of the entry, and returns it
1491 pub fn remove(self) -> V {
// `pop_internal` returns the removed (key, value) pair; keep the value.
1492 pop_internal(self.elem).1
1496 #[unstable = "matches collection reform v2 specification, waiting for dust to settle"]
1497 impl<'a, K: 'a, V: 'a> VacantEntry<'a, K, V> {
1498 /// Sets the value of the entry with the VacantEntry's key,
1499 /// and returns a mutable reference to it
1500 pub fn insert(self, value: V) -> &'a mut V {
// NeqElem: the slot is occupied by an entry that must be displaced; let
// `robin_hood` shift it (and any followers) down the probe chain.
1502 NeqElem(bucket, ib) => {
1503 robin_hood(bucket, ib, self.hash, self.key, value)
// NoElem: genuinely empty slot; store the pair directly.
1506 bucket.put(self.hash, self.key, value).into_mut_refs().1
// Builds a map from an iterator of pairs, pre-reserving the iterator's lower
// size-hint bound to limit rehashing while the map fills up.
1513 impl<K, V, S, H> FromIterator<(K, V)> for HashMap<K, V, S>
1514 where K: Eq + Hash<H>,
1515 S: HashState<Hasher=H> + Default,
1516 H: hash::Hasher<Output=u64>
1518 fn from_iter<T: Iterator<Item=(K, V)>>(iter: T) -> HashMap<K, V, S> {
// Lower bound only: the upper bound may be None or an overestimate.
1519 let lower = iter.size_hint().0;
1520 let mut map = HashMap::with_capacity_and_hash_state(lower,
1521 Default::default());
// Inserts every pair from the iterator; existing keys are overwritten with
// the semantics of `insert`.
1528 impl<K, V, S, H> Extend<(K, V)> for HashMap<K, V, S>
1529 where K: Eq + Hash<H>,
1530 S: HashState<Hasher=H>,
1531 H: hash::Hasher<Output=u64>
1533 fn extend<T: Iterator<Item=(K, V)>>(&mut self, mut iter: T) {
1534 for (k, v) in iter {
1541 /// `RandomState` is the default state for `HashMap` types.
1543 /// A particular instance of `RandomState` will create the same instances of
1544 /// `Hasher`, but the hashers created by two different `RandomState`
1545 /// instances are unlikely to produce the same result for the same values.
1547 #[allow(missing_copy_implementations)]
1548 #[unstable = "hashing an hash maps may be altered"]
// Stores the two random SipHash keys (k0, k1) chosen at construction time.
1549 pub struct RandomState {
1554 #[unstable = "hashing an hash maps may be altered"]
1556 /// Construct a new `RandomState` that is initialized with random keys.
1558 pub fn new() -> RandomState {
// The thread-local RNG supplies both SipHash keys, making hashers from
// different `RandomState` instances disagree with high probability.
1559 let mut r = rand::thread_rng();
1560 RandomState { k0: r.gen(), k1: r.gen() }
1564 #[unstable = "hashing an hash maps may be altered"]
// Every `hasher()` call yields a SipHasher seeded with this state's fixed
// (k0, k1) pair, so one `RandomState` always produces equivalent hashers.
1565 impl HashState for RandomState {
1566 type Hasher = Hasher;
1567 fn hasher(&self) -> Hasher {
1568 Hasher { inner: SipHasher::new_with_keys(self.k0, self.k1) }
1572 #[unstable = "hashing an hash maps may be altered"]
// Default state is freshly randomized (delegates to `RandomState::new`,
// body not visible in this excerpt).
1573 impl Default for RandomState {
1575 fn default() -> RandomState {
1580 /// A hasher implementation which is generated from `RandomState` instances.
1582 /// This is the default hasher used in a `HashMap` to hash keys. Types do not
1583 /// typically declare an ability to explicitly hash into this particular type,
1584 /// but rather in a `H: hash::Writer` type parameter.
1585 #[allow(missing_copy_implementations)]
// Newtype around `SipHasher` carrying the keys from its `RandomState`.
1586 pub struct Hasher { inner: SipHasher }
// Both trait impls forward directly to the wrapped `SipHasher`.
1588 impl hash::Writer for Hasher {
1589 fn write(&mut self, data: &[u8]) { self.inner.write(data) }
1592 impl hash::Hasher for Hasher {
1594 fn reset(&mut self) { self.inner.reset() }
1595 fn finish(&self) -> u64 { self.inner.finish() }
1603 use super::Entry::{Occupied, Vacant};
1604 use iter::{range_inclusive, range_step_inclusive, repeat};
1606 use rand::{weak_rng, Rng};
// A zero-capacity map must still accept inserts (the table grows on demand).
1609 fn test_create_capacity_zero() {
1610 let mut m = HashMap::with_capacity(0);
1612 assert!(m.insert(1i, 1i).is_none());
1614 assert!(m.contains_key(&1));
1615 assert!(!m.contains_key(&0));
// Basic insert / len / get smoke test.
1620 let mut m = HashMap::new();
1621 assert_eq!(m.len(), 0);
1622 assert!(m.insert(1i, 2i).is_none());
1623 assert_eq!(m.len(), 1);
1624 assert!(m.insert(2i, 4i).is_none());
1625 assert_eq!(m.len(), 2);
1626 assert_eq!(*m.get(&1).unwrap(), 2);
1627 assert_eq!(*m.get(&2).unwrap(), 4);
// Drop-tracking fixture: DROP_VECTOR[k] counts live `Dropable(k)` instances,
// incremented on creation and decremented in `Drop`, so tests can assert
// exactly which values have been dropped.
1630 thread_local! { static DROP_VECTOR: RefCell<Vec<int>> = RefCell::new(Vec::new()) }
1632 #[derive(Hash, PartialEq, Eq)]
1638 fn new(k: uint) -> Dropable {
1639 DROP_VECTOR.with(|slot| {
1640 slot.borrow_mut()[k] += 1;
1647 impl Drop for Dropable {
1648 fn drop(&mut self) {
1649 DROP_VECTOR.with(|slot| {
1650 slot.borrow_mut()[self.k] -= 1;
// Clone goes through `new` so the live-instance count stays correct.
1655 impl Clone for Dropable {
1656 fn clone(&self) -> Dropable {
1657 Dropable::new(self.k)
// Verifies drop correctness: keys use slots 0..100, values use 100..200 of
// DROP_VECTOR; inserts raise counts to 1, removals and the final map drop
// bring them back to 0 with no double-drops.
1663 DROP_VECTOR.with(|slot| {
1664 *slot.borrow_mut() = repeat(0i).take(200).collect();
1668 let mut m = HashMap::new();
1670 DROP_VECTOR.with(|v| {
1671 for i in range(0u, 200) {
1672 assert_eq!(v.borrow()[i], 0);
1676 for i in range(0u, 100) {
1677 let d1 = Dropable::new(i);
1678 let d2 = Dropable::new(i+100);
1682 DROP_VECTOR.with(|v| {
1683 for i in range(0u, 200) {
1684 assert_eq!(v.borrow()[i], 1);
// Remove the first 50 entries; their key and value counters must drop to 0.
1688 for i in range(0u, 50) {
1689 let k = Dropable::new(i);
1690 let v = m.remove(&k);
1692 assert!(v.is_some());
1694 DROP_VECTOR.with(|v| {
1695 assert_eq!(v.borrow()[i], 1);
1696 assert_eq!(v.borrow()[i+100], 1);
1700 DROP_VECTOR.with(|v| {
1701 for i in range(0u, 50) {
1702 assert_eq!(v.borrow()[i], 0);
1703 assert_eq!(v.borrow()[i+100], 0);
1706 for i in range(50u, 100) {
1707 assert_eq!(v.borrow()[i], 1);
1708 assert_eq!(v.borrow()[i+100], 1);
// After the map itself is dropped, everything must be released.
1713 DROP_VECTOR.with(|v| {
1714 for i in range(0u, 200) {
1715 assert_eq!(v.borrow()[i], 0);
// Verifies that a partially-consumed `into_iter()` drops the remaining
// entries when the iterator itself is dropped — no leaks, no double-drops.
1721 fn test_move_iter_drops() {
1722 DROP_VECTOR.with(|v| {
1723 *v.borrow_mut() = repeat(0).take(200).collect();
1727 let mut hm = HashMap::new();
1729 DROP_VECTOR.with(|v| {
1730 for i in range(0u, 200) {
1731 assert_eq!(v.borrow()[i], 0);
1735 for i in range(0u, 100) {
1736 let d1 = Dropable::new(i);
1737 let d2 = Dropable::new(i+100);
1741 DROP_VECTOR.with(|v| {
1742 for i in range(0u, 200) {
1743 assert_eq!(v.borrow()[i], 1);
1750 // By the way, ensure that cloning doesn't screw up the dropping.
// Consume only half the iterator; nothing may be dropped yet while the
// iterator is still alive.
1754 let mut half = hm.into_iter().take(50);
1756 DROP_VECTOR.with(|v| {
1757 for i in range(0u, 200) {
1758 assert_eq!(v.borrow()[i], 1);
// Exactly the 50 yielded entries should now be dropped (checked by count).
1764 DROP_VECTOR.with(|v| {
1765 let nk = range(0u, 100).filter(|&i| {
1769 let nv = range(0u, 100).filter(|&i| {
1770 v.borrow()[i+100] == 1
// Once the iterator is gone, the unconsumed half must be dropped too.
1778 DROP_VECTOR.with(|v| {
1779 for i in range(0u, 200) {
1780 assert_eq!(v.borrow()[i], 0);
// Removing from an empty map returns None (exercises the size()==0 fast path).
1786 fn test_empty_pop() {
1787 let mut m: HashMap<int, bool> = HashMap::new();
1788 assert_eq!(m.remove(&0), None);
// Stress test: repeated fill/drain cycles, checking after every single
// insert and remove that exactly the expected keys are present.
1792 fn test_lots_of_insertions() {
1793 let mut m = HashMap::new();
1795 // Try this a few times to make sure we never screw up the hashmap's
1797 for _ in range(0i, 10) {
1798 assert!(m.is_empty());
1800 for i in range_inclusive(1i, 1000) {
1801 assert!(m.insert(i, i).is_none());
// Everything inserted so far must be findable...
1803 for j in range_inclusive(1, i) {
1805 assert_eq!(r, Some(&j));
// ...and nothing not yet inserted may appear.
1808 for j in range_inclusive(i+1, 1000) {
1810 assert_eq!(r, None);
1814 for i in range_inclusive(1001i, 2000) {
1815 assert!(!m.contains_key(&i));
// Drain front-to-back, re-checking membership after each removal.
1819 for i in range_inclusive(1i, 1000) {
1820 assert!(m.remove(&i).is_some());
1822 for j in range_inclusive(1, i) {
1823 assert!(!m.contains_key(&j));
1826 for j in range_inclusive(i+1, 1000) {
1827 assert!(m.contains_key(&j));
1831 for i in range_inclusive(1i, 1000) {
1832 assert!(!m.contains_key(&i));
1835 for i in range_inclusive(1i, 1000) {
1836 assert!(m.insert(i, i).is_none());
// Drain again back-to-front to hit different probe/backshift patterns.
1840 for i in range_step_inclusive(1000i, 1, -1) {
1841 assert!(m.remove(&i).is_some());
1843 for j in range_inclusive(i, 1000) {
1844 assert!(!m.contains_key(&j));
1847 for j in range_inclusive(1, i-1) {
1848 assert!(m.contains_key(&j));
// get_mut must yield a reference through which the stored value can change.
1855 fn test_find_mut() {
1856 let mut m = HashMap::new();
1857 assert!(m.insert(1i, 12i).is_none());
1858 assert!(m.insert(2i, 8i).is_none());
1859 assert!(m.insert(5i, 14i).is_none());
1861 match m.get_mut(&5) {
1862 None => panic!(), Some(x) => *x = new
1864 assert_eq!(m.get(&5), Some(&new));
// Re-inserting an existing key overwrites and reports the prior presence.
1868 fn test_insert_overwrite() {
1869 let mut m = HashMap::new();
1870 assert!(m.insert(1i, 2i).is_none());
1871 assert_eq!(*m.get(&1).unwrap(), 2);
1872 assert!(!m.insert(1i, 3i).is_none());
1873 assert_eq!(*m.get(&1).unwrap(), 3);
// Keys 1, 5, 9 are chosen to collide in a small (capacity-4) table.
1877 fn test_insert_conflicts() {
1878 let mut m = HashMap::with_capacity(4);
1879 assert!(m.insert(1i, 2i).is_none());
1880 assert!(m.insert(5i, 3i).is_none());
1881 assert!(m.insert(9i, 4i).is_none());
1882 assert_eq!(*m.get(&9).unwrap(), 4);
1883 assert_eq!(*m.get(&5).unwrap(), 3);
1884 assert_eq!(*m.get(&1).unwrap(), 2);
// Removing one key from a colliding chain must leave the others reachable
// (exercises backward-shift after deletion).
1888 fn test_conflict_remove() {
1889 let mut m = HashMap::with_capacity(4);
1890 assert!(m.insert(1i, 2i).is_none());
1891 assert_eq!(*m.get(&1).unwrap(), 2);
1892 assert!(m.insert(5, 3).is_none());
1893 assert_eq!(*m.get(&1).unwrap(), 2);
1894 assert_eq!(*m.get(&5).unwrap(), 3);
1895 assert!(m.insert(9, 4).is_none());
1896 assert_eq!(*m.get(&1).unwrap(), 2);
1897 assert_eq!(*m.get(&5).unwrap(), 3);
1898 assert_eq!(*m.get(&9).unwrap(), 4);
1899 assert!(m.remove(&1).is_some());
1900 assert_eq!(*m.get(&9).unwrap(), 4);
1901 assert_eq!(*m.get(&5).unwrap(), 3);
// is_empty must track insert/remove transitions.
1905 fn test_is_empty() {
1906 let mut m = HashMap::with_capacity(4);
1907 assert!(m.insert(1i, 2i).is_none());
1908 assert!(!m.is_empty());
1909 assert!(m.remove(&1).is_some());
1910 assert!(m.is_empty());
// remove returns the stored value once, then None on repeat.
1915 let mut m = HashMap::new();
1917 assert_eq!(m.remove(&1), Some(2));
1918 assert_eq!(m.remove(&1), None);
// Iteration must visit each of the 32 entries exactly once; `observed` is a
// bitmask with one bit per key.
1923 let mut m = HashMap::with_capacity(4);
1924 for i in range(0u, 32) {
1925 assert!(m.insert(i, i*2).is_none());
1927 assert_eq!(m.len(), 32);
1929 let mut observed: u32 = 0;
1931 for (k, v) in m.iter() {
1932 assert_eq!(*v, *k * 2);
1933 observed |= 1 << *k;
1935 assert_eq!(observed, 0xFFFF_FFFF);
// keys() yields every key exactly once, order unspecified.
1940 let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
1941 let map = vec.into_iter().collect::<HashMap<int, char>>();
1942 let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
1943 assert_eq!(keys.len(), 3);
1944 assert!(keys.contains(&1));
1945 assert!(keys.contains(&2));
1946 assert!(keys.contains(&3));
// values() likewise yields every value exactly once.
1951 let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
1952 let map = vec.into_iter().collect::<HashMap<int, char>>();
1953 let values = map.values().map(|&v| v).collect::<Vec<char>>();
1954 assert_eq!(values.len(), 3);
1955 assert!(values.contains(&'a'));
1956 assert!(values.contains(&'b'));
1957 assert!(values.contains(&'c'));
// get: None for a missing key, Some(&value) for a present one.
1962 let mut m = HashMap::new();
1963 assert!(m.get(&1i).is_none());
1967 Some(v) => assert_eq!(*v, 2)
// Equality test fixture: two maps built independently (bodies elided here).
1973 let mut m1 = HashMap::new();
1978 let mut m2 = HashMap::new();
// `{:?}` formatting: pair order is unspecified, so accept both orders.
1991 let mut map: HashMap<int, int> = HashMap::new();
1992 let empty: HashMap<int, int> = HashMap::new();
1997 let map_str = format!("{:?}", map);
1999 assert!(map_str == "HashMap {1i: 2i, 3i: 4i}" ||
2000 map_str == "HashMap {3i: 4i, 1i: 2i}");
2001 assert_eq!(format!("{:?}", empty), "HashMap {}");
// Inserting until the table's capacity changes proves the map grows.
2006 let mut m = HashMap::new();
2008 assert_eq!(m.len(), 0);
2009 assert!(m.is_empty());
2012 let old_cap = m.table.capacity();
2013 while old_cap == m.table.capacity() {
2018 assert_eq!(m.len(), i);
2019 assert!(!m.is_empty());
// Checks the resize policy against the internal capacity at each boundary:
// growth on reserve, growth past the load factor, shrink_to_fit afterwards.
2023 fn test_behavior_resize_policy() {
2024 let mut m = HashMap::new();
2026 assert_eq!(m.len(), 0);
2027 assert_eq!(m.table.capacity(), 0);
2028 assert!(m.is_empty());
2032 assert!(m.is_empty());
2033 let initial_cap = m.table.capacity();
// Reserving as much again should exactly double the internal capacity.
2034 m.reserve(initial_cap);
2035 let cap = m.table.capacity();
2037 assert_eq!(cap, initial_cap * 2);
2040 for _ in range(0, cap * 3 / 4) {
2044 // three quarters full
2046 assert_eq!(m.len(), i);
2047 assert_eq!(m.table.capacity(), cap);
2049 for _ in range(0, cap / 4) {
// Passing the load factor must have doubled the capacity.
2055 let new_cap = m.table.capacity();
2056 assert_eq!(new_cap, cap * 2);
2058 for _ in range(0, cap / 2 - 1) {
2061 assert_eq!(m.table.capacity(), new_cap);
2063 // A little more than one quarter full.
2065 assert_eq!(m.table.capacity(), cap);
2066 // again, a little more than half full
2067 for _ in range(0, cap / 2 - 1) {
2073 assert_eq!(m.len(), i);
2074 assert!(!m.is_empty());
2075 assert_eq!(m.table.capacity(), initial_cap);
// reserve must keep capacity stable across the reserved inserts, and
// shrink_to_fit must never shrink below the live element count.
2079 fn test_reserve_shrink_to_fit() {
2080 let mut m = HashMap::new();
2083 assert!(m.capacity() >= m.len());
2084 for i in range(0, 128) {
2089 let usable_cap = m.capacity();
// These inserts were reserved for, so capacity must not change.
2090 for i in range(128, 128+256) {
2092 assert_eq!(m.capacity(), usable_cap);
2095 for i in range(100, 128+256) {
2096 assert_eq!(m.remove(&i), Some(i));
2100 assert_eq!(m.len(), 100);
2101 assert!(!m.is_empty());
2102 assert!(m.capacity() >= m.len());
2104 for i in range(0, 100) {
2105 assert_eq!(m.remove(&i), Some(i));
2110 assert_eq!(m.len(), 1);
2111 assert!(m.capacity() >= m.len());
2112 assert_eq!(m.remove(&0), Some(0));
// collect() from pairs must preserve every (k, v) mapping.
2116 fn test_from_iter() {
2117 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2119 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2121 for &(k, v) in xs.iter() {
2122 assert_eq!(map.get(&k), Some(&v));
// After consuming 3 of 6 items, size_hint must be exact: (3, Some(3)).
2127 fn test_size_hint() {
2128 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2130 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2132 let mut iter = map.iter();
2134 for _ in iter.by_ref().take(3) {}
2136 assert_eq!(iter.size_hint(), (3, Some(3)));
// ExactSizeIterator::len must agree after partial consumption.
2140 fn test_iter_len() {
2141 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2143 let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2145 let mut iter = map.iter();
2147 for _ in iter.by_ref().take(3) {}
2149 assert_eq!(iter.len(), 3);
// Same two checks for the mutable iterator.
2153 fn test_mut_size_hint() {
2154 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2156 let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2158 let mut iter = map.iter_mut();
2160 for _ in iter.by_ref().take(3) {}
2162 assert_eq!(iter.size_hint(), (3, Some(3)));
2166 fn test_iter_mut_len() {
2167 let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
2169 let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2171 let mut iter = map.iter_mut();
2173 for _ in iter.by_ref().take(3) {}
2175 assert_eq!(iter.len(), 3);
// Indexing an existing key returns its value by reference.
2180 let mut map: HashMap<int, int> = HashMap::new();
2186 assert_eq!(map[2], 1);
// Indexing a missing key must panic (this test is expected to fail).
2191 fn test_index_nonexistent() {
2192 let mut map: HashMap<int, int> = HashMap::new();
// Exercises all Entry paths: occupied insert/update/remove and vacant insert.
2203 let xs = [(1i, 10i), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
2205 let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
2207 // Existing key (insert)
2208 match map.entry(1) {
2209 Vacant(_) => unreachable!(),
2210 Occupied(mut view) => {
2211 assert_eq!(view.get(), &10);
// OccupiedEntry::insert returns the old value.
2212 assert_eq!(view.insert(100), 10);
2215 assert_eq!(map.get(&1).unwrap(), &100);
2216 assert_eq!(map.len(), 6);
2219 // Existing key (update)
2220 match map.entry(2) {
2221 Vacant(_) => unreachable!(),
2222 Occupied(mut view) => {
2223 let v = view.get_mut();
2224 let new_v = (*v) * 10;
2228 assert_eq!(map.get(&2).unwrap(), &200);
2229 assert_eq!(map.len(), 6);
2231 // Existing key (take)
2232 match map.entry(3) {
2233 Vacant(_) => unreachable!(),
2235 assert_eq!(view.remove(), 30);
2238 assert_eq!(map.get(&3), None);
2239 assert_eq!(map.len(), 5);
2242 // Inexistent key (insert)
2243 match map.entry(10) {
2244 Occupied(_) => unreachable!(),
// VacantEntry::insert returns a reference to the newly stored value.
2246 assert_eq!(*view.insert(1000), 1000);
2249 assert_eq!(map.get(&10).unwrap(), &1000);
2250 assert_eq!(map.len(), 6);
2254 fn test_entry_take_doesnt_corrupt() {
2256 fn check(m: &HashMap<int, ()>) {
2258 assert!(m.contains_key(k),
2259 "{} is in keys() but not in the map?", k);
2263 let mut m = HashMap::new();
2264 let mut rng = weak_rng();
2266 // Populate the map with some items.
2267 for _ in range(0u, 50) {
2268 let x = rng.gen_range(-10, 10);
2272 for i in range(0u, 1000) {
2273 let x = rng.gen_range(-10, 10);
2277 println!("{}: remove {}", i, x);