// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// ignore-lexer-test FIXME #15883
use self::BucketState::*;

use clone::Clone;
use cmp;
use hash::{Hash, Hasher};
use iter::{Iterator, IteratorExt, ExactSizeIterator, count};
use marker::{Copy, Send, Sync, Sized, self};
use mem::{min_align_of, size_of};
use mem;
use num::{Int, UnsignedInt};
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
use ptr::{self, PtrExt, copy_nonoverlapping_memory, zero_memory};
use rt::heap::{allocate, deallocate};
use collections::hash_state::HashState;

const EMPTY_BUCKET: u64 = 0u64;
/// The raw hashtable, providing safe-ish access to the unzipped and highly
/// optimized arrays of hashes, keys, and values.
///
/// This design uses less memory and is a lot faster than the naive
/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
/// option on every element, and we get a generally more cache-aware design.
///
/// Essential invariants of this structure:
///
///   - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
///     points to 'undefined' contents. Don't read from it. This invariant is
///     enforced outside this module with the `EmptyBucket`, `FullBucket`,
///     and `SafeHash` types.
///
///   - An `EmptyBucket` is only constructed at an index with
///     a hash of EMPTY_BUCKET.
///
///   - A `FullBucket` is only constructed at an index with a
///     non-EMPTY_BUCKET hash.
///
///   - A `SafeHash` is only constructed for a non-`EMPTY_BUCKET` hash. We get
///     around hashes of zero by changing them to 0x8000_0000_0000_0000,
///     which will likely map to the same bucket, while not being confused
///     with an empty bucket.
///
///   - All three "arrays represented by pointers" are the same length:
///     `capacity`. This is set at creation and never changes. The arrays
///     are unzipped to save space (we don't have to pay for the padding
///     between odd-sized elements, such as in a map from u64 to u8) and
///     to be more cache-aware (scanning through 8 hashes brings in at most
///     2 cache lines, since they're all right beside each other).
///
/// You can kind of think of this module/data structure as a safe wrapper
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general it is just a tricked-out `Vec<Option<(u64, K, V)>>`.
#[unsafe_no_drop_flag]
pub struct RawTable<K, V> {
    capacity: usize,
    size:     usize,
    hashes:   *mut u64,
    // Because K/V do not appear directly in any of the types in the struct,
    // inform rustc that in fact instances of K and V are reachable from here.
    marker:   marker::CovariantType<(K, V)>,
}

unsafe impl<K: Send, V: Send> Send for RawTable<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for RawTable<K, V> {}
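
// Not in the original: a small, self-contained illustration of the space
// argument in the doc comment above. For a map from u64 to u8, a zipped
// `Option<(u64, u64, u8)>` entry pays for an enum tag and padding, while the
// unzipped layout pays exactly one hash, one key, and one value per slot.
#[test]
fn unzipped_layout_saves_padding() {
    let zipped = size_of::<Option<(u64, u64, u8)>>();
    let unzipped = size_of::<u64>() + size_of::<u64>() + size_of::<u8>();
    assert!(unzipped < zipped);
}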
struct RawBucket<K, V> {
    hash: *mut u64,
    key:  *mut K,
    val:  *mut V,
}

impl<K, V> Copy for RawBucket<K, V> {}

pub struct Bucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M,
}

impl<K, V, M: Copy> Copy for Bucket<K, V, M> {}

pub struct EmptyBucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M,
}

pub struct FullBucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M,
}
pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
pub type FullBucketImm<'table, K, V>  = FullBucket<K, V, &'table RawTable<K, V>>;

pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
pub type FullBucketMut<'table, K, V>  = FullBucket<K, V, &'table mut RawTable<K, V>>;
pub enum BucketState<K, V, M> {
    Empty(EmptyBucket<K, V, M>),
    Full(FullBucket<K, V, M>),
}

// A GapThenFull encapsulates the state of two consecutive buckets at once.
// The first bucket, called the gap, is known to be empty.
// The second bucket is full.
struct GapThenFull<K, V, M> {
    gap: EmptyBucket<K, V, ()>,
    full: FullBucket<K, V, M>,
}
/// A hash that is not zero, since we use a hash of zero to represent empty
/// buckets.
#[derive(PartialEq, Copy)]
pub struct SafeHash {
    hash: u64,
}

impl SafeHash {
    /// Peek at the hash value, which is guaranteed to be non-zero.
    pub fn inspect(&self) -> u64 { self.hash }
}
/// We need to remove hashes of 0. That's reserved for empty buckets.
/// This function wraps up `hash_keyed` to be the only way outside this
/// module to generate a SafeHash.
pub fn make_hash<T: ?Sized, S, H>(hash_state: &S, t: &T) -> SafeHash
    where T: Hash<H>,
          S: HashState<Hasher=H>,
          H: Hasher<Output=u64>
{
    let mut state = hash_state.hasher();
    t.hash(&mut state);
    // We need to avoid 0u64 in order to prevent collisions with
    // EMPTY_BUCKET. We can maintain our precious uniform distribution
    // of initial indexes by unconditionally setting the MSB,
    // effectively reducing 64-bit hashes to 63 bits.
    SafeHash { hash: 0x8000_0000_0000_0000 | state.finish() }
}
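
// Not in the original: a quick check that OR-ing in the most significant bit,
// as `make_hash` does above, can never produce EMPTY_BUCKET, no matter what
// the underlying hasher returned.
#[test]
fn msb_trick_never_yields_empty_bucket() {
    for &h in [0u64, 1, 0x00ff_00ff_00ff_00ff, 0xffff_ffff_ffff_ffff].iter() {
        assert!((0x8000_0000_0000_0000 | h) != EMPTY_BUCKET);
    }
}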
// `replace` casts a `*u64` to a `*SafeHash`. Since we statically
// ensure that a `FullBucket` points to an index with a non-zero hash,
// and a `SafeHash` is just a `u64` with a different name, this is
// safe.
//
// This test ensures that a `SafeHash` really IS the same size as a
// `u64`. If you need to change the size of `SafeHash` (and
// consequently make this test fail), `replace` needs to be
// modified to no longer assume this.
#[test]
fn can_alias_safehash_as_u64() {
    assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
}
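
// Not in the original: a minimal demonstration of the aliasing described in
// the comment above, swapping a new `SafeHash` into storage declared as `u64`.
#[test]
fn replace_hash_through_u64_pointer() {
    let mut storage: u64 = 0x8000_0000_0000_0001;
    let new = SafeHash { hash: 0x8000_0000_0000_0002 };
    let old = unsafe { ptr::replace(&mut storage as *mut u64 as *mut SafeHash, new) };
    assert_eq!(old.inspect(), 0x8000_0000_0000_0001);
    assert_eq!(storage, 0x8000_0000_0000_0002);
}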
impl<K, V> RawBucket<K, V> {
    unsafe fn offset(self, count: isize) -> RawBucket<K, V> {
        RawBucket {
            hash: self.hash.offset(count),
            key:  self.key.offset(count),
            val:  self.val.offset(count),
        }
    }
}
// Buckets hold references to the table.
impl<K, V, M> FullBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.idx
    }
}

impl<K, V, M> EmptyBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
}

impl<K, V, M> Bucket<K, V, M> {
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.idx
    }
}
impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
    pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
        Bucket::at_index(table, hash.inspect() as usize)
    }

    pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
        let ib_index = ib_index & (table.capacity() - 1);
        Bucket {
            raw: unsafe {
                table.first_bucket_raw().offset(ib_index as isize)
            },
            idx: ib_index,
            table: table
        }
    }

    pub fn first(table: M) -> Bucket<K, V, M> {
        Bucket {
            raw: table.first_bucket_raw(),
            idx: 0,
            table: table
        }
    }

    /// Reads a bucket at a given index, returning an enum indicating whether
    /// it's initialized or not. You need to match on this enum to get
    /// the appropriate types to call most of the other functions in
    /// this module.
    pub fn peek(self) -> BucketState<K, V, M> {
        match unsafe { *self.raw.hash } {
    /// Modifies the bucket pointer in place to make it point to the next slot.
    pub fn next(&mut self) {
        // Branchless bucket iteration step.
        // As we reach the end of the table...
        // We take the current idx:          0111111b
        // Xor it by its increment:        ^ 1000000b
        //                                   --------
        //                                   1111111b
        // Then AND with the capacity:     & 1000000b
        //                                   --------
        // to get the backwards offset:      1000000b
        // ... and it's zero at all other times.
        let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity();
        // Finally, we obtain the offset 1 or the offset -cap + 1.
        let dist = 1 - (maybe_wraparound_dist as isize);

        self.idx += 1;

        unsafe {
            self.raw = self.raw.offset(dist);
        }
    }
}
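
// Not in the original: a standalone sketch of the wraparound arithmetic used
// by `next` above, assuming a hypothetical table capacity of 8 (capacities
// are always powers of two).
#[test]
fn branchless_wraparound_arithmetic() {
    let capacity: usize = 8;
    let step = |idx: usize| -> isize {
        let maybe_wraparound_dist = (idx ^ (idx + 1)) & capacity;
        1 - (maybe_wraparound_dist as isize)
    };
    assert_eq!(step(3), 1);     // middle of the table: advance one slot
    assert_eq!(step(7), 1 - 8); // last slot: jump back to the front
}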
impl<K, V, M: Deref<Target=RawTable<K, V>>> EmptyBucket<K, V, M> {
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket {
            raw: self.raw,
            idx: self.idx,
            table: self.table
        }
    }

    pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
        let gap = EmptyBucket {
            raw: self.raw,
            idx: self.idx,
            table: ()
        };

        match self.next().peek() {
impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> EmptyBucket<K, V, M> {
    /// Puts the given key and value pair, along with the key's hash,
    /// into this bucket in the hashtable. Note how `self` is 'moved' into
    /// this function, because this slot will no longer be empty when
    /// we return! A `FullBucket` is returned for later use, pointing to
    /// the newly-filled slot in the hashtable.
    ///
    /// Use `make_hash` to construct a `SafeHash` to pass to this function.
    pub fn put(mut self, hash: SafeHash, key: K, value: V)
               -> FullBucket<K, V, M> {
        unsafe {
            *self.raw.hash = hash.inspect();
            ptr::write(self.raw.key, key);
            ptr::write(self.raw.val, value);
        }

        self.table.size += 1;

        FullBucket { raw: self.raw, idx: self.idx, table: self.table }
    }
}
impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();

    pub fn into_bucket(self) -> Bucket<K, V, M> {

    /// Get the distance between this bucket and the 'ideal' location
    /// as determined by the key's hash stored in it.
    ///
    /// In the cited blog posts above, this is called the "distance to
    /// initial bucket", or DIB. Also known as "probe count".
    pub fn distance(&self) -> usize {
        // Calculates the distance one has to travel when going from
        // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
        // if the destination is not reached before the end of the table.
        (self.idx - self.hash().inspect() as usize) & (self.table.capacity() - 1)
    }

    pub fn hash(&self) -> SafeHash {
        unsafe {
            SafeHash {
                hash: *self.raw.hash
            }
        }
    }

    /// Gets references to the key and value at a given index.
    pub fn read(&self) -> (&K, &V) {
        unsafe {
            (&*self.raw.key,
             &*self.raw.val)
        }
    }
}
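
// Not in the original: a worked example of the probe-distance formula used by
// `distance` above, for a hypothetical capacity of 8. `wrapping_sub` stands in
// for the plain subtraction, which may wrap for displaced entries.
#[test]
fn probe_distance_examples() {
    let capacity: usize = 8;
    let dib = |idx: usize, ideal: usize| idx.wrapping_sub(ideal) & (capacity - 1);
    assert_eq!(dib(5, 5), 0); // entry sits in its ideal bucket
    assert_eq!(dib(6, 5), 1); // displaced one slot past its ideal bucket
    assert_eq!(dib(1, 7), 2); // wrapped around the end of the table
}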
impl<K, V, M: Deref<Target=RawTable<K, V>> + DerefMut> FullBucket<K, V, M> {
    /// Removes this bucket's key and value from the hashtable.
    ///
    /// This works similarly to `put`, building an `EmptyBucket` out of the
    /// taken bucket.
    pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
        self.table.size -= 1;

        unsafe {
            *self.raw.hash = EMPTY_BUCKET;
            (
                EmptyBucket {
                    raw: self.raw,
                    idx: self.idx,
                    table: self.table
                },
                ptr::read(self.raw.key),
                ptr::read(self.raw.val)
            )
        }
    }

    pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
        unsafe {
            let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
            let old_key  = ptr::replace(self.raw.key, k);
            let old_val  = ptr::replace(self.raw.val, v);

            (old_hash, old_key, old_val)
        }
    }

    /// Gets mutable references to the key and value at a given index.
    pub fn read_mut(&mut self) -> (&mut K, &mut V) {
        unsafe {
            (&mut *self.raw.key,
             &mut *self.raw.val)
        }
    }
}
impl<'t, K, V, M: Deref<Target=RawTable<K, V>> + 't> FullBucket<K, V, M> {
    /// Exchange a bucket state for immutable references into the table.
    /// Because the underlying reference to the table is also consumed,
    /// no further changes to the structure of the table are possible;
    /// in exchange for this, the returned references have a longer lifetime
    /// than the references returned by `read()`.
    pub fn into_refs(self) -> (&'t K, &'t V) {
        unsafe {
            (&*self.raw.key,
             &*self.raw.val)
        }
    }
}

impl<'t, K, V, M: Deref<Target=RawTable<K, V>> + DerefMut + 't> FullBucket<K, V, M> {
    /// This works similarly to `into_refs`, exchanging a bucket state
    /// for mutable references into the table.
    pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
        unsafe {
            (&mut *self.raw.key,
             &mut *self.raw.val)
        }
    }
}
impl<K, V, M> BucketState<K, V, M> {
    pub fn expect_full(self) -> FullBucket<K, V, M> {
        match self {
            Full(full) => full,
            Empty(..) => panic!("Expected full bucket")
        }
    }
}
impl<K, V, M: Deref<Target=RawTable<K, V>>> GapThenFull<K, V, M> {
    pub fn full(&self) -> &FullBucket<K, V, M> {
        &self.full
    }

    pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
        unsafe {
            *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
            copy_nonoverlapping_memory(self.gap.raw.key, self.full.raw.key, 1);
            copy_nonoverlapping_memory(self.gap.raw.val, self.full.raw.val, 1);
        }

        let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;

        match self.full.next().peek() {
            Full(bucket) => {
                self.gap.raw = prev_raw;
                self.gap.idx = prev_idx;
/// Rounds up to a multiple of a power of two. Returns the closest multiple
/// of `target_alignment` that is greater than or equal to `unrounded`.
///
/// # Panics
///
/// Panics if `target_alignment` is not a power of two.
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

#[test]
fn test_rounding() {
    assert_eq!(round_up_to_next(0, 4), 0);
    assert_eq!(round_up_to_next(1, 4), 4);
    assert_eq!(round_up_to_next(2, 4), 4);
    assert_eq!(round_up_to_next(3, 4), 4);
    assert_eq!(round_up_to_next(4, 4), 4);
    assert_eq!(round_up_to_next(5, 4), 8);
}
// Returns a tuple of (key_offset, val_offset),
// from the start of a mallocated array.
fn calculate_offsets(hashes_size: usize,
                     keys_size: usize, keys_align: usize,
                     vals_align: usize)
                     -> (usize, usize) {
    let keys_offset = round_up_to_next(hashes_size, keys_align);
    let end_of_keys = keys_offset + keys_size;

    let vals_offset = round_up_to_next(end_of_keys, vals_align);

    (keys_offset, vals_offset)
}

// Returns a tuple of (minimum required malloc alignment, hash_offset,
// array_size), from the start of a mallocated array.
fn calculate_allocation(hash_size: usize, hash_align: usize,
                        keys_size: usize, keys_align: usize,
                        vals_size: usize, vals_align: usize)
                        -> (usize, usize, usize) {
    let hash_offset = 0;
    let (_, vals_offset) = calculate_offsets(hash_size,
                                             keys_size, keys_align,
                                             vals_align);
    let end_of_vals = vals_offset + vals_size;

    let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));

    (min_align, hash_offset, end_of_vals)
}

#[test]
fn test_offset_calculation() {
    assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
    assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
    assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
    assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
    assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
    assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
}
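
// Not in the original: a worked example of how one malloc'd buffer is carved
// into the three arrays, for a hypothetical table with capacity 4, u64 keys,
// and u8 values. The alignments are passed as literals (typical 64-bit
// values) so the expected result is deterministic; `new_uninitialized` below
// performs the same computation with `min_align_of`.
#[test]
fn single_buffer_layout_example() {
    let capacity = 4;
    let hashes_size = capacity * size_of::<u64>(); // 32 bytes of hashes
    let keys_size   = capacity * size_of::<u64>(); // 32 bytes of keys
    let vals_size   = capacity * size_of::<u8>();  //  4 bytes of values
    let (align, hash_offset, total) =
        calculate_allocation(hashes_size, 8,  // u64 hashes, 8-byte aligned
                             keys_size,   8,  // u64 keys, 8-byte aligned
                             vals_size,   1); // u8 values, unaligned
    // Hashes sit at the front of the buffer; keys and values follow, each
    // rounded up to its own alignment, so the whole table is one allocation.
    assert_eq!((align, hash_offset, total), (8, 0, 68));
}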
impl<K, V> RawTable<K, V> {
    /// Does not initialize the buckets. The caller should ensure that,
    /// at the very least, every hash is set to EMPTY_BUCKET.
    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
        if capacity == 0 {
            return RawTable {
                size: 0,
                capacity: 0,
                hashes: ptr::null_mut(),
                marker: marker::CovariantType,
            };
        }

        // No need for `checked_mul` before a more restrictive check performed
        // later in this method.
        let hashes_size = capacity * size_of::<u64>();
        let keys_size = capacity * size_of::<K>();
        let vals_size = capacity * size_of::<V>();

        // Allocating hashmaps is a little tricky. We need to allocate three
        // arrays, but since we know their sizes and alignments up front,
        // we just allocate a single array, and then have the subarrays
        // point into it.
        //
        // This is great in theory, but in practice getting the alignment
        // right is a little subtle. Therefore, calculating offsets has been
        // factored out into a different function.
        let (malloc_alignment, hash_offset, size) =
            calculate_allocation(
                hashes_size, min_align_of::<u64>(),
                keys_size,   min_align_of::<K>(),
                vals_size,   min_align_of::<V>());

        // One check for overflow that covers calculation and rounding of size.
        let size_of_bucket = size_of::<u64>().checked_add(size_of::<K>()).unwrap()
                                             .checked_add(size_of::<V>()).unwrap();
        assert!(size >= capacity.checked_mul(size_of_bucket)
                                .expect("capacity overflow"),
                "capacity overflow");

        let buffer = allocate(size, malloc_alignment);
        if buffer.is_null() { ::alloc::oom() }

        let hashes = buffer.offset(hash_offset as isize) as *mut u64;

        RawTable {
            capacity: capacity,
            size: 0,
            hashes: hashes,
            marker: marker::CovariantType,
        }
    }
    fn first_bucket_raw(&self) -> RawBucket<K, V> {
        let hashes_size = self.capacity * size_of::<u64>();
        let keys_size = self.capacity * size_of::<K>();

        let buffer = self.hashes as *mut u8;
        let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
                                                           keys_size, min_align_of::<K>(),
                                                           min_align_of::<V>());

        unsafe {
            RawBucket {
                hash: self.hashes,
                key: buffer.offset(keys_offset as isize) as *mut K,
                val: buffer.offset(vals_offset as isize) as *mut V
            }
        }
    }
    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    pub fn new(capacity: usize) -> RawTable<K, V> {
        unsafe {
            let ret = RawTable::new_uninitialized(capacity);
            zero_memory(ret.hashes, capacity);
            ret
        }
    }

    /// The hashtable's capacity, similar to a vector's.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// The number of elements ever `put` in the hashtable, minus the number
    /// of elements ever `take`n.
    pub fn size(&self) -> usize {
        self.size
    }
    fn raw_buckets(&self) -> RawBuckets<K, V> {
        RawBuckets {
            raw: self.first_bucket_raw(),
            hashes_end: unsafe {
                self.hashes.offset(self.capacity as isize)
            },
            marker: marker::ContravariantLifetime,
        }
    }

    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.raw_buckets(),
            elems_left: self.size(),
        }
    }

    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        IterMut {
            iter: self.raw_buckets(),
            elems_left: self.size(),
        }
    }

    pub fn into_iter(self) -> IntoIter<K, V> {
        let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        IntoIter {
            iter: RawBuckets {
                raw: raw,
                hashes_end: hashes_end,
                marker: marker::ContravariantLifetime,
            },
            table: self,
        }
    }

    pub fn drain(&mut self) -> Drain<K, V> {
        let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        Drain {
            iter: RawBuckets {
                raw: raw,
                hashes_end: hashes_end,
                marker: marker::ContravariantLifetime::<'static>,
            },
            table: self,
        }
    }
    /// Returns an iterator that copies out each entry. Used while the table
    /// is being dropped.
    unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
        let raw_bucket = self.first_bucket_raw();
        RevMoveBuckets {
            raw: raw_bucket.offset(self.capacity as isize),
            hashes_end: raw_bucket.hash,
            elems_left: self.size,
            marker: marker::ContravariantLifetime,
        }
    }
}
/// A raw iterator. The basis for some other iterators in this module. Although
/// this interface is safe, it's not used outside this module.
struct RawBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    hashes_end: *mut u64,
    marker: marker::ContravariantLifetime<'a>,
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
    fn clone(&self) -> RawBuckets<'a, K, V> {
        RawBuckets {
            raw: self.raw,
            hashes_end: self.hashes_end,
            marker: marker::ContravariantLifetime,
        }
    }
}
impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
    type Item = RawBucket<K, V>;

    fn next(&mut self) -> Option<RawBucket<K, V>> {
        while self.raw.hash != self.hashes_end {
            unsafe {
                // We are swapping out the pointer to a bucket and replacing
                // it with the pointer to the next one.
                let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
                if *prev.hash != EMPTY_BUCKET {
                    return Some(prev);
                }
            }
        }

        None
    }
}
/// An iterator that moves out buckets in reverse order. It leaves the table
/// in an inconsistent state and should only be used for dropping
/// the table's remaining entries. It's used in the implementation of Drop.
struct RevMoveBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    hashes_end: *mut u64,
    elems_left: usize,
    marker: marker::ContravariantLifetime<'a>,
}

impl<'a, K, V> Iterator for RevMoveBuckets<'a, K, V> {
    type Item = (K, V);

    fn next(&mut self) -> Option<(K, V)> {
        if self.elems_left == 0 {
            return None;
        }

        loop {
            debug_assert!(self.raw.hash != self.hashes_end);

            unsafe {
                self.raw = self.raw.offset(-1);

                if *self.raw.hash != EMPTY_BUCKET {
                    self.elems_left -= 1;
                    return Some((
                        ptr::read(self.raw.key),
                        ptr::read(self.raw.val)
                    ));
                }
            }
        }
    }
}
/// Iterator over shared references to entries in a table.
pub struct Iter<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    elems_left: usize,
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Iter<'a, K, V> {
    fn clone(&self) -> Iter<'a, K, V> {
        Iter {
            iter: self.iter.clone(),
            elems_left: self.elems_left
        }
    }
}

/// Iterator over mutable references to entries in a table.
pub struct IterMut<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    elems_left: usize,
}

/// Iterator over the entries in a table, consuming the table.
pub struct IntoIter<K, V> {
    table: RawTable<K, V>,
    iter: RawBuckets<'static, K, V>
}

/// Iterator over the entries in a table, clearing the table.
pub struct Drain<'a, K: 'a, V: 'a> {
    table: &'a mut RawTable<K, V>,
    iter: RawBuckets<'static, K, V>,
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);

    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.iter.next().map(|bucket| {
            self.elems_left -= 1;
            unsafe {
                (&*bucket.key,
                 &*bucket.val)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}

impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    fn len(&self) -> usize { self.elems_left }
}

impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.iter.next().map(|bucket| {
            self.elems_left -= 1;
            unsafe {
                (&*bucket.key,
                 &mut *bucket.val)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}

impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    fn len(&self) -> usize { self.elems_left }
}
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|bucket| {
            self.table.size -= 1;
            unsafe {
                (
                    SafeHash { hash: *bucket.hash },
                    ptr::read(bucket.key),
                    ptr::read(bucket.val)
                )
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let size = self.table.size();
        (size, Some(size))
    }
}

impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    fn len(&self) -> usize { self.table.size() }
}

impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|bucket| {
            self.table.size -= 1;
            unsafe {
                (
                    SafeHash { hash: ptr::replace(bucket.hash, EMPTY_BUCKET) },
                    ptr::read(bucket.key),
                    ptr::read(bucket.val)
                )
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let size = self.table.size();
        (size, Some(size))
    }
}

impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    fn len(&self) -> usize { self.table.size() }
}
impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
    fn drop(&mut self) {
        for _ in self.by_ref() {}
    }
}
impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
    fn clone(&self) -> RawTable<K, V> {
        unsafe {
            let mut new_ht = RawTable::new_uninitialized(self.capacity());

            let cap = self.capacity();
            let mut new_buckets = Bucket::first(&mut new_ht);
            let mut buckets = Bucket::first(self);
            while buckets.index() != cap {
                match buckets.peek() {
                    Full(full) => {
                        let (h, k, v) = {
                            let (k, v) = full.read();
                            (full.hash(), k.clone(), v.clone())
                        };
                        *new_buckets.raw.hash = h.inspect();
                        ptr::write(new_buckets.raw.key, k);
                        ptr::write(new_buckets.raw.val, v);
                    }
                    Empty(..) => {
                        *new_buckets.raw.hash = EMPTY_BUCKET;
                    }
                }
                new_buckets.next();
                buckets.next();
            }

            new_ht.size = self.size();

            new_ht
        }
    }
}
impl<K, V> Drop for RawTable<K, V> {
    fn drop(&mut self) {
        if self.hashes.is_null() {
            return;
        }
        // This is done in reverse because we've likely partially taken
        // some elements out with `.into_iter()` from the front.
        // Check if the size is 0, so we don't do a useless scan when
        // dropping empty tables such as on resize.
        // Also avoid double drop of elements that have been already moved out.
        unsafe {
            for _ in self.rev_move_buckets() {}
        }

        let hashes_size = self.capacity * size_of::<u64>();
        let keys_size = self.capacity * size_of::<K>();
        let vals_size = self.capacity * size_of::<V>();
        let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
                                                    keys_size, min_align_of::<K>(),
                                                    vals_size, min_align_of::<V>());

        unsafe {
            deallocate(self.hashes as *mut u8, size, align);
            // Remember how everything was allocated out of one buffer
            // during initialization? We only need one call to free here.
        }
    }
}