// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use alloc::heap::{allocate, deallocate, EMPTY};

use cmp;
use hash::{Hash, Hasher, BuildHasher};
use intrinsics::needs_drop;
use marker;
use mem::{align_of, size_of};
use mem;
use ops::{Deref, DerefMut};
use ptr::{self, Unique};

use self::BucketState::*;

const EMPTY_BUCKET: u64 = 0;

/// The raw hashtable, providing safe-ish access to the unzipped and highly
/// optimized arrays of hashes, keys, and values.
///
/// This design uses less memory and is a lot faster than the naive
/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
/// option on every element, and we get a generally more cache-aware design.
///
/// Essential invariants of this structure:
///
///   - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
///     points to 'undefined' contents. Don't read from it. This invariant is
///     enforced outside this module with the `EmptyBucket`, `FullBucket`,
///     and `SafeHash` types.
///
///   - An `EmptyBucket` is only constructed at an index with
///     a hash of EMPTY_BUCKET.
///
///   - A `FullBucket` is only constructed at an index with a
///     non-EMPTY_BUCKET hash.
///
///   - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hashes. We get
///     around hashes of zero by changing them to 0x8000_0000_0000_0000,
///     which will likely map to the same bucket, while not being confused
///     with a hash of zero.
///
///   - All three "arrays represented by pointers" are the same length:
///     `capacity`. This is set at creation and never changes. The arrays
///     are unzipped to save space (we don't have to pay for the padding
///     between odd sized elements, such as in a map from u64 to u8), and
///     to be more cache aware (scanning through 8 hashes brings in at most
///     2 cache lines, since they're all right beside each other).
///
/// You can kind of think of this module/data structure as a safe wrapper
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
#[unsafe_no_drop_flag]
pub struct RawTable<K, V> {
    capacity: usize,
    size:     usize,
    hashes:   Unique<u64>,

    // Because K/V do not appear directly in any of the types in the struct,
    // inform rustc that in fact instances of K and V are reachable from here.
    marker:   marker::PhantomData<(K, V)>,
}

unsafe impl<K: Send, V: Send> Send for RawTable<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for RawTable<K, V> {}

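// An illustrative sketch (editorial addition, not from the original source):
// for a map from u64 to u8 with capacity 8, a zipped layout of
// `(u64 hash, u64 key, u8 val)` tuples would pay 7 bytes of padding per
// entry to keep each tuple aligned, while the unzipped layout pays for
// alignment only once per array:
//
//     hashes: [u64; 8]   -- 64 bytes, align 8
//     keys:   [u64; 8]   -- 64 bytes, align 8
//     vals:   [u8; 8]    --  8 bytes, align 1
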
struct RawBucket<K, V> {
    hash: *mut u64,
    key:  *mut K,
    val:  *mut V,
    _marker: marker::PhantomData<(K, V)>,
}

impl<K,V> Copy for RawBucket<K,V> {}
impl<K,V> Clone for RawBucket<K,V> {
    fn clone(&self) -> RawBucket<K, V> { *self }
}

pub struct Bucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M
}

impl<K,V,M:Copy> Copy for Bucket<K,V,M> {}
impl<K,V,M:Copy> Clone for Bucket<K,V,M> {
    fn clone(&self) -> Bucket<K,V,M> { *self }
}

pub struct EmptyBucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M
}

pub struct FullBucket<K, V, M> {
    raw:   RawBucket<K, V>,
    idx:   usize,
    table: M
}

pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
pub type FullBucketImm<'table, K, V> = FullBucket<K, V, &'table RawTable<K, V>>;

pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;

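// Editorial note (not in the original source): the `M` parameter is the kind
// of table reference a bucket carries. With `M = &RawTable` a bucket can only
// be read; with `M = &mut RawTable` it can also `put` and `take`, as the
// aliases above spell out.
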
pub enum BucketState<K, V, M> {
    Empty(EmptyBucket<K, V, M>),
    Full(FullBucket<K, V, M>),
}

// A GapThenFull encapsulates the state of two consecutive buckets at once.
// The first bucket, called the gap, is known to be empty.
// The second bucket is full.
pub struct GapThenFull<K, V, M> {
    gap: EmptyBucket<K, V, ()>,
    full: FullBucket<K, V, M>,
}

/// A hash that is not zero, since we use a hash of zero to represent empty
/// buckets.
#[derive(PartialEq, Copy, Clone)]
pub struct SafeHash {
    hash: u64,
}

impl SafeHash {
    /// Peek at the hash value, which is guaranteed to be non-zero.
    #[inline(always)]
    pub fn inspect(&self) -> u64 { self.hash }
}

/// We need to remove hashes of 0. That's reserved for empty buckets.
/// This function wraps up the hashing machinery to be the only way
/// outside this module to generate a SafeHash.
pub fn make_hash<T: ?Sized, S>(hash_state: &S, t: &T) -> SafeHash
    where T: Hash, S: BuildHasher
{
    let mut state = hash_state.build_hasher();
    t.hash(&mut state);
    // We need to avoid 0 in order to prevent collisions with
    // EMPTY_BUCKET. We can maintain our precious uniform distribution
    // of initial indexes by unconditionally setting the MSB,
    // effectively reducing 64-bit hashes to 63 bits.
    SafeHash { hash: 0x8000_0000_0000_0000 | state.finish() }
}

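// An illustrative check (editorial addition, not from the original source):
// OR-ing in the most significant bit makes the result non-zero for any hasher
// output, including zero itself, so a `SafeHash` can never equal EMPTY_BUCKET.
#[test]
fn test_safehash_is_never_zero() {
    assert!((0x8000_0000_0000_0000 | 0u64) != EMPTY_BUCKET);
    assert!((0x8000_0000_0000_0000 | u64::max_value()) != EMPTY_BUCKET);
}
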
// `replace` casts a `*u64` to a `*SafeHash`. Since we statically
// ensure that a `FullBucket` points to an index with a non-zero hash,
// and a `SafeHash` is just a `u64` with a different name, this is
// safe.
//
// This test ensures that a `SafeHash` really IS the same size as a
// `u64`. If you need to change the size of `SafeHash` (and
// consequently make this test fail), `replace` needs to be
// modified to no longer assume this.
#[test]
fn can_alias_safehash_as_u64() {
    assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
}

impl<K, V> RawBucket<K, V> {
    unsafe fn offset(self, count: isize) -> RawBucket<K, V> {
        RawBucket {
            hash: self.hash.offset(count),
            key:  self.key.offset(count),
            val:  self.val.offset(count),
            _marker: marker::PhantomData,
        }
    }
}

// Buckets hold references to the table.
impl<K, V, M> FullBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.idx
    }
}

impl<K, V, M> EmptyBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
}

impl<K, V, M> Bucket<K, V, M> {
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.idx
    }
}

impl<K, V, M> Deref for FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> {
    type Target = RawTable<K, V>;
    fn deref(&self) -> &RawTable<K, V> {
        &self.table
    }
}

/// `Put` is implemented for types which provide access to a table and cannot be invalidated
/// by filling a bucket. A similar implementation for `Take` is possible.
pub trait Put<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V>;
}

impl<'t, K, V> Put<K, V> for &'t mut RawTable<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        *self
    }
}

impl<K, V, M> Put<K, V> for Bucket<K, V, M> where M: Put<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}

impl<K, V, M> Put<K, V> for FullBucket<K, V, M> where M: Put<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}

impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
    pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
        Bucket::at_index(table, hash.inspect() as usize)
    }

    pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
        // if capacity is 0, then the RawBucket will be populated with bogus pointers.
        // This is an uncommon case though, so avoid it in release builds.
        debug_assert!(table.capacity() > 0, "Table should have capacity at this point");
        let ib_index = ib_index & (table.capacity() - 1);
        Bucket {
            raw: unsafe {
                table.first_bucket_raw().offset(ib_index as isize)
            },
            idx: ib_index,
            table: table
        }
    }

    pub fn first(table: M) -> Bucket<K, V, M> {
        Bucket {
            raw: table.first_bucket_raw(),
            idx: 0,
            table: table
        }
    }

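    // Editorial note (not from the original source): capacities are always
    // powers of two, so the masking in `at_index` is a cheap modulo, e.g. for
    // a capacity of 8, an `ib_index` of 13 maps to 13 & 7 == 5 == 13 % 8.
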
    /// Reads a bucket at a given index, returning an enum indicating whether
    /// it's initialized or not. You need to match on this enum to get
    /// the appropriate types to call most of the other functions in
    /// this module.
    pub fn peek(self) -> BucketState<K, V, M> {
        match unsafe { *self.raw.hash } {
            EMPTY_BUCKET =>
                Empty(EmptyBucket {
                    raw: self.raw,
                    idx: self.idx,
                    table: self.table
                }),
            _ =>
                Full(FullBucket {
                    raw: self.raw,
                    idx: self.idx,
                    table: self.table
                })
        }
    }

    /// Modifies the bucket pointer in place to make it point to the next slot.
    pub fn next(&mut self) {
        self.idx += 1;
        let range = self.table.capacity();
        // This code is branchless thanks to a conditional move.
        let dist = if self.idx & (range - 1) == 0 {
            1 - range as isize
        } else {
            1
        };
        unsafe {
            self.raw = self.raw.offset(dist);
        }
    }
}

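// A small worked example (editorial addition, not from the original source):
// with a power-of-two capacity, `idx & (range - 1) == 0` fires exactly when
// the incremented index steps past the last slot, and `1 - range` rebases the
// raw pointer back to slot 0.
#[test]
fn test_branchless_wraparound_distance() {
    let range = 8usize;
    // Stepping from slot 7: idx becomes 8, and 8 & 7 == 0, so the pointer
    // moves by 1 - 8 = -7, landing on slot 0 again.
    assert_eq!(8 & (range - 1), 0);
    assert_eq!(1 - range as isize, -7);
}
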
impl<K, V, M: Deref<Target=RawTable<K, V>>> EmptyBucket<K, V, M> {
    #[inline]
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    #[inline]
    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket {
            raw: self.raw,
            idx: self.idx,
            table: self.table
        }
    }

    pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
        let gap = EmptyBucket {
            raw: self.raw,
            idx: self.idx,
            table: ()
        };

        match self.next().peek() {
            Full(bucket) => {
                Some(GapThenFull {
                    gap: gap,
                    full: bucket
                })
            }
            Empty(..) => None
        }
    }
}

impl<K, V, M> EmptyBucket<K, V, M> where M: Put<K, V> {
    /// Puts given key and value pair, along with the key's hash,
    /// into this bucket in the hashtable. Note how `self` is 'moved' into
    /// this function, because this slot will no longer be empty when
    /// we return! A `FullBucket` is returned for later use, pointing to
    /// the newly-filled slot in the hashtable.
    ///
    /// Use `make_hash` to construct a `SafeHash` to pass to this function.
    pub fn put(mut self, hash: SafeHash, key: K, value: V)
               -> FullBucket<K, V, M> {
        unsafe {
            *self.raw.hash = hash.inspect();
            ptr::write(self.raw.key, key);
            ptr::write(self.raw.val, value);

            self.table.borrow_table_mut().size += 1;
        }

        FullBucket { raw: self.raw, idx: self.idx, table: self.table }
    }
}

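// Illustrative usage (editorial addition; a hypothetical snippet, not from
// the original source): a caller matches on `peek()` and consumes the
// `EmptyBucket` by value, so the types guarantee a slot is never filled twice:
//
//     let full = match Bucket::new(&mut table, hash).peek() {
//         Empty(empty) => empty.put(hash, key, val),
//         Full(_) => unreachable!("a real probe loop would call next()"),
//     };
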
impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
    #[inline]
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    #[inline]
    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket {
            raw: self.raw,
            idx: self.idx,
            table: self.table
        }
    }

    /// Duplicates the current position. This can be useful for operations
    /// on two or more buckets.
    pub fn stash(self) -> FullBucket<K, V, Self> {
        FullBucket {
            raw: self.raw,
            idx: self.idx,
            table: self,
        }
    }

    /// Get the distance between this bucket and the 'ideal' location
    /// as determined by the key's hash stored in it.
    ///
    /// In the cited blog posts above, this is called the "distance to
    /// initial bucket", or DIB. Also known as "probe count".
    pub fn displacement(&self) -> usize {
        // Calculates the distance one has to travel when going from
        // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
        // if the destination is not reached before the end of the table.
        (self.idx.wrapping_sub(self.hash().inspect() as usize)) & (self.table.capacity() - 1)
    }

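    // Worked example (editorial addition, not from the original source): with
    // capacity 8, an entry whose ideal slot is 6 but which sits at index 1
    // has wrapped past the end of the table, so its displacement is
    // (1 - 6) mod 8 = 3 probes (slots 6 -> 7 -> 0 -> 1).
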
    pub fn hash(&self) -> SafeHash {
        unsafe {
            SafeHash {
                hash: *self.raw.hash
            }
        }
    }

    /// Gets references to the key and value at a given index.
    pub fn read(&self) -> (&K, &V) {
        unsafe {
            (&*self.raw.key,
             &*self.raw.val)
        }
    }
}

// We take a mutable reference to the table instead of accepting anything that
// implements `DerefMut` to prevent fn `take` from being called on `stash`ed
// buckets.
impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
    /// Removes this bucket's key and value from the hashtable.
    ///
    /// This works similarly to `put`, building an `EmptyBucket` out of the
    /// taken bucket.
    pub fn take(mut self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
        self.table.size -= 1;

        unsafe {
            *self.raw.hash = EMPTY_BUCKET;
            (
                EmptyBucket {
                    raw: self.raw,
                    idx: self.idx,
                    table: self.table
                },
                ptr::read(self.raw.key),
                ptr::read(self.raw.val)
            )
        }
    }
}

// This use of `Put` is misleading and restrictive, but safe and sufficient for our use cases
// where `M` is a full bucket or table reference type with mutable access to the table.
impl<K, V, M> FullBucket<K, V, M> where M: Put<K, V> {
    pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
        unsafe {
            let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
            let old_key = ptr::replace(self.raw.key, k);
            let old_val = ptr::replace(self.raw.val, v);

            (old_hash, old_key, old_val)
        }
    }
}

impl<K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + DerefMut {
    /// Gets mutable references to the key and value at a given index.
    pub fn read_mut(&mut self) -> (&mut K, &mut V) {
        unsafe {
            (&mut *self.raw.key,
             &mut *self.raw.val)
        }
    }
}

impl<'t, K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + 't {
    /// Exchange a bucket state for immutable references into the table.
    /// Because the underlying reference to the table is also consumed,
    /// no further changes to the structure of the table are possible;
    /// in exchange for this, the returned references have a longer lifetime
    /// than the references returned by `read()`.
    pub fn into_refs(self) -> (&'t K, &'t V) {
        unsafe {
            (&*self.raw.key,
             &*self.raw.val)
        }
    }
}

impl<'t, K, V, M> FullBucket<K, V, M> where M: Deref<Target=RawTable<K, V>> + DerefMut + 't {
    /// This works similarly to `into_refs`, exchanging a bucket state
    /// for mutable references into the table.
    pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
        unsafe {
            (&mut *self.raw.key,
             &mut *self.raw.val)
        }
    }
}

impl<K, V, M> GapThenFull<K, V, M> where M: Deref<Target=RawTable<K, V>> {
    #[inline]
    pub fn full(&self) -> &FullBucket<K, V, M> {
        &self.full
    }

    pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
        unsafe {
            *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
            ptr::copy_nonoverlapping(self.full.raw.key, self.gap.raw.key, 1);
            ptr::copy_nonoverlapping(self.full.raw.val, self.gap.raw.val, 1);
        }

        let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;

        match self.full.next().peek() {
            Full(bucket) => {
                self.gap.raw = prev_raw;
                self.gap.idx = prev_idx;

                self.full = bucket;

                Some(self)
            }
            Empty(..) => None
        }
    }
}

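// Illustrative sketch (editorial addition, not from the original source):
// `gap_peek` and `shift` together perform the backward-shift step of Robin
// Hood deletion. Given
//
//     index:   ... 4     5      6      ...
//     state:   ... gap   full   full   ...
//
// each `shift` moves the entry at index 5 into the gap at index 4 and then
// advances the (gap, full) pair one slot, so every shifted entry ends up one
// slot closer to its ideal bucket.
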
/// Rounds up to a multiple of a power of two. Returns the closest multiple
/// of `target_alignment` that is greater than or equal to `unrounded`.
///
/// # Panics
///
/// Panics if `target_alignment` is not a power of two.
#[inline]
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

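// Editorial note (not from the original source): because `target_alignment`
// is a power of two, adding `align - 1` and then masking with `!(align - 1)`
// rounds up without a division, e.g. (5 + 3) & !3 == 8.
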
#[test]
fn test_rounding() {
    assert_eq!(round_up_to_next(0, 4), 0);
    assert_eq!(round_up_to_next(1, 4), 4);
    assert_eq!(round_up_to_next(2, 4), 4);
    assert_eq!(round_up_to_next(3, 4), 4);
    assert_eq!(round_up_to_next(4, 4), 4);
    assert_eq!(round_up_to_next(5, 4), 8);
}

// Returns a tuple of (key_offset, val_offset, overflow_flag),
// from the start of a mallocated array.
#[inline]
fn calculate_offsets(hashes_size: usize,
                     keys_size: usize, keys_align: usize,
                     vals_align: usize)
                     -> (usize, usize, bool) {
    let keys_offset = round_up_to_next(hashes_size, keys_align);
    let (end_of_keys, oflo) = keys_offset.overflowing_add(keys_size);

    let vals_offset = round_up_to_next(end_of_keys, vals_align);

    (keys_offset, vals_offset, oflo)
}

// Returns a tuple of (minimum required malloc alignment, hash_offset,
// array_size, overflow_flag), from the start of a mallocated array.
fn calculate_allocation(hash_size: usize, hash_align: usize,
                        keys_size: usize, keys_align: usize,
                        vals_size: usize, vals_align: usize)
                        -> (usize, usize, usize, bool) {
    let hash_offset = 0;
    let (_, vals_offset, oflo) = calculate_offsets(hash_size,
                                                   keys_size, keys_align,
                                                   vals_align);
    let (end_of_vals, oflo2) = vals_offset.overflowing_add(vals_size);

    let align = cmp::max(hash_align, cmp::max(keys_align, vals_align));

    (align, hash_offset, end_of_vals, oflo || oflo2)
}

#[test]
fn test_offset_calculation() {
    assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148, false));
    assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6, false));
    assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48, false));
    assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144, false));
    assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5, false));
    assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24, false));
}

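// An extra worked example (editorial addition, not from the original source):
// the layout for a capacity-8 table mapping u64 keys to u8 values. The 8
// hashes occupy bytes 0..64, the u64 keys start at offset 64 (already
// 8-aligned), and the u8 vals start right after the keys at offset 128.
#[test]
fn test_offsets_u64_to_u8() {
    assert_eq!(calculate_offsets(64, 64, 8, 1), (64, 128, false));
}
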
impl<K, V> RawTable<K, V> {
    /// Does not initialize the buckets. The caller should ensure they,
    /// at the very least, set every hash to EMPTY_BUCKET.
    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
        if capacity == 0 {
            return RawTable {
                size: 0,
                capacity: 0,
                hashes: Unique::new(EMPTY as *mut u64),
                marker: marker::PhantomData,
            };
        }

        // No need for `checked_mul` before a more restrictive check performed
        // later in this method.
        let hashes_size = capacity * size_of::<u64>();
        let keys_size = capacity * size_of::<K>();
        let vals_size = capacity * size_of::<V>();

        // Allocating hashmaps is a little tricky. We need to allocate three
        // arrays, but since we know their sizes and alignments up front,
        // we just allocate a single array, and then have the subarrays
        // point into it.
        //
        // This is great in theory, but in practice getting the alignment
        // right is a little subtle. Therefore, calculating offsets has been
        // factored out into a different function.
        let (malloc_alignment, hash_offset, size, oflo) =
            calculate_allocation(hashes_size, align_of::<u64>(),
                                 keys_size, align_of::<K>(),
                                 vals_size, align_of::<V>());

        assert!(!oflo, "capacity overflow");

        // One check for overflow that covers calculation and rounding of size.
        let size_of_bucket = size_of::<u64>().checked_add(size_of::<K>()).unwrap()
                                             .checked_add(size_of::<V>()).unwrap();
        assert!(size >= capacity.checked_mul(size_of_bucket)
                                .expect("capacity overflow"),
                "capacity overflow");

        let buffer = allocate(size, malloc_alignment);
        if buffer.is_null() { ::alloc::oom() }

        let hashes = buffer.offset(hash_offset as isize) as *mut u64;

        RawTable {
            capacity: capacity,
            size: 0,
            hashes: Unique::new(hashes),
            marker: marker::PhantomData,
        }
    }

    fn first_bucket_raw(&self) -> RawBucket<K, V> {
        let hashes_size = self.capacity * size_of::<u64>();
        let keys_size = self.capacity * size_of::<K>();

        let buffer = *self.hashes as *mut u8;
        let (keys_offset, vals_offset, oflo) =
            calculate_offsets(hashes_size,
                              keys_size, align_of::<K>(),
                              align_of::<V>());
        debug_assert!(!oflo, "capacity overflow");
        unsafe {
            RawBucket {
                hash: *self.hashes,
                key: buffer.offset(keys_offset as isize) as *mut K,
                val: buffer.offset(vals_offset as isize) as *mut V,
                _marker: marker::PhantomData,
            }
        }
    }

    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    pub fn new(capacity: usize) -> RawTable<K, V> {
        unsafe {
            let ret = RawTable::new_uninitialized(capacity);
            ptr::write_bytes(*ret.hashes, 0, capacity);
            ret
        }
    }

    /// The hashtable's capacity, similar to a vector's.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// The number of elements ever `put` in the hashtable, minus the number
    /// of elements ever `take`n.
    pub fn size(&self) -> usize {
        self.size
    }

    fn raw_buckets(&self) -> RawBuckets<K, V> {
        RawBuckets {
            raw: self.first_bucket_raw(),
            hashes_end: unsafe {
                self.hashes.offset(self.capacity as isize)
            },
            marker: marker::PhantomData,
        }
    }

    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.raw_buckets(),
            elems_left: self.size(),
        }
    }

    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        IterMut {
            iter: self.raw_buckets(),
            elems_left: self.size(),
        }
    }

    pub fn into_iter(self) -> IntoIter<K, V> {
        let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        IntoIter {
            iter: RawBuckets {
                raw: raw,
                hashes_end: hashes_end,
                marker: marker::PhantomData,
            },
            table: self,
        }
    }

    pub fn drain(&mut self) -> Drain<K, V> {
        let RawBuckets { raw, hashes_end, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        Drain {
            iter: RawBuckets {
                raw: raw,
                hashes_end: hashes_end,
                marker: marker::PhantomData,
            },
            table: self,
        }
    }

    /// Returns an iterator that copies out each entry. Used while the table
    /// is being dropped.
    unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
        let raw_bucket = self.first_bucket_raw();
        RevMoveBuckets {
            raw: raw_bucket.offset(self.capacity as isize),
            hashes_end: raw_bucket.hash,
            elems_left: self.size,
            marker: marker::PhantomData,
        }
    }
}

/// A raw iterator. The basis for some other iterators in this module. Although
/// this interface is safe, it's not used outside this module.
struct RawBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    hashes_end: *mut u64,

    // Strictly speaking, this should be `&'a (K, V)`, but that would
    // require that K: 'a, and we often use RawBuckets<'static...> for
    // move iterations, so that messes up a lot of other things. So
    // just use `&'a ()` as this is not a publicly exposed type
    // anyway.
    marker: marker::PhantomData<&'a ()>,
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
    fn clone(&self) -> RawBuckets<'a, K, V> {
        RawBuckets {
            raw: self.raw,
            hashes_end: self.hashes_end,
            marker: marker::PhantomData,
        }
    }
}

impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
    type Item = RawBucket<K, V>;

    fn next(&mut self) -> Option<RawBucket<K, V>> {
        while self.raw.hash != self.hashes_end {
            unsafe {
                // We are swapping out the pointer to a bucket and replacing
                // it with the pointer to the next one.
                let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
                if *prev.hash != EMPTY_BUCKET {
                    return Some(prev);
                }
            }
        }

        None
    }
}

/// An iterator that moves out buckets in reverse order. It leaves the table
/// in an inconsistent state and should only be used for dropping
/// the table's remaining entries. It's used in the implementation of Drop.
struct RevMoveBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    hashes_end: *mut u64,
    elems_left: usize,

    // As above, `&'a (K, V)` would seem better, but we often use
    // 'static for the lifetime, and this is not a publicly exposed
    // type.
    marker: marker::PhantomData<&'a ()>,
}

impl<'a, K, V> Iterator for RevMoveBuckets<'a, K, V> {
    type Item = (K, V);

    fn next(&mut self) -> Option<(K, V)> {
        if self.elems_left == 0 {
            return None;
        }

        loop {
            debug_assert!(self.raw.hash != self.hashes_end);

            unsafe {
                self.raw = self.raw.offset(-1);

                if *self.raw.hash != EMPTY_BUCKET {
                    self.elems_left -= 1;
                    return Some((
                        ptr::read(self.raw.key),
                        ptr::read(self.raw.val)
                    ));
                }
            }
        }
    }
}

/// Iterator over shared references to entries in a table.
pub struct Iter<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    elems_left: usize,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Iter<'a, K, V> {
    fn clone(&self) -> Iter<'a, K, V> {
        Iter {
            iter: self.iter.clone(),
            elems_left: self.elems_left
        }
    }
}

/// Iterator over mutable references to entries in a table.
pub struct IterMut<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    elems_left: usize,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
// Both K: Sync and K: Send are correct for IterMut's Send impl,
// but Send is the more useful bound.
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}

/// Iterator over the entries in a table, consuming the table.
pub struct IntoIter<K, V> {
    table: RawTable<K, V>,
    iter: RawBuckets<'static, K, V>
}

unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}

/// Iterator over the entries in a table, clearing the table.
pub struct Drain<'a, K: 'a, V: 'a> {
    table: &'a mut RawTable<K, V>,
    iter: RawBuckets<'static, K, V>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Drain<'a, K, V> {}
unsafe impl<'a, K: Send, V: Send> Send for Drain<'a, K, V> {}

impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);

    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.iter.next().map(|bucket| {
            self.elems_left -= 1;
            unsafe {
                (&*bucket.key,
                 &*bucket.val)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    fn len(&self) -> usize { self.elems_left }
}

impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.iter.next().map(|bucket| {
            self.elems_left -= 1;
            unsafe {
                (&*bucket.key,
                 &mut *bucket.val)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    fn len(&self) -> usize { self.elems_left }
}

impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|bucket| {
            self.table.size -= 1;
            unsafe {
                (
                    SafeHash {
                        hash: *bucket.hash,
                    },
                    ptr::read(bucket.key),
                    ptr::read(bucket.val)
                )
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let size = self.table.size();
        (size, Some(size))
    }
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    fn len(&self) -> usize { self.table.size() }
}

impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (SafeHash, K, V);

    #[inline]
    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|bucket| {
            self.table.size -= 1;
            unsafe {
                (
                    SafeHash {
                        hash: ptr::replace(bucket.hash, EMPTY_BUCKET),
                    },
                    ptr::read(bucket.key),
                    ptr::read(bucket.val)
                )
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let size = self.table.size();
        (size, Some(size))
    }
}
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    fn len(&self) -> usize { self.table.size() }
}

impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
    fn drop(&mut self) {
        // Exhaust the iterator so every remaining entry is moved out of the
        // table (and dropped) before the borrow ends.
        for _ in self.by_ref() {}
    }
}

impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
    fn clone(&self) -> RawTable<K, V> {
        unsafe {
            let mut new_ht = RawTable::new_uninitialized(self.capacity());

            {
                let cap = self.capacity();
                let mut new_buckets = Bucket::first(&mut new_ht);
                let mut buckets = Bucket::first(self);
                while buckets.index() != cap {
                    match buckets.peek() {
                        Full(full) => {
                            let (h, k, v) = {
                                let (k, v) = full.read();
                                (full.hash(), k.clone(), v.clone())
                            };
                            *new_buckets.raw.hash = h.inspect();
                            ptr::write(new_buckets.raw.key, k);
                            ptr::write(new_buckets.raw.val, v);
                        }
                        Empty(..) => {
                            *new_buckets.raw.hash = EMPTY_BUCKET;
                        }
                    }
                    new_buckets.next();
                    buckets.next();
                }
            }

            new_ht.size = self.size();

            new_ht
        }
    }
}

impl<K, V> Drop for RawTable<K, V> {
    #[unsafe_destructor_blind_to_params]
    fn drop(&mut self) {
        if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE {
            return;
        }

        // This is done in reverse because we've likely partially taken
        // some elements out with `.into_iter()` from the front.
        // Check if the size is 0, so we don't do a useless scan when
        // dropping empty tables such as on resize.
        // Also avoid double drop of elements that have been already moved out.
        unsafe {
            if needs_drop::<(K, V)>() { // avoid linear runtime for types that don't need drop
                for _ in self.rev_move_buckets() {}
            }
        }

        let hashes_size = self.capacity * size_of::<u64>();
        let keys_size = self.capacity * size_of::<K>();
        let vals_size = self.capacity * size_of::<V>();
        let (align, _, size, oflo) =
            calculate_allocation(hashes_size, align_of::<u64>(),
                                 keys_size, align_of::<K>(),
                                 vals_size, align_of::<V>());

        debug_assert!(!oflo, "should be impossible");

        unsafe {
            deallocate(*self.hashes as *mut u8, size, align);
            // Remember how everything was allocated out of one buffer
            // during initialization? We only need one call to free here.
        }
    }
}