// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use alloc::heap::{Heap, Alloc, Layout};
use cmp;
use hash::{BuildHasher, Hash, Hasher};
use marker;
use mem::{align_of, size_of, needs_drop};
use mem;
use ops::{Deref, DerefMut};
use ptr::{self, Unique, NonNull};

use self::BucketState::*;
/// Integer type used for stored hash values.
///
/// No more than bit_width(usize) bits are needed to select a bucket.
///
/// The most significant bit is ours to use for tagging `SafeHash`.
///
/// (Even if we could have usize::MAX bytes allocated for buckets,
/// each bucket stores at least a `HashUint`, so there can be no more than
/// usize::MAX / size_of(usize) buckets.)
type HashUint = usize;

const EMPTY_BUCKET: HashUint = 0;
const EMPTY: usize = 1;

/// Special `Unique<HashUint>` that uses the lower bit of the pointer
/// to expose a boolean tag.
/// Note: when the pointer is initialized to EMPTY, `.ptr()` will return
/// null and the tag functions shouldn't be used.
struct TaggedHashUintPtr(Unique<HashUint>);
impl TaggedHashUintPtr {
    unsafe fn new(ptr: *mut HashUint) -> Self {
        debug_assert!(ptr as usize & 1 == 0 || ptr as usize == EMPTY as usize);
        TaggedHashUintPtr(Unique::new_unchecked(ptr))
    }
    fn set_tag(&mut self, value: bool) {
        let mut usize_ptr = self.0.as_ptr() as usize;
        if value {
            usize_ptr |= 1;
        } else {
            usize_ptr &= !1;
        }
        unsafe { self.0 = Unique::new_unchecked(usize_ptr as *mut HashUint) }
    }
    fn tag(&self) -> bool {
        (self.0.as_ptr() as usize) & 1 == 1
    }
    fn ptr(&self) -> *mut HashUint {
        (self.0.as_ptr() as usize & !1) as *mut HashUint
    }
}
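
// A small, hedged sketch (not part of the original implementation) of how the
// low-bit tag round-trips: `HashUint` is at least 2-byte aligned, so bit 0 of
// any real hash pointer is free to carry the boolean flag, and masking it off
// recovers the original pointer.
#[test]
fn tagged_ptr_roundtrip_example() {
    let mut hash: HashUint = 42;
    unsafe {
        let mut tagged = TaggedHashUintPtr::new(&mut hash);
        assert!(!tagged.tag(), "a freshly wrapped, aligned pointer starts untagged");
        tagged.set_tag(true);
        assert!(tagged.tag());
        // The payload is unaffected by the tag bit.
        assert_eq!(*tagged.ptr(), 42);
        tagged.set_tag(false);
        assert!(!tagged.tag());
    }
}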
/// The raw hashtable, providing safe-ish access to the unzipped and highly
/// optimized arrays of hashes, and key-value pairs.
///
/// This design is a lot faster than the naive
/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
/// option on every element, and we get a generally more cache-aware design.
///
/// Essential invariants of this structure:
///
///   - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
///     points to 'undefined' contents. Don't read from it. This invariant is
///     enforced outside this module with the `EmptyBucket`, `FullBucket`,
///     and `SafeHash` types.
///
///   - An `EmptyBucket` is only constructed at an index with
///     a hash of EMPTY_BUCKET.
///
///   - A `FullBucket` is only constructed at an index with a
///     non-EMPTY_BUCKET hash.
///
///   - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
///     around hashes of zero by changing them to 0x8000_0000_0000_0000,
///     which will likely map to the same bucket, while not being confused
///     with "empty".
///
///   - Both "arrays represented by pointers" are the same length:
///     `capacity`. This is set at creation and never changes. The arrays
///     are unzipped and are more cache aware (scanning through 8 hashes
///     brings in at most 2 cache lines, since they're all right beside each
///     other). This layout may waste space in padding such as in a map from
///     u64 to u8, but is a more cache conscious layout as the key-value pairs
///     are only very shortly probed and the desired value will be in the same
///     or next cache line.
///
/// You can kind of think of this module/data structure as a safe wrapper
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
///
/// The hashtable also exposes a special boolean tag. The tag defaults to false
/// when the RawTable is created and is accessible with the `tag` and `set_tag`
/// functions.
pub struct RawTable<K, V> {
    capacity_mask: usize,
    size: usize,
    hashes: TaggedHashUintPtr,

    // Because K/V do not appear directly in any of the types in the struct,
    // inform rustc that in fact instances of K and V are reachable from here.
    marker: marker::PhantomData<(K, V)>,
}
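
// A hedged usage sketch (not from the original source) of the raw API this
// module provides: create a table, aim a bucket at a hash, fill it if the
// slot is empty, and read it back. Real callers (`HashMap`) must also handle
// the `Full` case by probing and robin hood displacement; that is elided here.
#[test]
fn raw_table_put_and_read_example() {
    let mut table: RawTable<u32, &'static str> = RawTable::new(8);
    let hash = SafeHash::new(0xDEAD_BEEF);
    match Bucket::new(&mut table, hash).peek() {
        Empty(empty) => {
            let full = empty.put(hash, 7, "seven");
            assert_eq!(full.read(), (&7, &"seven"));
        }
        Full(_) => unreachable!("a freshly created table has no full buckets"),
    }
    assert_eq!(table.size(), 1);
}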
// An unsafe view of a RawTable bucket
// Valid indexes are within [0..table_capacity)
pub struct RawBucket<K, V> {
    hash_start: *mut HashUint,
    // We use *const to ensure covariance with respect to K and V
    pair_start: *const (K, V),
    idx: usize,
    _marker: marker::PhantomData<(K, V)>,
}

impl<K, V> Copy for RawBucket<K, V> {}
impl<K, V> Clone for RawBucket<K, V> {
    fn clone(&self) -> RawBucket<K, V> {
        *self
    }
}

pub struct Bucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

impl<K, V, M: Copy> Copy for Bucket<K, V, M> {}
impl<K, V, M: Copy> Clone for Bucket<K, V, M> {
    fn clone(&self) -> Bucket<K, V, M> {
        *self
    }
}

pub struct EmptyBucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

pub struct FullBucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;

pub enum BucketState<K, V, M> {
    Empty(EmptyBucket<K, V, M>),
    Full(FullBucket<K, V, M>),
}
// A GapThenFull encapsulates the state of two consecutive buckets at once.
// The first bucket, called the gap, is known to be empty.
// The second bucket is full.
pub struct GapThenFull<K, V, M> {
    gap: EmptyBucket<K, V, ()>,
    full: FullBucket<K, V, M>,
}
/// A hash that is not zero, since we use a hash of zero to represent empty
/// buckets.
#[derive(PartialEq, Copy, Clone)]
pub struct SafeHash {
    hash: HashUint,
}

impl SafeHash {
    /// Peek at the hash value, which is guaranteed to be non-zero.
    pub fn inspect(&self) -> HashUint {
        self.hash
    }

    pub fn new(hash: u64) -> Self {
        // We need to avoid 0 in order to prevent collisions with
        // `EMPTY_BUCKET`. We can maintain our precious uniform distribution
        // of initial indexes by unconditionally setting the MSB,
        // effectively reducing the hashes by one bit.
        //
        // Truncate hash to fit in `HashUint`.
        let hash_bits = size_of::<HashUint>() * 8;
        SafeHash { hash: (1 << (hash_bits - 1)) | (hash as HashUint) }
    }
}
/// We need to remove hashes of 0. That's reserved for empty buckets.
/// This function wraps up hashing with the given `BuildHasher` so that it is
/// the only way outside this module to generate a SafeHash.
pub fn make_hash<T: ?Sized, S>(hash_state: &S, t: &T) -> SafeHash
    where T: Hash,
          S: BuildHasher
{
    let mut state = hash_state.build_hasher();
    t.hash(&mut state);
    SafeHash::new(state.finish())
}
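
// A quick sanity sketch (not in the original file): `SafeHash::new` forces the
// most significant bit on, so even an input hash of 0 cannot collide with
// `EMPTY_BUCKET`, while the low-order bits used for bucket selection survive.
#[test]
fn safehash_is_never_the_empty_sentinel() {
    let msb: HashUint = 1 << (size_of::<HashUint>() * 8 - 1);
    assert_ne!(SafeHash::new(0).inspect(), EMPTY_BUCKET);
    assert_eq!(SafeHash::new(0).inspect(), msb);
    // Masking the MSB back off recovers the truncated input hash, so the
    // bucket chosen by `hash & capacity_mask` is unchanged for small tables.
    assert_eq!(SafeHash::new(0x1234).inspect() & !msb, 0x1234);
}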
// `replace` casts a `*HashUint` to a `*SafeHash`. Since we statically
// ensure that a `FullBucket` points to an index with a non-zero hash,
// and a `SafeHash` is just a `HashUint` with a different name, this is
// safe.
//
// This test ensures that a `SafeHash` really IS the same size as a
// `HashUint`. If you need to change the size of `SafeHash` (and
// consequently make this test fail), `replace` needs to be
// modified to no longer assume this.
#[test]
fn can_alias_safehash_as_hash() {
    assert_eq!(size_of::<SafeHash>(), size_of::<HashUint>())
}
// RawBucket methods are unsafe as it's possible to
// make a RawBucket point to invalid memory using safe code.
impl<K, V> RawBucket<K, V> {
    unsafe fn hash(&self) -> *mut HashUint {
        self.hash_start.offset(self.idx as isize)
    }
    unsafe fn pair(&self) -> *mut (K, V) {
        self.pair_start.offset(self.idx as isize) as *mut (K, V)
    }
    unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
        (self.hash(), self.pair())
    }
}
// Buckets hold references to the table.
impl<K, V, M> FullBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Borrow a mutable reference to the table.
    pub fn table_mut(&mut self) -> &mut M {
        &mut self.table
    }
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.raw.idx
    }
    /// Get the raw bucket.
    pub fn raw(&self) -> RawBucket<K, V> {
        self.raw
    }
}
impl<K, V, M> EmptyBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Borrow a mutable reference to the table.
    pub fn table_mut(&mut self) -> &mut M {
        &mut self.table
    }
}

impl<K, V, M> Bucket<K, V, M> {
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.raw.idx
    }

    pub fn into_table(self) -> M {
        self.table
    }
}
impl<K, V, M> Deref for FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>>
{
    type Target = RawTable<K, V>;
    fn deref(&self) -> &RawTable<K, V> {
        &self.table
    }
}
/// `Put` is implemented for types which provide access to a table and cannot be invalidated
/// by filling a bucket. A similar implementation for `Take` is possible.
pub trait Put<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V>;
}

impl<'t, K, V> Put<K, V> for &'t mut RawTable<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        *self
    }
}

impl<K, V, M> Put<K, V> for Bucket<K, V, M>
    where M: Put<K, V>
{
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}

impl<K, V, M> Put<K, V> for FullBucket<K, V, M>
    where M: Put<K, V>
{
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}
impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
    pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
        Bucket::at_index(table, hash.inspect() as usize)
    }

    pub fn new_from(r: RawBucket<K, V>, t: M) -> Bucket<K, V, M> {
        Bucket { raw: r, table: t }
    }

    pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
        // if capacity is 0, then the RawBucket will be populated with bogus pointers.
        // This is an uncommon case though, so avoid it in release builds.
        debug_assert!(table.capacity() > 0,
                      "Table should have capacity at this point");
        let ib_index = ib_index & table.capacity_mask;
        Bucket {
            raw: table.raw_bucket_at(ib_index),
            table,
        }
    }

    pub fn first(table: M) -> Bucket<K, V, M> {
        Bucket { raw: table.raw_bucket_at(0), table }
    }
364 // "So a few of the first shall be last: for many be called,
367 // We'll most likely encounter a few buckets at the beginning that
368 // have their initial buckets near the end of the table. They were
369 // placed at the beginning as the probe wrapped around the table
370 // during insertion. We must skip forward to a bucket that won't
371 // get reinserted too early and won't unfairly steal others spot.
372 // This eliminates the need for robin hood.
373 pub fn head_bucket(table: M) -> Bucket<K, V, M> {
374 let mut bucket = Bucket::first(table);
377 bucket = match bucket.peek() {
379 if full.displacement() == 0 {
380 // This bucket occupies its ideal spot.
381 // It indicates the start of another "cluster".
382 bucket = full.into_bucket();
385 // Leaving this bucket in the last cluster for later.
389 // Encountered a hole between clusters.
    /// Reads a bucket at a given index, returning an enum indicating whether
    /// it's initialized or not. You need to match on this enum to get
    /// the appropriate types to call most of the other functions in
    /// this module.
    pub fn peek(self) -> BucketState<K, V, M> {
        match unsafe { *self.raw.hash() } {
            EMPTY_BUCKET => Empty(EmptyBucket { raw: self.raw, table: self.table }),
            _ => Full(FullBucket { raw: self.raw, table: self.table }),
        }
    }

    /// Modifies the bucket in place to make it point to the next slot.
    pub fn next(&mut self) {
        self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask;
    }

    /// Modifies the bucket in place to make it point to the previous slot.
    pub fn prev(&mut self) {
        self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask;
    }
}
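
// Illustrative sketch (not part of the original source) of why masking with
// `capacity_mask` works as the wrap-around in `next`/`prev`: capacities are
// powers of two, so `capacity_mask == capacity - 1` and `idx & capacity_mask`
// is exactly `idx % capacity`, including for the underflow in `prev` at slot 0.
#[test]
fn capacity_mask_wraps_like_modulo() {
    let capacity = 8usize;
    let capacity_mask = capacity - 1;
    // Stepping past the last slot wraps to slot 0...
    assert_eq!((capacity - 1).wrapping_add(1) & capacity_mask, 0);
    // ...and stepping back from slot 0 wraps to the last slot.
    assert_eq!(0usize.wrapping_sub(1) & capacity_mask, capacity - 1);
    for i in 0..4 * capacity {
        assert_eq!(i & capacity_mask, i % capacity);
    }
}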
impl<K, V, M: Deref<Target = RawTable<K, V>>> EmptyBucket<K, V, M> {
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket { raw: self.raw, table: self.table }
    }

    pub fn gap_peek(self) -> Result<GapThenFull<K, V, M>, Bucket<K, V, M>> {
        let gap = EmptyBucket {
            raw: self.raw,
            table: (),
        };

        match self.next().peek() {
            Full(bucket) => Ok(GapThenFull { gap, full: bucket }),
            Empty(e) => Err(e.into_bucket()),
        }
    }
}
impl<K, V, M> EmptyBucket<K, V, M>
    where M: Put<K, V>
{
    /// Puts the given key and value pair, along with the key's hash,
    /// into this bucket in the hashtable. Note how `self` is 'moved' into
    /// this function, because this slot will no longer be empty when
    /// we return! A `FullBucket` is returned for later use, pointing to
    /// the newly-filled slot in the hashtable.
    ///
    /// Use `make_hash` to construct a `SafeHash` to pass to this function.
    pub fn put(mut self, hash: SafeHash, key: K, value: V) -> FullBucket<K, V, M> {
        unsafe {
            *self.raw.hash() = hash.inspect();
            ptr::write(self.raw.pair(), (key, value));

            self.table.borrow_table_mut().size += 1;
        }

        FullBucket { raw: self.raw, table: self.table }
    }

    /// Puts the given key in the bucket, leaving the value uninitialized.
    /// It is only used for in-place insertion.
    pub unsafe fn put_key(mut self, hash: SafeHash, key: K) -> FullBucket<K, V, M> {
        *self.raw.hash() = hash.inspect();
        let pair_ptr = self.raw.pair();
        ptr::write(&mut (*pair_ptr).0, key);

        self.table.borrow_table_mut().size += 1;

        FullBucket { raw: self.raw, table: self.table }
    }
}
impl<K, V, M: Deref<Target = RawTable<K, V>>> FullBucket<K, V, M> {
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket { raw: self.raw, table: self.table }
    }

    /// Duplicates the current position. This can be useful for operations
    /// on two or more buckets.
    pub fn stash(self) -> FullBucket<K, V, Self> {
        FullBucket { raw: self.raw, table: self }
    }
    /// Get the distance between this bucket and the 'ideal' location
    /// as determined by the key's hash stored in it.
    ///
    /// In the cited blog posts above, this is called the "distance to
    /// initial bucket", or DIB. Also known as "probe count".
    pub fn displacement(&self) -> usize {
        // Calculates the distance one has to travel when going from
        // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
        // if the destination is not reached before the end of the table.
        (self.raw.idx.wrapping_sub(self.hash().inspect() as usize)) & self.table.capacity_mask
    }

    pub fn hash(&self) -> SafeHash {
        unsafe { SafeHash { hash: *self.raw.hash() } }
    }

    /// Gets references to the key and value at a given index.
    pub fn read(&self) -> (&K, &V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        }
    }
}
// We take a mutable reference to the table instead of accepting anything that
// implements `DerefMut` to prevent fn `take` from being called on `stash`ed
// buckets.
impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
    /// Removes this bucket's key and value from the hashtable.
    ///
    /// This works similarly to `put`, building an `EmptyBucket` out of the
    /// taken bucket.
    pub fn take(self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
        self.table.size -= 1;

        unsafe {
            *self.raw.hash() = EMPTY_BUCKET;
            let (k, v) = ptr::read(self.raw.pair());
            (EmptyBucket { raw: self.raw, table: self.table }, k, v)
        }
    }

    /// Remove this bucket's `key` from the hashtable.
    /// Only used for in-place insertion.
    /// NOTE: the value is uninitialized when this function is called, so don't
    /// try to drop it.
    pub unsafe fn remove_key(&mut self) {
        self.table.size -= 1;

        *self.raw.hash() = EMPTY_BUCKET;
        let pair_ptr = self.raw.pair();
        ptr::drop_in_place(&mut (*pair_ptr).0); // only drop key
    }
}
// This use of `Put` is misleading and restrictive, but safe and sufficient for our use cases
// where `M` is a full bucket or table reference type with mutable access to the table.
impl<K, V, M> FullBucket<K, V, M>
    where M: Put<K, V>
{
    pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
        unsafe {
            let old_hash = ptr::replace(self.raw.hash() as *mut SafeHash, h);
            let (old_key, old_val) = ptr::replace(self.raw.pair(), (k, v));

            (old_hash, old_key, old_val)
        }
    }
}
impl<K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + DerefMut
{
    /// Gets mutable references to the key and value at a given index.
    pub fn read_mut(&mut self) -> (&mut K, &mut V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&mut (*pair_ptr).0, &mut (*pair_ptr).1)
        }
    }
}
impl<'t, K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + 't
{
    /// Exchange a bucket state for immutable references into the table.
    /// Because the underlying reference to the table is also consumed,
    /// no further changes to the structure of the table are possible;
    /// in exchange for this, the returned references have a longer lifetime
    /// than the references returned by `read()`.
    pub fn into_refs(self) -> (&'t K, &'t V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        }
    }
}
impl<'t, K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + DerefMut + 't
{
    /// This works similarly to `into_refs`, exchanging a bucket state
    /// for mutable references into the table.
    pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&mut (*pair_ptr).0, &mut (*pair_ptr).1)
        }
    }
}
impl<K, V, M> GapThenFull<K, V, M>
    where M: Deref<Target = RawTable<K, V>>
{
    pub fn full(&self) -> &FullBucket<K, V, M> {
        &self.full
    }

    pub fn into_table(self) -> M {
        self.full.into_table()
    }

    pub fn shift(mut self) -> Result<GapThenFull<K, V, M>, Bucket<K, V, M>> {
        unsafe {
            let (gap_hash, gap_pair) = self.gap.raw.hash_pair();
            let (full_hash, full_pair) = self.full.raw.hash_pair();
            *gap_hash = mem::replace(&mut *full_hash, EMPTY_BUCKET);
            ptr::copy_nonoverlapping(full_pair, gap_pair, 1);
        }

        let FullBucket { raw: prev_raw, .. } = self.full;

        match self.full.next().peek() {
            Full(bucket) => {
                self.gap.raw = prev_raw;
                self.full = bucket;
                Ok(self)
            }
            Empty(b) => Err(b.into_bucket()),
        }
    }
}
/// Rounds up to a multiple of a power of two. Returns the closest multiple
/// of `target_alignment` that is higher or equal to `unrounded`.
///
/// # Panics
///
/// Panics if `target_alignment` is not a power of two.
#[inline]
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

#[test]
fn test_rounding() {
    assert_eq!(round_up_to_next(0, 4), 0);
    assert_eq!(round_up_to_next(1, 4), 4);
    assert_eq!(round_up_to_next(2, 4), 4);
    assert_eq!(round_up_to_next(3, 4), 4);
    assert_eq!(round_up_to_next(4, 4), 4);
    assert_eq!(round_up_to_next(5, 4), 8);
}
// Returns a tuple of (pairs_offset, end_of_pairs_offset, oflo),
// from the start of a mallocated array. `oflo` reports whether the
// offset calculation overflowed.
#[inline]
fn calculate_offsets(hashes_size: usize,
                     pairs_size: usize,
                     pairs_align: usize)
                     -> (usize, usize, bool) {
    let pairs_offset = round_up_to_next(hashes_size, pairs_align);
    let (end_of_pairs, oflo) = pairs_offset.overflowing_add(pairs_size);

    (pairs_offset, end_of_pairs, oflo)
}

// Returns a tuple of (minimum required malloc alignment,
// array_size, oflo), from the start of a mallocated array.
fn calculate_allocation(hash_size: usize,
                        hash_align: usize,
                        pairs_size: usize,
                        pairs_align: usize)
                        -> (usize, usize, bool) {
    let (_, end_of_pairs, oflo) = calculate_offsets(hash_size, pairs_size, pairs_align);

    let align = cmp::max(hash_align, pairs_align);

    (align, end_of_pairs, oflo)
}
#[test]
fn test_offset_calculation() {
    assert_eq!(calculate_allocation(128, 8, 16, 8), (8, 144, false));
    assert_eq!(calculate_allocation(3, 1, 2, 1), (1, 5, false));
    assert_eq!(calculate_allocation(6, 2, 12, 4), (4, 20, false));
    assert_eq!(calculate_offsets(128, 15, 4), (128, 143, false));
    assert_eq!(calculate_offsets(3, 2, 4), (4, 6, false));
    assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
}
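
// An additional worked sketch (not from the original file), tying the abstract
// numbers above to a concrete table: capacity 4 with K = u64 and V = u8. The
// hash array is already aligned for the pairs, so the pairs start right after
// it and the buffer needs max(hash_align, pair_align) alignment overall.
#[test]
fn calculate_allocation_concrete_example() {
    let capacity = 4;
    let hashes_size = capacity * size_of::<HashUint>();
    let pairs_size = capacity * size_of::<(u64, u8)>();
    let (align, size, oflo) = calculate_allocation(hashes_size,
                                                   align_of::<HashUint>(),
                                                   pairs_size,
                                                   align_of::<(u64, u8)>());
    assert!(!oflo);
    assert_eq!(align, cmp::max(align_of::<HashUint>(), align_of::<(u64, u8)>()));
    // `hashes_size` is a multiple of the pair alignment here, so no padding is
    // inserted between the two sub-arrays.
    assert_eq!(size, hashes_size + pairs_size);
}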
impl<K, V> RawTable<K, V> {
    /// Does not initialize the buckets. The caller should ensure they,
    /// at the very least, set every hash to EMPTY_BUCKET.
    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
        if capacity == 0 {
            return RawTable {
                       size: 0,
                       capacity_mask: capacity.wrapping_sub(1),
                       hashes: TaggedHashUintPtr::new(EMPTY as *mut HashUint),
                       marker: marker::PhantomData,
                   };
        }

        // No need for `checked_mul` before a more restrictive check performed
        // later in this method.
        let hashes_size = capacity.wrapping_mul(size_of::<HashUint>());
        let pairs_size = capacity.wrapping_mul(size_of::<(K, V)>());

        // Allocating hashmaps is a little tricky. We need to allocate two
        // arrays, but since we know their sizes and alignments up front,
        // we just allocate a single array, and then have the subarrays
        // point into it.
        //
        // This is great in theory, but in practice getting the alignment
        // right is a little subtle. Therefore, calculating offsets has been
        // factored out into a different function.
        let (alignment, size, oflo) = calculate_allocation(hashes_size,
                                                           align_of::<HashUint>(),
                                                           pairs_size,
                                                           align_of::<(K, V)>());
        assert!(!oflo, "capacity overflow");

        // One check for overflow that covers calculation and rounding of size.
        let size_of_bucket = size_of::<HashUint>().checked_add(size_of::<(K, V)>()).unwrap();
        assert!(size >=
                capacity.checked_mul(size_of_bucket)
                    .expect("capacity overflow"),
                "capacity overflow");

        let buffer = Heap.alloc(Layout::from_size_align(size, alignment).unwrap())
            .unwrap_or_else(|e| Heap.oom(e));

        let hashes = buffer as *mut HashUint;

        RawTable {
            capacity_mask: capacity.wrapping_sub(1),
            size: 0,
            hashes: TaggedHashUintPtr::new(hashes),
            marker: marker::PhantomData,
        }
    }
    fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
        let hashes_size = self.capacity() * size_of::<HashUint>();
        let pairs_size = self.capacity() * size_of::<(K, V)>();

        let (pairs_offset, _, oflo) =
            calculate_offsets(hashes_size, pairs_size, align_of::<(K, V)>());
        debug_assert!(!oflo, "capacity overflow");

        let buffer = self.hashes.ptr() as *mut u8;
        unsafe {
            RawBucket {
                hash_start: buffer as *mut HashUint,
                pair_start: buffer.offset(pairs_offset as isize) as *const (K, V),
                idx: index,
                _marker: marker::PhantomData,
            }
        }
    }
    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    pub fn new(capacity: usize) -> RawTable<K, V> {
        unsafe {
            let ret = RawTable::new_uninitialized(capacity);
            ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
            ret
        }
    }

    /// The hashtable's capacity, similar to a vector's.
    pub fn capacity(&self) -> usize {
        self.capacity_mask.wrapping_add(1)
    }

    /// The number of elements ever `put` in the hashtable, minus the number
    /// of elements ever `take`n.
    pub fn size(&self) -> usize {
        self.size
    }

    fn raw_buckets(&self) -> RawBuckets<K, V> {
        RawBuckets {
            raw: self.raw_bucket_at(0),
            elems_left: self.size,
            marker: marker::PhantomData,
        }
    }
    pub fn iter(&self) -> Iter<K, V> {
        Iter { iter: self.raw_buckets() }
    }

    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        IterMut {
            iter: self.raw_buckets(),
            _marker: marker::PhantomData,
        }
    }

    pub fn into_iter(self) -> IntoIter<K, V> {
        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        IntoIter {
            iter: RawBuckets { raw, elems_left, marker: marker::PhantomData },
            table: self,
        }
    }

    pub fn drain(&mut self) -> Drain<K, V> {
        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        Drain {
            iter: RawBuckets { raw, elems_left, marker: marker::PhantomData },
            table: NonNull::from(self),
            marker: marker::PhantomData,
        }
    }
    /// Drops buckets in reverse order. It leaves the table in an inconsistent
    /// state and should only be used for dropping the table's remaining
    /// entries. It's used in the implementation of Drop.
    unsafe fn rev_drop_buckets(&mut self) {
        // initialize the raw bucket past the end of the table
        let mut raw = self.raw_bucket_at(self.capacity());
        let mut elems_left = self.size;

        while elems_left != 0 {
            raw.idx -= 1;

            if *raw.hash() != EMPTY_BUCKET {
                elems_left -= 1;
                ptr::drop_in_place(raw.pair());
            }
        }
    }

    /// Set the table tag
    pub fn set_tag(&mut self, value: bool) {
        self.hashes.set_tag(value)
    }

    /// Get the table tag
    pub fn tag(&self) -> bool {
        self.hashes.tag()
    }
}
/// A raw iterator. The basis for some other iterators in this module. Although
/// this interface is safe, it's not used outside this module.
struct RawBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    elems_left: usize,

    // Strictly speaking, this should be &'a (K,V), but that would
    // require that K:'a, and we often use RawBuckets<'static...> for
    // move iterations, so that messes up a lot of other things. So
    // just use `&'a ()` as this is not a publicly exposed type
    // anyway.
    marker: marker::PhantomData<&'a ()>,
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
    fn clone(&self) -> RawBuckets<'a, K, V> {
        RawBuckets {
            raw: self.raw,
            elems_left: self.elems_left,
            marker: marker::PhantomData,
        }
    }
}
impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
    type Item = RawBucket<K, V>;

    fn next(&mut self) -> Option<RawBucket<K, V>> {
        if self.elems_left == 0 {
            return None;
        }

        loop {
            unsafe {
                let item = self.raw;
                self.raw.idx += 1;
                if *item.hash() != EMPTY_BUCKET {
                    self.elems_left -= 1;
                    return Some(item);
                }
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}

impl<'a, K, V> ExactSizeIterator for RawBuckets<'a, K, V> {
    fn len(&self) -> usize {
        self.elems_left
    }
}
/// Iterator over shared references to entries in a table.
pub struct Iter<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Iter<'a, K, V> {
    fn clone(&self) -> Iter<'a, K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}
/// Iterator over mutable references to entries in a table.
pub struct IterMut<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    // To ensure invariance with respect to V
    _marker: marker::PhantomData<&'a mut V>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
// Both K: Sync and K: Send are correct for IterMut's Send impl,
// but Send is the more useful bound
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}

impl<'a, K: 'a, V: 'a> IterMut<'a, K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}
/// Iterator over the entries in a table, consuming the table.
pub struct IntoIter<K, V> {
    table: RawTable<K, V>,
    iter: RawBuckets<'static, K, V>,
}

unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}

impl<K, V> IntoIter<K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}
/// Iterator over the entries in a table, clearing the table.
pub struct Drain<'a, K: 'a, V: 'a> {
    table: NonNull<RawTable<K, V>>,
    iter: RawBuckets<'static, K, V>,
    marker: marker::PhantomData<&'a RawTable<K, V>>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Drain<'a, K, V> {}
unsafe impl<'a, K: Send, V: Send> Send for Drain<'a, K, V> {}

impl<'a, K, V> Drain<'a, K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);

    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.iter.next().map(|raw| unsafe {
            let pair_ptr = raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.iter.next().map(|raw| unsafe {
            let pair_ptr = raw.pair();
            (&(*pair_ptr).0, &mut (*pair_ptr).1)
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|raw| {
            self.table.size -= 1;
            unsafe {
                let (k, v) = ptr::read(raw.pair());
                (SafeHash { hash: *raw.hash() }, k, v)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}
impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|raw| {
            unsafe {
                self.table.as_mut().size -= 1;
                let (k, v) = ptr::read(raw.pair());
                (SafeHash { hash: ptr::replace(&mut *raw.hash(), EMPTY_BUCKET) }, k, v)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}
impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
    fn drop(&mut self) {
impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
    fn clone(&self) -> RawTable<K, V> {
        unsafe {
            let cap = self.capacity();
            let mut new_ht = RawTable::new_uninitialized(cap);

            let mut new_buckets = new_ht.raw_bucket_at(0);
            let mut buckets = self.raw_bucket_at(0);
            while buckets.idx < cap {
                *new_buckets.hash() = *buckets.hash();
                if *new_buckets.hash() != EMPTY_BUCKET {
                    let pair_ptr = buckets.pair();
                    let kv = ((*pair_ptr).0.clone(), (*pair_ptr).1.clone());
                    ptr::write(new_buckets.pair(), kv);
                }
                buckets.idx += 1;
                new_buckets.idx += 1;
            }

            new_ht.size = self.size();
            new_ht.set_tag(self.tag());

            new_ht
        }
    }
}
unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for RawTable<K, V> {
    fn drop(&mut self) {
        if self.capacity() == 0 {
            return;
        }

        // This is done in reverse because we've likely partially taken
        // some elements out with `.into_iter()` from the front.
        // Check if the size is 0, so we don't do a useless scan when
        // dropping empty tables such as on resize.
        // Also avoid double drop of elements that have been already moved out.
        unsafe {
            if needs_drop::<(K, V)>() {
                // avoid linear runtime for types that don't need drop
                self.rev_drop_buckets();
            }
        }

        let hashes_size = self.capacity() * size_of::<HashUint>();
        let pairs_size = self.capacity() * size_of::<(K, V)>();
        let (align, size, oflo) = calculate_allocation(hashes_size,
                                                       align_of::<HashUint>(),
                                                       pairs_size,
                                                       align_of::<(K, V)>());

        debug_assert!(!oflo, "should be impossible");

        unsafe {
            Heap.dealloc(self.hashes.ptr() as *mut u8,
                         Layout::from_size_align(size, align).unwrap());
            // Remember how everything was allocated out of one buffer
            // during initialization? We only need one call to free here.
        }
    }
}