1 //! This is a copy of the `rustc_hash` crate, adapted to work as a module.
3 //! If in the future it becomes more reasonable to add dependencies to
4 //! `proc_macro`, this module should be removed and replaced with a dependency
5 //! on the `rustc_hash` crate.
7 use std::collections::HashMap;
8 use std::convert::TryInto;
9 use std::default::Default;
10 use std::hash::BuildHasherDefault;
11 use std::hash::Hasher;
12 use std::mem::size_of;
/// Type alias for a hashmap using the `fx` hash algorithm.
///
/// `FxHasher` is fast but deliberately not DoS-resistant (see the note on
/// [`FxHasher`] below), so this map is only appropriate where keys are not
/// attacker-controlled.
pub type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;
18 /// A speedy hash algorithm for use within rustc. The hashmap in liballoc
19 /// by default uses SipHash which isn't quite as speedy as we want. In the
/// compiler we're not really worried about DoS attempts, so we use a fast
21 /// non-cryptographic hash.
23 /// This is the same as the algorithm used by Firefox -- which is a homespun
24 /// one not based on any widely-known algorithm -- though modified to produce
25 /// 64-bit hash values instead of 32-bit hash values. It consistently
26 /// out-performs an FNV-based hash within rustc itself -- the collision rate is
27 /// similar or slightly worse than FNV, but the speed of the hash function
28 /// itself is much higher because it works on up to 8 bytes at a time.
// Multiplicative mixing constant used by `add_to_hash`, selected to match the
// platform word size. The 32-bit value is the classic Fibonacci-hashing
// constant (floor(2^32 / phi)); the 64-bit value is the constant used by the
// upstream `rustc_hash` crate. Both are large odd numbers, so the wrapping
// multiply is a bijection on the word and diffuses low bits into high bits.
#[cfg(target_pointer_width = "32")]
const K: usize = 0x9e3779b9;
#[cfg(target_pointer_width = "64")]
const K: usize = 0x517cc1b727220a95;
// Fresh hasher state for `BuildHasherDefault<FxHasher>` (see `FxHashMap`).
// NOTE(review): the body of `default` is elided in this view of the file;
// upstream `rustc_hash` initializes the state as `FxHasher { hash: 0 }` —
// confirm against the full file.
impl Default for FxHasher {
    fn default() -> FxHasher {
    /// Folds one machine word into the running hash: rotate, xor, multiply.
    ///
    /// `rotate_left(5)` spreads previously-mixed bits, `bitxor(i)` injects the
    /// new word (`bitxor` is the `std::ops::BitXor` operator method), and
    /// `wrapping_mul(K)` — `K` being a large odd per-platform constant —
    /// diffuses the result across the word. NOTE(review): the enclosing
    /// `impl FxHasher` header for this method is elided in this view.
    fn add_to_hash(&mut self, i: usize) {
        self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
// NOTE(review): several interior lines of this impl are elided in this view
// (closing braces, `#[inline]` attributes, the tail-advance statements after
// the 4- and 2-byte remainder reads, and the bodies of `write_usize` and
// `finish`). The comments below describe only the statements that are
// visible here; do not treat them as documenting the elided code.
impl Hasher for FxHasher {
    /// Hashes a byte slice one machine word at a time, then folds in the
    /// remaining tail as a 4-, 2-, and/or 1-byte read as the length allows.
    fn write(&mut self, mut bytes: &[u8]) {
        // Native-endian word load sized to the platform; a closure so the
        // same loop body works under either cfg.
        #[cfg(target_pointer_width = "32")]
        let read_usize = |bytes: &[u8]| u32::from_ne_bytes(bytes[..4].try_into().unwrap());
        #[cfg(target_pointer_width = "64")]
        let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap());
        // Accumulate into a local copy so the hot loop can keep the state in
        // a register; the result is written back to `self` at the end.
        let mut hash = FxHasher { hash: self.hash };
        // The remainder ladder below only covers word sizes up to 8 bytes.
        assert!(size_of::<usize>() <= 8);
        // Consume all full machine words.
        while bytes.len() >= size_of::<usize>() {
            hash.add_to_hash(read_usize(bytes) as usize);
            bytes = &bytes[size_of::<usize>()..];
        // Tail: the `size_of` guards are compile-time constants, so the dead
        // arms vanish per platform (e.g. the 4-byte arm is skipped entirely
        // on 32-bit, where the word loop already handled 4-byte chunks).
        if (size_of::<usize>() > 4) && (bytes.len() >= 4) {
            hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as usize);
        if (size_of::<usize>() > 2) && bytes.len() >= 2 {
            hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as usize);
        if (size_of::<usize>() > 1) && bytes.len() >= 1 {
            hash.add_to_hash(bytes[0] as usize);
        // Publish the register-resident accumulator back to the hasher.
        self.hash = hash.hash;
    // Fixed-width integer writes feed the value straight into the mixer;
    // widening `as usize` casts are lossless for u8/u16/u32 on both targets.
    fn write_u8(&mut self, i: u8) {
        self.add_to_hash(i as usize);
    fn write_u16(&mut self, i: u16) {
        self.add_to_hash(i as usize);
    fn write_u32(&mut self, i: u32) {
        self.add_to_hash(i as usize);
    // On 32-bit targets a u64 is mixed as two words: low half, then high half
    // (the `as usize` cast truncates to the low 32 bits here by design).
    #[cfg(target_pointer_width = "32")]
    fn write_u64(&mut self, i: u64) {
        self.add_to_hash(i as usize);
        self.add_to_hash((i >> 32) as usize);
    // On 64-bit targets a u64 fits in a single word.
    #[cfg(target_pointer_width = "64")]
    fn write_u64(&mut self, i: u64) {
        self.add_to_hash(i as usize);
    // Body elided in this view; presumably forwards to `add_to_hash(i)`.
    fn write_usize(&mut self, i: usize) {
    // Body elided in this view; presumably returns `self.hash as u64`.
    fn finish(&self) -> u64 {