1 //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
12 #[derive(Debug, Clone)]
13 pub struct SipHasher128 {
16 length: usize, // how many bytes we've processed
17 state: State, // hash State
18 tail: u64, // unprocessed bytes le
19 ntail: usize, // how many bytes in tail are valid
/// The four 64-bit internal state words of SipHash.
#[derive(Debug, Clone, Copy)]
struct State {
    // v0, v2 and v1, v3 show up in pairs in the algorithm,
    // and simd implementations of SipHash will use vectors
    // of v02 and v13. By placing them in this order in the struct,
    // the compiler can pick up on just a few simd optimizations by itself.
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}
/// One SipRound: the ARX (add-rotate-xor) mixing step applied to the four
/// state words. The single-argument arm forwards a `State` to the four-word arm.
macro_rules! compress {
    ($state:expr) => {{
        compress!($state.v0, $state.v1, $state.v2, $state.v3)
    }};
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
        $v0 = $v0.wrapping_add($v1); $v1 = $v1.rotate_left(13); $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        $v2 = $v2.wrapping_add($v3); $v3 = $v3.rotate_left(16); $v3 ^= $v2;
        $v0 = $v0.wrapping_add($v3); $v3 = $v3.rotate_left(21); $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1); $v1 = $v1.rotate_left(17); $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    }};
}
/// Loads an integer of the desired type from a byte stream, in LE order. Uses
/// `copy_nonoverlapping` to let the compiler generate the most efficient way
/// to load it from a possibly unaligned address.
///
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
macro_rules! load_int_le {
    ($buf:expr, $i:expr, $int_ty:ident) => {{
        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
        let mut data = 0 as $int_ty;
        ptr::copy_nonoverlapping(
            $buf.get_unchecked($i),
            &mut data as *mut _ as *mut u8,
            mem::size_of::<$int_ty>(),
        );
        // `to_le` makes the byte-wise copy yield the LE interpretation on
        // big-endian hosts too (it is a no-op on little-endian hosts).
        data.to_le()
    }};
}
67 /// Loads an u64 using up to 7 bytes of a byte slice.
69 /// Unsafe because: unchecked indexing at start..start+len
71 unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
72 debug_assert!(len < 8);
73 let mut i = 0; // current byte index (from LSB) in the output u64
76 out = u64::from(load_int_le!(buf, start + i, u32));
80 out |= u64::from(load_int_le!(buf, start + i, u16)) << (i * 8);
84 out |= u64::from(*buf.get_unchecked(start + i)) << (i * 8);
87 debug_assert_eq!(i, len);
94 pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
95 let mut state = SipHasher128 {
113 fn reset(&mut self) {
115 self.state.v0 = self.k0 ^ 0x736f6d6570736575;
116 self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
117 self.state.v2 = self.k0 ^ 0x6c7967656e657261;
118 self.state.v3 = self.k1 ^ 0x7465646279746573;
121 // This is only done in the 128 bit version:
122 self.state.v1 ^= 0xee;
125 // Specialized write function that is only valid for buffers with len <= 8.
126 // It's used to force inlining of write_u8 and write_usize, those would normally be inlined
127 // except for composite types (that includes slices and str hashing because of delimiter).
128 // Without this extra push the compiler is very reluctant to inline delimiter writes,
129 // degrading performance substantially for the most common use cases.
131 fn short_write(&mut self, msg: &[u8]) {
132 debug_assert!(msg.len() <= 8);
133 let length = msg.len();
134 self.length += length;
136 let needed = 8 - self.ntail;
137 let fill = cmp::min(length, needed);
139 self.tail = unsafe { load_int_le!(msg, 0, u64) };
141 self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail);
143 self.ntail += length;
147 self.state.v3 ^= self.tail;
148 Sip24Rounds::c_rounds(&mut self.state);
149 self.state.v0 ^= self.tail;
151 // Buffered tail is now flushed, process new input.
152 self.ntail = length - needed;
153 self.tail = unsafe { u8to64_le(msg, needed, self.ntail) };
157 fn short_write_gen<T>(&mut self, x: T) {
159 slice::from_raw_parts(&x as *const T as *const u8, mem::size_of::<T>())
161 self.short_write(bytes);
165 pub fn finish128(mut self) -> (u64, u64) {
166 let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
169 Sip24Rounds::c_rounds(&mut self.state);
172 self.state.v2 ^= 0xee;
173 Sip24Rounds::d_rounds(&mut self.state);
174 let _0 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
176 self.state.v1 ^= 0xdd;
177 Sip24Rounds::d_rounds(&mut self.state);
178 let _1 = self.state.v0 ^ self.state.v1 ^ self.state.v2 ^ self.state.v3;
183 impl Hasher for SipHasher128 {
185 fn write_u8(&mut self, i: u8) {
186 self.short_write_gen(i);
190 fn write_u16(&mut self, i: u16) {
191 self.short_write_gen(i);
195 fn write_u32(&mut self, i: u32) {
196 self.short_write_gen(i);
200 fn write_u64(&mut self, i: u64) {
201 self.short_write_gen(i);
205 fn write_usize(&mut self, i: usize) {
206 self.short_write_gen(i);
210 fn write_i8(&mut self, i: i8) {
211 self.short_write_gen(i);
215 fn write_i16(&mut self, i: i16) {
216 self.short_write_gen(i);
220 fn write_i32(&mut self, i: i32) {
221 self.short_write_gen(i);
225 fn write_i64(&mut self, i: i64) {
226 self.short_write_gen(i);
230 fn write_isize(&mut self, i: isize) {
231 self.short_write_gen(i);
235 fn write(&mut self, msg: &[u8]) {
236 let length = msg.len();
237 self.length += length;
242 needed = 8 - self.ntail;
243 self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
245 self.ntail += length;
248 self.state.v3 ^= self.tail;
249 Sip24Rounds::c_rounds(&mut self.state);
250 self.state.v0 ^= self.tail;
255 // Buffered tail is now flushed, process new input.
256 let len = length - needed;
257 let left = len & 0x7;
260 while i < len - left {
261 let mi = unsafe { load_int_le!(msg, i, u64) };
264 Sip24Rounds::c_rounds(&mut self.state);
270 self.tail = unsafe { u8to64_le(msg, i, left) };
274 fn finish(&self) -> u64 {
275 panic!("SipHasher128 cannot provide valid 64 bit hashes")
279 #[derive(Debug, Clone, Default)]
284 fn c_rounds(state: &mut State) {
290 fn d_rounds(state: &mut State) {