//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

// deprecated
module ae.utils.digest_murmurhash3;

deprecated:

private:

import ae.utils.meta;

alias byte  int8_t;
alias ubyte uint8_t;
alias uint  uint32_t;
alias ulong uint64_t;

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

/*inline*/ uint32_t rotl32(int r) ( uint32_t x )
{
	return (x << r) | (x >> (32 - r));
}

/*inline*/ uint64_t rotl64(int r) ( uint64_t x )
{
	return (x << r) | (x >> (64 - r));
}
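
// (rotl32!r and rotl64!r take the rotation amount as a template argument,
// standing in for the ROTL32/ROTL64 macros of the reference C++ code.)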

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

/+
/*FORCE_INLINE*/ uint32_t getblock ( const uint32_t * p, int i )
{
	return p[i];
}

/*FORCE_INLINE*/ uint64_t getblock ( const uint64_t * p, int i )
{
	return p[i];
}
+/

string GETBLOCK(string P, string I)
{
	return mixin(X!q{ @(P) [ @(I) ] });
}
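
// Note: GETBLOCK(q{blocks}, q{i}) builds the source string for the plain
// indexing expression blocks[i], so mixin(GETBLOCK(...)) is the equivalent
// of the reference getblock above. If a platform needed byte-swapping, this
// is the place to add it; a purely illustrative sketch (not used here):

/+
version (BigEndian)
{
	import core.bitop : bswap;

	uint32_t getblockSwapped(const uint32_t* p, int i)
	{
		// Byte-swap each block so output matches the little-endian reference.
		return bswap(p[i]);
	}
}
+/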

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

/+
/*FORCE_INLINE*/ uint32_t fmix ( uint32_t h )
{
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return h;
}

//----------

/*FORCE_INLINE*/ uint64_t fmix ( uint64_t k )
{
	k ^= k >> 33;
	k *= BIG_CONSTANT(0xff51afd7ed558ccd);
	k ^= k >> 33;
	k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
	k ^= k >> 33;

	return k;
}
+/

string FMIX_32(string H)
{
	return mixin(X!q{
		@(H) ^= @(H) >> 16;
		@(H) *= 0x85ebca6b;
		@(H) ^= @(H) >> 13;
		@(H) *= 0xc2b2ae35;
		@(H) ^= @(H) >> 16;
	});
}

string FMIX_64(string H)
{
	return mixin(X!q{
		@(H) ^= @(H) >> 33;
		@(H) *= 0xff51afd7ed558ccdL;
		@(H) ^= @(H) >> 33;
		@(H) *= 0xc4ceb9fe1a85ec53L;
		@(H) ^= @(H) >> 33;
	});
}
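
// Note: mixin(FMIX_32(q{h1})) splices the five statements above into the
// caller with h1 substituted for @(H), applying the 32-bit finalization mix
// in place; FMIX_64 does the same with the 64-bit constants and shifts.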

//-----------------------------------------------------------------------------

public:

void MurmurHash3_x86_32 ( const void * key, int len, uint32_t seed, void * output )
{
	const uint8_t * data = cast(const uint8_t*)key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	uint32_t c1 = 0xcc9e2d51;
	uint32_t c2 = 0x1b873593;

	//----------
	// body

	const uint32_t * blocks = cast(const uint32_t *)(data + nblocks*4);

	for(int i = -nblocks; i; i++)
	{
		uint32_t k1 = mixin(GETBLOCK(q{blocks},q{i}));

		k1 *= c1;
		k1 = rotl32!15(k1);
		k1 *= c2;

		h1 ^= k1;
		h1 = rotl32!13(h1);
		h1 = h1*5+0xe6546b64;
	}

	//----------
	// tail

	const uint8_t * tail = cast(const uint8_t*)(data + nblocks*4);

	uint32_t k1 = 0;

	switch(len & 3)
	{
	case 3: k1 ^= tail[2] << 16;                              goto case;
	case 2: k1 ^= tail[1] << 8;                               goto case;
	case 1: k1 ^= tail[0];
	        k1 *= c1; k1 = rotl32!15(k1); k1 *= c2; h1 ^= k1; break;
	default:
	}

	//----------
	// finalization

	h1 ^= len;

	mixin(FMIX_32(q{h1}));

	*cast(uint32_t*)output = h1;
}
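
// Illustrative sanity check: for empty input with a zero seed, the state
// never leaves zero (no body, no tail, h1 ^= 0, and FMIX_32 maps 0 to 0),
// so the 32-bit digest is 0.
unittest
{
	uint32_t h;
	MurmurHash3_x86_32(null, 0, 0, &h);
	assert(h == 0);
}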

//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len, uint32_t seed, void * output )
{
	const uint8_t * data = cast(const uint8_t*)key;
	const int nblocks = len / 16;

	uint32_t h1 = seed;
	uint32_t h2 = seed;
	uint32_t h3 = seed;
	uint32_t h4 = seed;

	uint32_t c1 = 0x239b961b;
	uint32_t c2 = 0xab0e9789;
	uint32_t c3 = 0x38b34ae5;
	uint32_t c4 = 0xa1e38b93;

	//----------
	// body

	const uint32_t * blocks = cast(const uint32_t *)(data + nblocks*16);

	for(int i = -nblocks; i; i++)
	{
		uint32_t k1 = mixin(GETBLOCK(q{blocks},q{i*4+0}));
		uint32_t k2 = mixin(GETBLOCK(q{blocks},q{i*4+1}));
		uint32_t k3 = mixin(GETBLOCK(q{blocks},q{i*4+2}));
		uint32_t k4 = mixin(GETBLOCK(q{blocks},q{i*4+3}));

		k1 *= c1; k1  = rotl32!15(k1); k1 *= c2; h1 ^= k1;

		h1 = rotl32!19(h1); h1 += h2; h1 = h1*5+0x561ccd1b;

		k2 *= c2; k2  = rotl32!16(k2); k2 *= c3; h2 ^= k2;

		h2 = rotl32!17(h2); h2 += h3; h2 = h2*5+0x0bcaa747;

		k3 *= c3; k3  = rotl32!17(k3); k3 *= c4; h3 ^= k3;

		h3 = rotl32!15(h3); h3 += h4; h3 = h3*5+0x96cd1c35;

		k4 *= c4; k4  = rotl32!18(k4); k4 *= c1; h4 ^= k4;

		h4 = rotl32!13(h4); h4 += h1; h4 = h4*5+0x32ac3b17;
	}

	//----------
	// tail

	const uint8_t * tail = cast(const uint8_t*)(data + nblocks*16);

	uint32_t k1 = 0;
	uint32_t k2 = 0;
	uint32_t k3 = 0;
	uint32_t k4 = 0;

	switch(len & 15)
	{
	case 15: k4 ^= tail[14] << 16;                              goto case;
	case 14: k4 ^= tail[13] << 8;                               goto case;
	case 13: k4 ^= tail[12] << 0;
	         k4 *= c4; k4  = rotl32!18(k4); k4 *= c1; h4 ^= k4; goto case;

	case 12: k3 ^= tail[11] << 24;                              goto case;
	case 11: k3 ^= tail[10] << 16;                              goto case;
	case 10: k3 ^= tail[ 9] << 8;                               goto case;
	case  9: k3 ^= tail[ 8] << 0;
	         k3 *= c3; k3  = rotl32!17(k3); k3 *= c4; h3 ^= k3; goto case;

	case  8: k2 ^= tail[ 7] << 24;                              goto case;
	case  7: k2 ^= tail[ 6] << 16;                              goto case;
	case  6: k2 ^= tail[ 5] << 8;                               goto case;
	case  5: k2 ^= tail[ 4] << 0;
	         k2 *= c2; k2  = rotl32!16(k2); k2 *= c3; h2 ^= k2; goto case;

	case  4: k1 ^= tail[ 3] << 24;                              goto case;
	case  3: k1 ^= tail[ 2] << 16;                              goto case;
	case  2: k1 ^= tail[ 1] << 8;                               goto case;
	case  1: k1 ^= tail[ 0] << 0;
	         k1 *= c1; k1  = rotl32!15(k1); k1 *= c2; h1 ^= k1; goto default;
	default:
	}

	//----------
	// finalization

	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	mixin(FMIX_32(q{h1}));
	mixin(FMIX_32(q{h2}));
	mixin(FMIX_32(q{h3}));
	mixin(FMIX_32(q{h4}));

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	(cast(uint32_t*)output)[0] = h1;
	(cast(uint32_t*)output)[1] = h2;
	(cast(uint32_t*)output)[2] = h3;
	(cast(uint32_t*)output)[3] = h4;
}
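
// Illustrative sanity check: with empty input and a zero seed, all four
// 32-bit lanes stay zero through the body, tail and finalization, so the
// 128-bit digest is four zero words.
unittest
{
	uint32_t[4] h;
	MurmurHash3_x86_128(null, 0, 0, h.ptr);
	assert(h == [0u, 0u, 0u, 0u]);
}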

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len, const uint32_t seed, void * output )
{
	const uint8_t * data = cast(const uint8_t*)key;
	const int nblocks = len / 16;

	uint64_t h1 = seed;
	uint64_t h2 = seed;

	uint64_t c1 = 0x87c37b91114253d5L;
	uint64_t c2 = 0x4cf5ad432745937fL;

	//----------
	// body

	const uint64_t * blocks = cast(const uint64_t *)(data);

	for(int i = 0; i < nblocks; i++)
	{
		uint64_t k1 = mixin(GETBLOCK(q{blocks},q{i*2+0}));
		uint64_t k2 = mixin(GETBLOCK(q{blocks},q{i*2+1}));

		k1 *= c1; k1  = rotl64!31(k1); k1 *= c2; h1 ^= k1;

		h1 = rotl64!27(h1); h1 += h2; h1 = h1*5+0x52dce729;

		k2 *= c2; k2  = rotl64!33(k2); k2 *= c1; h2 ^= k2;

		h2 = rotl64!31(h2); h2 += h1; h2 = h2*5+0x38495ab5;
	}

	//----------
	// tail

	const uint8_t * tail = cast(const uint8_t*)(data + nblocks*16);

	uint64_t k1 = 0;
	uint64_t k2 = 0;

	switch(len & 15)
	{
	case 15: k2 ^= cast(uint64_t)(tail[14]) << 48;              goto case;
	case 14: k2 ^= cast(uint64_t)(tail[13]) << 40;              goto case;
	case 13: k2 ^= cast(uint64_t)(tail[12]) << 32;              goto case;
	case 12: k2 ^= cast(uint64_t)(tail[11]) << 24;              goto case;
	case 11: k2 ^= cast(uint64_t)(tail[10]) << 16;              goto case;
	case 10: k2 ^= cast(uint64_t)(tail[ 9]) << 8;               goto case;
	case  9: k2 ^= cast(uint64_t)(tail[ 8]) << 0;
	         k2 *= c2; k2  = rotl64!33(k2); k2 *= c1; h2 ^= k2; goto case;

	case  8: k1 ^= cast(uint64_t)(tail[ 7]) << 56;              goto case;
	case  7: k1 ^= cast(uint64_t)(tail[ 6]) << 48;              goto case;
	case  6: k1 ^= cast(uint64_t)(tail[ 5]) << 40;              goto case;
	case  5: k1 ^= cast(uint64_t)(tail[ 4]) << 32;              goto case;
	case  4: k1 ^= cast(uint64_t)(tail[ 3]) << 24;              goto case;
	case  3: k1 ^= cast(uint64_t)(tail[ 2]) << 16;              goto case;
	case  2: k1 ^= cast(uint64_t)(tail[ 1]) << 8;               goto case;
	case  1: k1 ^= cast(uint64_t)(tail[ 0]) << 0;
	         k1 *= c1; k1  = rotl64!31(k1); k1 *= c2; h1 ^= k1; goto default;
	default:
	}

	//----------
	// finalization

	h1 ^= len; h2 ^= len;

	h1 += h2;
	h2 += h1;

	mixin(FMIX_64(q{h1}));
	mixin(FMIX_64(q{h2}));

	h1 += h2;
	h2 += h1;

	(cast(uint64_t*)output)[0] = h1;
	(cast(uint64_t*)output)[1] = h2;
}
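
// Illustrative sanity check: with empty input and a zero seed, both 64-bit
// lanes stay zero (FMIX_64 maps 0 to 0), so the 128-bit digest is two zero
// words.
unittest
{
	uint64_t[2] h;
	MurmurHash3_x64_128(null, 0, 0, h.ptr);
	assert(h == [0UL, 0UL]);
}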

//-----------------------------------------------------------------------------