//-----------------------------------------------------------------------------
// Platform-specific functions and macros

#if defined(_MSC_VER)  // Microsoft Visual Studio

#define FORCE_INLINE    __forceinline

#include <stdlib.h>  // _rotl / _rotl64 intrinsics

#define ROTL32(x,y)     _rotl(x,y)
#define ROTL64(x,y)     _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

#else  // defined(_MSC_VER) -- other compilers

#define FORCE_INLINE inline __attribute__((always_inline))

// Rotate left by r bits. r must be in (0,32): a shift by the full word
// width is undefined behavior, and every call site uses a fixed 0 < r < 32.
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

// 64-bit counterpart of rotl32; same constraint 0 < r < 64.
inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y)     rotl32(x,y)
#define ROTL64(x,y)     rotl64(x,y)

// MSVC accepts plain 64-bit literals; GCC/Clang want an explicit LLU suffix.
#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here.
// NOTE(review): as written this performs a potentially-unaligned load through
// a cast pointer, which the reference implementation relies on x86 tolerating.

FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
{
  return p[i];
}

FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
  return p[i];
}

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

//-----------------------------------------------------------------------------
// MurmurHash3, 32-bit variant, optimized for x86.
//
//   key  - pointer to the data to hash (any byte buffer)
//   len  - length of the data in bytes
//   seed - initial hash state; different seeds give independent hash families
//   out  - receives the 32-bit result (written as one uint32_t)
//
// Processes the input in 4-byte blocks, mixes the 1-3 trailing bytes
// separately, then finalizes with fmix32 so every input bit avalanches.

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  //----------
  // body

  // blocks points one-past-the-end; the loop indexes with negative i.
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock32(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail: the 0-3 bytes left over after the 4-byte blocks.
  // The switch cases fall through deliberately to accumulate the bytes.

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization: fold in the length, then avalanche.

  h1 ^= len;

  h1 = fmix32(h1);

  *(uint32_t*)out = h1;
}
146 uint32_t seed,
void * out) {
147 const uint8_t * data = (
const uint8_t*) key;
148 const int nblocks = len / 16;
155 const uint32_t c1 = 0x239b961b;
156 const uint32_t c2 = 0xab0e9789;
157 const uint32_t c3 = 0x38b34ae5;
158 const uint32_t c4 = 0xa1e38b93;
163 const uint32_t * blocks = (
const uint32_t *) (data + nblocks * 16);
165 for (
int i = -nblocks; i; i++) {
178 h1 = h1 * 5 + 0x561ccd1b;
187 h2 = h2 * 5 + 0x0bcaa747;
196 h3 = h3 * 5 + 0x96cd1c35;
205 h4 = h4 * 5 + 0x32ac3b17;
211 const uint8_t * tail = (
const uint8_t*) (data + nblocks * 16);
219 case 15: k4 ^= tail[14] << 16;
220 case 14: k4 ^= tail[13] << 8;
221 case 13: k4 ^= tail[12] << 0;
227 case 12: k3 ^= tail[11] << 24;
228 case 11: k3 ^= tail[10] << 16;
229 case 10: k3 ^= tail[ 9] << 8;
230 case 9: k3 ^= tail[ 8] << 0;
236 case 8: k2 ^= tail[ 7] << 24;
237 case 7: k2 ^= tail[ 6] << 16;
238 case 6: k2 ^= tail[ 5] << 8;
239 case 5: k2 ^= tail[ 4] << 0;
245 case 4: k1 ^= tail[ 3] << 24;
246 case 3: k1 ^= tail[ 2] << 16;
247 case 2: k1 ^= tail[ 1] << 8;
248 case 1: k1 ^= tail[ 0] << 0;
282 ((uint32_t*) out)[0] = h1;
283 ((uint32_t*) out)[1] = h2;
284 ((uint32_t*) out)[2] = h3;
285 ((uint32_t*) out)[3] = h4;
291 const uint32_t seed,
void * out) {
292 const uint8_t * data = (
const uint8_t*) key;
293 const int nblocks = len / 16;
304 const uint64_t * blocks = (
const uint64_t *) (data);
306 for (
int i = 0; i < nblocks; i++) {
317 h1 = h1 * 5 + 0x52dce729;
326 h2 = h2 * 5 + 0x38495ab5;
332 const uint8_t * tail = (
const uint8_t*) (data + nblocks * 16);
338 case 15: k2 ^= ((uint64_t) tail[14]) << 48;
339 case 14: k2 ^= ((uint64_t) tail[13]) << 40;
340 case 13: k2 ^= ((uint64_t) tail[12]) << 32;
341 case 12: k2 ^= ((uint64_t) tail[11]) << 24;
342 case 11: k2 ^= ((uint64_t) tail[10]) << 16;
343 case 10: k2 ^= ((uint64_t) tail[ 9]) << 8;
344 case 9: k2 ^= ((uint64_t) tail[ 8]) << 0;
350 case 8: k1 ^= ((uint64_t) tail[ 7]) << 56;
351 case 7: k1 ^= ((uint64_t) tail[ 6]) << 48;
352 case 6: k1 ^= ((uint64_t) tail[ 5]) << 40;
353 case 5: k1 ^= ((uint64_t) tail[ 4]) << 32;
354 case 4: k1 ^= ((uint64_t) tail[ 3]) << 24;
355 case 3: k1 ^= ((uint64_t) tail[ 2]) << 16;
356 case 2: k1 ^= ((uint64_t) tail[ 1]) << 8;
357 case 1: k1 ^= ((uint64_t) tail[ 0]) << 0;
379 ((uint64_t*) out)[0] = h1;
380 ((uint64_t*) out)[1] = h2;
//-----------------------------------------------------------------------------
// NOTE(review): the lines below are a symbol index left behind by an
// extraction tool, not compilable C++ (signatures without bodies or
// semicolons); the `clarg::argBool` entry appears to be from an unrelated
// file. Preserved for reference, commented out so the translation unit
// remains valid.
//
//   FORCE_INLINE uint32_t getblock32(const uint32_t *p, int i)
//   FORCE_INLINE uint32_t fmix32(uint32_t h)
//   void MurmurHash3_x86_128(const void *key, const int len, uint32_t seed, void *out)
//   void MurmurHash3_x86_32(const void *key, int len, uint32_t seed, void *out)
//   clarg::argBool h("-h", "help message", false)
//   FORCE_INLINE uint64_t getblock64(const uint64_t *p, int i)
//   uint64_t rotl64(uint64_t x, int8_t r)
//   FORCE_INLINE uint64_t fmix64(uint64_t k)
//   void MurmurHash3_x64_128(const void *key, const int len, const uint32_t seed, void *out)
//   uint32_t rotl32(uint32_t x, int8_t r)