#include <ripple/beast/hash/impl/xxhash.h>

#include <string.h>  // for memcpy()
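//**************************************
// Tuning parameters
//**************************************
// XXH_USE_UNALIGNED_ACCESS: on targets known to tolerate unaligned loads
// (x86/x64, and ARM built with __ARM_FEATURE_UNALIGNED), input words are
// read directly; other targets go through the packed-struct helpers below.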
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || \
    defined(__x86_64__) || defined(_M_X64)
#define XXH_USE_UNALIGNED_ACCESS 1
#endif
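// XXH_FORCE_NATIVE_FORMAT: setting this to 1 would skip the byte swap on
// big-endian hosts. That is faster there, but the resulting hashes would
// no longer match the canonical little-endian xxHash values.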
#define XXH_FORCE_NATIVE_FORMAT 0
#ifdef _MSC_VER  // Visual Studio
#pragma warning(disable : 4127)  // disable: C4127: conditional expression is constant
#endif
#ifdef _MSC_VER  // Visual Studio
#define FORCE_INLINE static __forceinline
#else
#ifdef __GNUC__
#define FORCE_INLINE static inline __attribute__((always_inline))
#else
#define FORCE_INLINE static inline
#endif
#endif
static void*
XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
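// XXH_memcpy is kept as a local wrapper so another memory routine can be
// substituted without touching the hash code itself.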
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L  // C99
#include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
#define _PACKED __attribute__((packed))
#else
#define _PACKED
#endif

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#pragma pack(push, 1)
#endif

typedef struct _U32_S
{
    U32 v;
} _PACKED U32_S;
typedef struct _U64_S
{
    U64 v;
} _PACKED U64_S;

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#pragma pack(pop)
#endif

#define A32(x) (((U32_S*)(x))->v)
#define A64(x) (((U64_S*)(x))->v)
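// A32/A64 read a (possibly unaligned) word through a packed struct, which
// instructs the compiler to emit a load that is safe for any alignment.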
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#if defined(_MSC_VER)
#define XXH_rotl32(x, r) _rotl(x, r)
#define XXH_rotl64(x, r) _rotl64(x, r)
#else
#define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
#endif
#if defined(_MSC_VER)  // Visual Studio
#define XXH_swap32 _byteswap_ulong
#define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#define XXH_swap32 __builtin_bswap32
#define XXH_swap64 __builtin_bswap64
#else
static inline U32
XXH_swap32(U32 x)
{
    return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
        ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
}

static inline U64
XXH_swap64(U64 x)
{
    return ((x << 56) & 0xff00000000000000ULL) |
        ((x << 40) & 0x00ff000000000000ULL) |
        ((x << 24) & 0x0000ff0000000000ULL) |
        ((x << 8) & 0x000000ff00000000ULL) |
        ((x >> 8) & 0x00000000ff000000ULL) |
        ((x >> 24) & 0x0000000000ff0000ULL) |
        ((x >> 40) & 0x000000000000ff00ULL) |
        ((x >> 56) & 0x00000000000000ffULL);
}
#endif
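//**************************************
// Constants
//**************************************
// The five 32-bit and five 64-bit primes below are the xxHash mixing
// constants; they are fixed by the algorithm and must not be changed.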
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4 668265263U
#define PRIME32_5 374761393U

#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3 1609587929392839161ULL
#define PRIME64_4 9650029242287828579ULL
#define PRIME64_5 2870177450012600261ULL
#ifndef XXH_CPU_LITTLE_ENDIAN  // It is possible to define XXH_CPU_LITTLE_ENDIAN
                               // externally, for example with a compiler switch
static const int one = 1;
#define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
#endif
#define XXH_STATIC_ASSERT(c)                      \
    {                                             \
        enum { XXH_static_assert = 1 / (!!(c)) }; \
    }  // use only *after* variable declarations
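// When the condition is false, the enum value divides by zero and the
// compiler rejects the translation unit: a C89-compatible static assert.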
typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32
XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align == XXH_unaligned)
        return endian == XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
    else
    {
        U32 tmp_aligned = *(const U32*)ptr;
        return endian == XXH_littleEndian ? tmp_aligned
                                          : XXH_swap32(tmp_aligned);
    }
}

FORCE_INLINE U32
XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

FORCE_INLINE U64
XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align == XXH_unaligned)
        return endian == XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
    else
    {
        U64 tmp_aligned = *(const U64*)ptr;
        return endian == XXH_littleEndian ? tmp_aligned
                                          : XXH_swap64(tmp_aligned);
    }
}

FORCE_INLINE U64
XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
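//****************************
// Simple Hash Functions
//****************************
// Core of the 32-bit hash: four accumulator lanes consume 16-byte stripes,
// the remaining bytes are folded in 4-byte and then 1-byte steps, and a
// final avalanche of shifts and multiplies spreads entropy across all bits.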
FORCE_INLINE U32
XXH32_endian_align(
    const void* input,
    size_t len,
    U32 seed,
    XXH_endianess endian,
    XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p == NULL)
    {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)16;
    }
#endif

    if (len >= 16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p += 4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p += 4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p += 4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p += 4;
        } while (p <= limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) +
            XXH_rotl32(v4, 18);
    }
    else
    {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32)len;

    while (p + 4 <= bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while (p < bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
unsigned int
XXH32(const void* input, size_t len, unsigned seed)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 3) == 0)  // input is 4-byte aligned: fast path
    {
        if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(
                input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(
                input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(
            input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(
            input, len, seed, XXH_bigEndian, XXH_unaligned);
}
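// 64-bit variant: same shape as XXH32_endian_align, but with 32-byte
// stripes, 64-bit lanes, and an extra merge step that folds each lane back
// into the result before the tail bytes are consumed.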
FORCE_INLINE U64
XXH64_endian_align(
    const void* input,
    size_t len,
    U64 seed,
    XXH_endianess endian,
    XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p == NULL)
    {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)32;
    }
#endif

    if (len >= 32)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        } while (p <= limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
            XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64)len;

    while (p + 8 <= bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if (p + 4 <= bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while (p < bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
unsigned long long
XXH64(const void* input, size_t len, unsigned long long seed)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 7) == 0)  // input is 8-byte aligned: fast path
    {
        if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH64_endian_align(
                input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH64_endian_align(
                input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(
            input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(
            input, len, seed, XXH_bigEndian, XXH_unaligned);
}
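/*  Usage sketch for the one-shot API (illustrative only, not part of the
    library source):

        const char msg[] = "xxhash";
        unsigned int h32 = XXH32(msg, sizeof(msg) - 1, 0);        // seed 0
        unsigned long long h64 = XXH64(msg, sizeof(msg) - 1, 0);  // seed 0

    Equal input bytes and equal seeds always yield equal hash values,
    regardless of host endianness or alignment.
*/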
// Internal state representations; the layout mirrors upstream xxHash, with
// mem32/mem64 sized to hold exactly one unconsumed stripe.
typedef struct
{
    U64 total_len;
    U32 seed;
    U32 v1;
    U32 v2;
    U32 v3;
    U32 v4;
    U32 mem32[4];  // defined as U32 for alignment
    U32 memsize;
} XXH_istate32_t;

typedef struct
{
    U64 total_len;
    U64 seed;
    U64 v1;
    U64 v2;
    U64 v3;
    U64 v4;
    U64 mem64[4];  // defined as U64 for alignment
    U32 memsize;
} XXH_istate64_t;

XXH_errorcode
XXH32_reset(XXH32_state_t* state_in, U32 seed)
{
    XXH_istate32_t* state = (XXH_istate32_t*)state_in;
    state->seed = seed;
    state->v1 = seed + PRIME32_1 + PRIME32_2;
    state->v2 = seed + PRIME32_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME32_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}
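// Note that the four lanes start at seed plus/minus fixed primes; the
// digest only folds them into the result once at least one full stripe
// has been consumed.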
XXH_errorcode
XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
{
    XXH_istate64_t* state = (XXH_istate64_t*)state_in;
    state->seed = seed;
    state->v1 = seed + PRIME64_1 + PRIME64_2;
    state->v2 = seed + PRIME64_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME64_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}
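// The update functions below buffer a partial stripe in state->mem32 /
// state->mem64, so callers may feed input in chunks of any size.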
FORCE_INLINE XXH_errorcode
XXH32_update_endian(
    XXH32_state_t* state_in,
    const void* input,
    size_t len,
    XXH_endianess endian)
{
    XXH_istate32_t* state = (XXH_istate32_t*)state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input == NULL)
        return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16)  // fill in tmp buffer
    {
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)  // some data left from previous update
    {
        XXH_memcpy(
            (BYTE*)(state->mem32) + state->memsize,
            input,
            16 - state->memsize);
        {
            const U32* p32 = state->mem32;
            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v1 = XXH_rotl32(state->v1, 13);
            state->v1 *= PRIME32_1;
            p32++;
            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v2 = XXH_rotl32(state->v2, 13);
            state->v2 *= PRIME32_1;
            p32++;
            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v3 = XXH_rotl32(state->v3, 13);
            state->v3 *= PRIME32_1;
            p32++;
            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v4 = XXH_rotl32(state->v4, 13);
            state->v4 *= PRIME32_1;
            p32++;
        }
        p += 16 - state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd - 16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do
        {
            v1 += XXH_readLE32(p, endian) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p += 4;
            v2 += XXH_readLE32(p, endian) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p += 4;
            v3 += XXH_readLE32(p, endian) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p += 4;
            v4 += XXH_readLE32(p, endian) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p += 4;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        XXH_memcpy(state->mem32, p, bEnd - p);
        state->memsize = (int)(bEnd - p);
    }

    return XXH_OK;
}
FORCE_INLINE U32
XXH32_digest_endian(const XXH32_state_t* state_in, XXH_endianess endian)
{
    XXH_istate32_t* state = (XXH_istate32_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem32;
    BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->total_len >= 16)
    {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
            XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    }
    else
    {
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32)state->total_len;

    while (p + 4 <= bEnd)
    {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while (p < bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
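// Digest reads the accumulated state without modifying it, so a caller may
// take an intermediate hash and continue updating afterwards.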
FORCE_INLINE XXH_errorcode
XXH64_update_endian(
    XXH64_state_t* state_in,
    const void* input,
    size_t len,
    XXH_endianess endian)
{
    XXH_istate64_t* state = (XXH_istate64_t*)state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input == NULL)
        return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32)  // fill in tmp buffer
    {
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)  // some data left from previous update
    {
        XXH_memcpy(
            ((BYTE*)state->mem64) + state->memsize,
            input,
            32 - state->memsize);
        {
            const U64* p64 = state->mem64;
            state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v1 = XXH_rotl64(state->v1, 31);
            state->v1 *= PRIME64_1;
            p64++;
            state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v2 = XXH_rotl64(state->v2, 31);
            state->v2 *= PRIME64_1;
            p64++;
            state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v3 = XXH_rotl64(state->v3, 31);
            state->v3 *= PRIME64_1;
            p64++;
            state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v4 = XXH_rotl64(state->v4, 31);
            state->v4 *= PRIME64_1;
            p64++;
        }
        p += 32 - state->memsize;
        state->memsize = 0;
    }

    if (p + 32 <= bEnd)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do
        {
            v1 += XXH_readLE64(p, endian) * PRIME64_2;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            p += 8;
            v2 += XXH_readLE64(p, endian) * PRIME64_2;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            p += 8;
            v3 += XXH_readLE64(p, endian) * PRIME64_2;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            p += 8;
            v4 += XXH_readLE64(p, endian) * PRIME64_2;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
            p += 8;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        XXH_memcpy(state->mem64, p, bEnd - p);
        state->memsize = (int)(bEnd - p);
    }

    return XXH_OK;
}
FORCE_INLINE U64
XXH64_digest_endian(const XXH64_state_t* state_in, XXH_endianess endian)
{
    XXH_istate64_t* state = (XXH_istate64_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem64;
    BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32)
    {
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
            XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64)state->total_len;

    while (p + 8 <= bEnd)
    {
        U64 k1 = XXH_readLE64(p, endian);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if (p + 4 <= bEnd)
    {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while (p < bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
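/*  Streaming usage sketch (illustrative only, not part of the library
    source); the public XXH64_reset / XXH64_update / XXH64_digest wrappers
    are declared in the header included above:

        XXH64_state_t st;
        XXH64_reset(&st, 0);
        XXH64_update(&st, part1, len1);
        XXH64_update(&st, part2, len2);
        unsigned long long h = XXH64_digest(&st);
        // h equals XXH64(whole_input, len1 + len2, 0)
*/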