Line data Source code
1 : /*
2 : xxHash - Fast Hash algorithm
3 : Copyright (C) 2012-2015, Yann Collet
4 :
5 : BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 :
7 : Redistribution and use in source and binary forms, with or without
8 : modification, are permitted provided that the following conditions are
9 : met:
10 :
11 : * Redistributions of source code must retain the above copyright
12 : notice, this list of conditions and the following disclaimer.
13 : * Redistributions in binary form must reproduce the above
14 : copyright notice, this list of conditions and the following disclaimer
15 : in the documentation and/or other materials provided with the
16 : distribution.
17 :
18 : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 : "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 : LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 : A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 : OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 : SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 : LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 : DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 : THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 : (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 : OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 :
30 : You can contact the author at :
31 : - xxHash source repository : https://github.com/Cyan4973/xxHash
32 : */
33 :
34 : /**************************************
35 : * Tuning parameters
36 : **************************************/
37 : /* Unaligned memory access is automatically enabled for "common" CPU, such as x86.
38 : * For other CPUs, the compiler will be more cautious, and insert extra code to ensure
39 : * aligned access is respected.
40 : * If you know your target CPU supports unaligned memory access, you want to force this
41 : * option manually to improve performance.
42 : * You can also enable this parameter if you know your input data will always be aligned
43 : * (boundaries of 4, for U32).
44 : */
45 : #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || \
46 : defined(__x86_64__) || defined(_M_X64)
47 : #define XXH_USE_UNALIGNED_ACCESS 1
48 : #endif
49 :
50 : /* XXH_ACCEPT_NULL_INPUT_POINTER :
51 : * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory
52 : * access error, since it is a bad pointer.
53 : * When this option is enabled, xxHash output for null input pointers will be the same as
54 : * a null-length input.
55 : * By default, this option is disabled. To enable it, uncomment below define :
56 : */
57 : /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
58 :
59 : /* XXH_FORCE_NATIVE_FORMAT :
60 : * By default, xxHash library provides endian-independent Hash values, based on
61 : * little-endian convention.
62 : * Results are therefore identical for little-endian and big-endian CPU.
63 : * This comes at a performance cost for big-endian CPU, since some swapping is required to
64 : * emulate little-endian format.
65 : * Should endian-independence be of no importance for your application, you may set the
66 : * #define below to 1.
67 : * It will improve speed for Big-endian CPU.
68 : * This option has no impact on little-endian CPUs.
69 : */
70 : #define XXH_FORCE_NATIVE_FORMAT 0
71 :
72 : /**************************************
73 : * Compiler Specific Options
74 : ***************************************/
75 : #ifdef _MSC_VER /* Visual Studio */
76 : #pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
77 : #define FORCE_INLINE static __forceinline
78 : #else
79 : #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
80 : #ifdef __GNUC__
81 : #define FORCE_INLINE static inline __attribute__((always_inline))
82 : #else
83 : #define FORCE_INLINE static inline
84 : #endif
85 : #else
86 : #define FORCE_INLINE static
87 : #endif /* __STDC_VERSION__ */
88 : #endif
89 :
90 : /**************************************
91 : * Includes & Memory related functions
92 : ***************************************/
93 : #include "xxhash.h"
94 : /* Modify the local functions below should you wish to use some other memory routines */
95 : /* for malloc(), free() */
96 : #include <stdlib.h>
/* Allocation wrapper: single indirection point so alternative allocators can
 * be substituted without touching the rest of the library. */
static void* XXH_malloc(size_t s) {
    return malloc(s);
}
/* Deallocation wrapper paired with XXH_malloc (free(NULL) is a no-op). */
static void XXH_free(void* p) {
    free(p);
}
103 : /* for memcpy() */
104 : #include <string.h>
/* memcpy wrapper: single indirection point for the streaming buffers; returns
 * dest, like memcpy. Regions must not overlap. */
static void* XXH_memcpy(void* dest, const void* src, size_t size) {
    return memcpy(dest, src, size);
}
108 :
109 : /**************************************
110 : * Basic Types
111 : ***************************************/
112 : #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
113 : #include <stdint.h>
114 : typedef uint8_t BYTE;
115 : typedef uint16_t U16;
116 : typedef uint32_t U32;
117 : typedef int32_t S32;
118 : typedef uint64_t U64;
119 : #else
120 : typedef unsigned char BYTE;
121 : typedef unsigned short U16;
122 : typedef unsigned int U32;
123 : typedef signed int S32;
124 : typedef unsigned long long U64;
125 : #endif
126 :
127 9204 : static U32 XXH_read32(const void* memPtr) {
128 : U32 val32;
129 9204 : memcpy(&val32, memPtr, 4);
130 9204 : return val32;
131 : }
132 :
133 9473567 : static U64 XXH_read64(const void* memPtr) {
134 : U64 val64;
135 9473567 : memcpy(&val64, memPtr, 8);
136 9473567 : return val64;
137 : }
138 :
139 : /******************************************
140 : * Compiler-specific Functions and Macros
141 : ******************************************/
/* GCC version folded into one comparable integer, e.g. 4.3 -> 403. */
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#define XXH_rotl32(x, r) _rotl(x, r)
#define XXH_rotl64(x, r) _rotl64(x, r)
#else
/* Fixed: parenthesize the macro arguments so expansion stays correct when a
 * caller passes a compound expression (classic macro-hygiene hazard; the old
 * form `x << r` would mis-bind e.g. `a + b` or `r & 31`). All in-file callers
 * use a constant r in 1..31 / 1..63, so the shift widths are well-defined. */
#define XXH_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#define XXH_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER) /* Visual Studio */
#define XXH_swap32 _byteswap_ulong
#define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#define XXH_swap32 __builtin_bswap32
#define XXH_swap64 __builtin_bswap64
#else
/* Portable byte-swap fallbacks for compilers without intrinsics. */
static U32 XXH_swap32(U32 x) {
    return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) |
           ((x >> 24) & 0x000000ff);
}
static U64 XXH_swap64(U64 x) {
    return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) |
           ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) |
           ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) |
           ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL);
}
#endif
171 :
172 : /***************************************
173 : * Architecture Macros
174 : ***************************************/
/* Endianness of the running CPU; detected at runtime unless overridden. */
typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for \
                                 example using a compiler switch */
/* Runtime detection: inspect the first byte of an int equal to 1 — it is
 * non-zero exactly on little-endian machines. Compilers fold this to a
 * constant, so the endian branches below cost nothing. */
static const int one = 1;
#define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&one))
#endif
181 :
182 : /*****************************
183 : * Memory reads
184 : *****************************/
185 : typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
186 :
/* Read a little-endian U32 at ptr, byte-swapping on big-endian targets.
 * `align == XXH_aligned` promises ptr is 4-byte aligned, enabling a direct
 * load; both enum arguments are compile-time constants at every call site,
 * so the branches vanish after inlining. */
FORCE_INLINE U32
XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) {
    if(align == XXH_unaligned)
        return endian == XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian == XXH_littleEndian ? *(const U32*)ptr
                                          : XXH_swap32(*(const U32*)ptr);
}
195 :
/* Convenience form of XXH_readLE32_align for callers with no alignment
 * guarantee (the streaming update/digest paths). */
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) {
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
199 :
/* 64-bit counterpart of XXH_readLE32_align: little-endian U64 load with an
 * optional aligned fast path; branches fold away after inlining since both
 * enums are constants at every call site. */
FORCE_INLINE U64
XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) {
    if(align == XXH_unaligned)
        return endian == XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian == XXH_littleEndian ? *(const U64*)ptr
                                          : XXH_swap64(*(const U64*)ptr);
}
208 :
/* Convenience form of XXH_readLE64_align for callers with no alignment
 * guarantee (the streaming update/digest paths). */
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) {
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
212 :
213 : /***************************************
214 : * Macros
215 : ***************************************/
/* Pre-C11 compile-time assertion: a false condition triggers a constant
 * division by zero, which fails compilation. Wrapped in a block so it can sit
 * inside a function body. */
#define XXH_STATIC_ASSERT(c) \
    { \
        enum { XXH_static_assert = 1 / (!!(c)) }; \
    } /* use only *after* variable declarations */
220 :
221 : /***************************************
222 : * Constants
223 : ***************************************/
224 : #define PRIME32_1 2654435761U
225 : #define PRIME32_2 2246822519U
226 : #define PRIME32_3 3266489917U
227 : #define PRIME32_4 668265263U
228 : #define PRIME32_5 374761393U
229 :
230 : #define PRIME64_1 11400714785074694791ULL
231 : #define PRIME64_2 14029467366897019727ULL
232 : #define PRIME64_3 1609587929392839161ULL
233 : #define PRIME64_4 9650029242287828579ULL
234 : #define PRIME64_5 2870177450012600261ULL
235 :
236 : /*****************************
237 : * Simple Hash Functions
238 : *****************************/
/* One-shot XXH32 core. `endian`/`align` are compile-time constants at each
 * call site (see XXH32), so the four template instantiations are each fully
 * specialized after inlining. Statement order in the mixing loop is part of
 * the algorithm — do not reorder. */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed,
                                    XXH_endianess endian, XXH_alignment align) {
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as an empty input: fake a pointer so `bEnd - 16` below is
     * harmless, with len forced to 0. */
    if(p == NULL) {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)16;
    }
#endif

    /* Bulk phase: consume 16-byte stripes across four independent lanes. */
    if(len >= 16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p += 4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p += 4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p += 4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p += 4;
        } while(p <= limit);

        /* Converge the four lanes into a single accumulator. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) +
              XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32)len;

    /* Tail: remaining 4-byte words, then remaining single bytes. */
    while(p + 4 <= bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while(p < bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* Final avalanche: spread the influence of every input bit. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
307 :
/* Public one-shot 32-bit hash. Dispatches on runtime endianness and input
 * alignment so that each combination reaches a fully specialized, inlined
 * instantiation of XXH32_endian_align. */
unsigned XXH32(const void* input, size_t len, unsigned seed) {
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if((((size_t)input) & 3) ==
       0) /* Input is 4-bytes aligned, leverage the speed benefit */
    {
        if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
335 :
/* One-shot XXH64 core. Like XXH32_endian_align, `endian`/`align` are
 * compile-time constants at every call site, so each dispatch target is a
 * fully specialized instantiation. Statement order in the mixing loop and the
 * lane-merge sequence is part of the algorithm — do not reorder. */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed,
                                    XXH_endianess endian, XXH_alignment align) {
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as an empty input: fake a pointer so `bEnd - 32` below is
     * harmless, with len forced to 0. */
    if(p == NULL) {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)32;
    }
#endif

    /* Bulk phase: consume 32-byte stripes across four independent lanes. */
    if(len >= 32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        } while(p <= limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
              XXH_rotl64(v4, 18);

        /* Merge each lane into h64 (XXH64 re-scrambles lanes here; the
         * 32-bit variant has no such step). */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64)len;

    /* Tail: remaining 8-byte words, one optional 4-byte word, then bytes. */
    while(p + 8 <= bEnd) {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if(p + 4 <= bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while(p < bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
438 :
/* Public one-shot 64-bit hash. Dispatches on runtime endianness and input
 * alignment so that each combination reaches a fully specialized, inlined
 * instantiation of XXH64_endian_align. */
unsigned long long XXH64(const void* input, size_t len, unsigned long long seed) {
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if((((size_t)input) & 7) ==
       0) /* Input is aligned, let's leverage the speed advantage */
    {
        if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
466 :
467 : /****************************************************
468 : * Advanced Hash Functions
469 : ****************************************************/
470 :
471 : /*** Allocation ***/
/* Internal streaming state for XXH32. The public XXH32_state_t is an opaque
 * byte blob; XXH32_createState static-asserts it is at least this large. */
typedef struct {
    U64 total_len;  /* total bytes fed so far (drives the >= 16 branch in digest) */
    U32 seed;
    U32 v1;         /* the four running lane accumulators */
    U32 v2;
    U32 v3;
    U32 v4;
    U32 mem32[4]; /* defined as U32 for alignment */
    U32 memsize;  /* valid bytes currently buffered in mem32 (0..15) */
} XXH_istate32_t;

/* Internal streaming state for XXH64; mirrors XXH_istate32_t with 64-bit
 * lanes and a 32-byte stripe buffer. */
typedef struct {
    U64 total_len;  /* total bytes fed so far (drives the >= 32 branch in digest) */
    U64 seed;
    U64 v1;         /* the four running lane accumulators */
    U64 v2;
    U64 v3;
    U64 v4;
    U64 mem64[4]; /* defined as U64 for alignment */
    U32 memsize;  /* valid bytes currently buffered in mem64 (0..31) */
} XXH_istate64_t;
493 :
/* Heap-allocate an opaque XXH32 streaming state. May return NULL on
 * allocation failure; caller owns it and releases via XXH32_freeState. */
XXH32_state_t* XXH32_createState(void) {
    XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >=
                      sizeof(XXH_istate32_t)); /* A compilation error here means
                                                  XXH32_state_t is not large enough */
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/* Release a state from XXH32_createState. Always succeeds (NULL is a no-op). */
XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) {
    XXH_free(statePtr);
    return XXH_OK;
}
504 :
/* Heap-allocate an opaque XXH64 streaming state. May return NULL on
 * allocation failure; caller owns it and releases via XXH64_freeState. */
XXH64_state_t* XXH64_createState(void) {
    XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >=
                      sizeof(XXH_istate64_t)); /* A compilation error here means
                                                  XXH64_state_t is not large enough */
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
/* Release a state from XXH64_createState. Always succeeds (NULL is a no-op). */
XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) {
    XXH_free(statePtr);
    return XXH_OK;
}
515 :
516 : /*** Hash feed ***/
517 :
518 0 : XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed) {
519 0 : XXH_istate32_t* state = (XXH_istate32_t*)state_in;
520 0 : state->seed = seed;
521 0 : state->v1 = seed + PRIME32_1 + PRIME32_2;
522 0 : state->v2 = seed + PRIME32_2;
523 0 : state->v3 = seed + 0;
524 0 : state->v4 = seed - PRIME32_1;
525 0 : state->total_len = 0;
526 0 : state->memsize = 0;
527 0 : return XXH_OK;
528 : }
529 :
530 0 : XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed) {
531 0 : XXH_istate64_t* state = (XXH_istate64_t*)state_in;
532 0 : state->seed = seed;
533 0 : state->v1 = seed + PRIME64_1 + PRIME64_2;
534 0 : state->v2 = seed + PRIME64_2;
535 0 : state->v3 = seed + 0;
536 0 : state->v4 = seed - PRIME64_1;
537 0 : state->total_len = 0;
538 0 : state->memsize = 0;
539 0 : return XXH_OK;
540 : }
541 :
542 : FORCE_INLINE XXH_errorcode XXH32_update_endian(XXH32_state_t* state_in, const void* input,
543 : size_t len, XXH_endianess endian) {
544 0 : XXH_istate32_t* state = (XXH_istate32_t*)state_in;
545 0 : const BYTE* p = (const BYTE*)input;
546 0 : const BYTE* const bEnd = p + len;
547 :
548 : #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
549 : if(input == NULL)
550 : return XXH_ERROR;
551 : #endif
552 :
553 0 : state->total_len += len;
554 :
555 0 : if(state->memsize + len < 16) /* fill in tmp buffer */
556 : {
557 0 : XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
558 0 : state->memsize += (U32)len;
559 0 : return XXH_OK;
560 : }
561 :
562 0 : if(state->memsize) /* some data left from previous update */
563 : {
564 0 : XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16 - state->memsize);
565 : {
566 0 : const U32* p32 = state->mem32;
567 0 : state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
568 0 : state->v1 = XXH_rotl32(state->v1, 13);
569 0 : state->v1 *= PRIME32_1;
570 0 : p32++;
571 0 : state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
572 0 : state->v2 = XXH_rotl32(state->v2, 13);
573 0 : state->v2 *= PRIME32_1;
574 0 : p32++;
575 0 : state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
576 0 : state->v3 = XXH_rotl32(state->v3, 13);
577 0 : state->v3 *= PRIME32_1;
578 0 : p32++;
579 0 : state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
580 0 : state->v4 = XXH_rotl32(state->v4, 13);
581 0 : state->v4 *= PRIME32_1;
582 0 : p32++;
583 : }
584 0 : p += 16 - state->memsize;
585 0 : state->memsize = 0;
586 : }
587 :
588 0 : if(p <= bEnd - 16) {
589 0 : const BYTE* const limit = bEnd - 16;
590 0 : U32 v1 = state->v1;
591 0 : U32 v2 = state->v2;
592 0 : U32 v3 = state->v3;
593 0 : U32 v4 = state->v4;
594 :
595 : do {
596 0 : v1 += XXH_readLE32(p, endian) * PRIME32_2;
597 0 : v1 = XXH_rotl32(v1, 13);
598 0 : v1 *= PRIME32_1;
599 0 : p += 4;
600 0 : v2 += XXH_readLE32(p, endian) * PRIME32_2;
601 0 : v2 = XXH_rotl32(v2, 13);
602 0 : v2 *= PRIME32_1;
603 0 : p += 4;
604 0 : v3 += XXH_readLE32(p, endian) * PRIME32_2;
605 0 : v3 = XXH_rotl32(v3, 13);
606 0 : v3 *= PRIME32_1;
607 0 : p += 4;
608 0 : v4 += XXH_readLE32(p, endian) * PRIME32_2;
609 0 : v4 = XXH_rotl32(v4, 13);
610 0 : v4 *= PRIME32_1;
611 0 : p += 4;
612 0 : } while(p <= limit);
613 :
614 0 : state->v1 = v1;
615 0 : state->v2 = v2;
616 0 : state->v3 = v3;
617 0 : state->v4 = v4;
618 : }
619 :
620 0 : if(p < bEnd) {
621 0 : XXH_memcpy(state->mem32, p, bEnd - p);
622 0 : state->memsize = (int)(bEnd - p);
623 : }
624 :
625 0 : return XXH_OK;
626 : }
627 :
628 0 : XXH_errorcode XXH32_update(XXH32_state_t* state_in, const void* input, size_t len) {
629 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
630 :
631 0 : if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
632 0 : return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
633 : else
634 0 : return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
635 : }
636 :
/* Produce the current XXH32 value from a streaming state without modifying
 * it; mirrors the tail + avalanche of XXH32_endian_align, reading the
 * leftover bytes from state->mem32. */
FORCE_INLINE U32
XXH32_digest_endian(const XXH32_state_t* state_in, XXH_endianess endian) {
    const XXH_istate32_t* state = (const XXH_istate32_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem32;
    const BYTE* bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    /* >= 16 total bytes means the lane accumulators were engaged. */
    if(state->total_len >= 16) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
              XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32)state->total_len;

    /* Fold in the buffered tail: 4-byte words, then single bytes. */
    while(p + 4 <= bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while(p < bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* Final avalanche (identical to the one-shot path). */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
673 :
674 0 : U32 XXH32_digest(const XXH32_state_t* state_in) {
675 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
676 :
677 0 : if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
678 0 : return XXH32_digest_endian(state_in, XXH_littleEndian);
679 : else
680 0 : return XXH32_digest_endian(state_in, XXH_bigEndian);
681 : }
682 :
683 : FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t* state_in, const void* input,
684 : size_t len, XXH_endianess endian) {
685 0 : XXH_istate64_t* state = (XXH_istate64_t*)state_in;
686 0 : const BYTE* p = (const BYTE*)input;
687 0 : const BYTE* const bEnd = p + len;
688 :
689 : #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
690 : if(input == NULL)
691 : return XXH_ERROR;
692 : #endif
693 :
694 0 : state->total_len += len;
695 :
696 0 : if(state->memsize + len < 32) /* fill in tmp buffer */
697 : {
698 0 : XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
699 0 : state->memsize += (U32)len;
700 0 : return XXH_OK;
701 : }
702 :
703 0 : if(state->memsize) /* some data left from previous update */
704 : {
705 0 : XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32 - state->memsize);
706 : {
707 0 : const U64* p64 = state->mem64;
708 0 : state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
709 0 : state->v1 = XXH_rotl64(state->v1, 31);
710 0 : state->v1 *= PRIME64_1;
711 0 : p64++;
712 0 : state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
713 0 : state->v2 = XXH_rotl64(state->v2, 31);
714 0 : state->v2 *= PRIME64_1;
715 0 : p64++;
716 0 : state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
717 0 : state->v3 = XXH_rotl64(state->v3, 31);
718 0 : state->v3 *= PRIME64_1;
719 0 : p64++;
720 0 : state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
721 0 : state->v4 = XXH_rotl64(state->v4, 31);
722 0 : state->v4 *= PRIME64_1;
723 0 : p64++;
724 : }
725 0 : p += 32 - state->memsize;
726 0 : state->memsize = 0;
727 : }
728 :
729 0 : if(p + 32 <= bEnd) {
730 0 : const BYTE* const limit = bEnd - 32;
731 0 : U64 v1 = state->v1;
732 0 : U64 v2 = state->v2;
733 0 : U64 v3 = state->v3;
734 0 : U64 v4 = state->v4;
735 :
736 : do {
737 0 : v1 += XXH_readLE64(p, endian) * PRIME64_2;
738 0 : v1 = XXH_rotl64(v1, 31);
739 0 : v1 *= PRIME64_1;
740 0 : p += 8;
741 0 : v2 += XXH_readLE64(p, endian) * PRIME64_2;
742 0 : v2 = XXH_rotl64(v2, 31);
743 0 : v2 *= PRIME64_1;
744 0 : p += 8;
745 0 : v3 += XXH_readLE64(p, endian) * PRIME64_2;
746 0 : v3 = XXH_rotl64(v3, 31);
747 0 : v3 *= PRIME64_1;
748 0 : p += 8;
749 0 : v4 += XXH_readLE64(p, endian) * PRIME64_2;
750 0 : v4 = XXH_rotl64(v4, 31);
751 0 : v4 *= PRIME64_1;
752 0 : p += 8;
753 0 : } while(p <= limit);
754 :
755 0 : state->v1 = v1;
756 0 : state->v2 = v2;
757 0 : state->v3 = v3;
758 0 : state->v4 = v4;
759 : }
760 :
761 0 : if(p < bEnd) {
762 0 : XXH_memcpy(state->mem64, p, bEnd - p);
763 0 : state->memsize = (int)(bEnd - p);
764 : }
765 :
766 0 : return XXH_OK;
767 : }
768 :
769 0 : XXH_errorcode XXH64_update(XXH64_state_t* state_in, const void* input, size_t len) {
770 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
771 :
772 0 : if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
773 0 : return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
774 : else
775 0 : return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
776 : }
777 :
/* Produce the current XXH64 value from a streaming state without modifying
 * it; mirrors the lane-merge + tail + avalanche of XXH64_endian_align,
 * reading the leftover bytes from state->mem64. */
FORCE_INLINE U64
XXH64_digest_endian(const XXH64_state_t* state_in, XXH_endianess endian) {
    const XXH_istate64_t* state = (const XXH_istate64_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem64;
    const BYTE* bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    /* >= 32 total bytes means the lane accumulators were engaged. */
    if(state->total_len >= 32) {
        /* Local copies: the merge below scrambles the lanes, and the state
         * must remain usable for further updates. */
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
              XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    } else {
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64)state->total_len;

    /* Fold in the buffered tail: 8-byte words, one optional 4-byte word,
     * then single bytes. */
    while(p + 8 <= bEnd) {
        U64 k1 = XXH_readLE64(p, endian);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if(p + 4 <= bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while(p < bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche (identical to the one-shot path). */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
853 :
854 0 : unsigned long long XXH64_digest(const XXH64_state_t* state_in) {
855 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
856 :
857 0 : if((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
858 0 : return XXH64_digest_endian(state_in, XXH_littleEndian);
859 : else
860 0 : return XXH64_digest_endian(state_in, XXH_bigEndian);
861 : }
|