Line data Source code
1 : /*
2 : xxHash - Fast Hash algorithm
3 : Copyright (C) 2012-2015, Yann Collet
4 :
5 : BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 :
7 : Redistribution and use in source and binary forms, with or without
8 : modification, are permitted provided that the following conditions are
9 : met:
10 :
11 : * Redistributions of source code must retain the above copyright
12 : notice, this list of conditions and the following disclaimer.
13 : * Redistributions in binary form must reproduce the above
14 : copyright notice, this list of conditions and the following disclaimer
15 : in the documentation and/or other materials provided with the
16 : distribution.
17 :
18 : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 : "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 : LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 : A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 : OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 : SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 : LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 : DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 : THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 : (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 : OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 :
30 : You can contact the author at :
31 : - xxHash source repository : https://github.com/Cyan4973/xxHash
32 : */
33 :
34 :
35 : /**************************************
36 : * Tuning parameters
37 : **************************************/
38 : /* Unaligned memory access is automatically enabled for "common" CPU, such as x86.
39 : * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
40 : * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
41 : * You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
42 : */
43 : #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
44 : # define XXH_USE_UNALIGNED_ACCESS 1
45 : #endif
46 :
47 : /* XXH_ACCEPT_NULL_INPUT_POINTER :
48 : * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
49 : * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
50 : * By default, this option is disabled. To enable it, uncomment below define :
51 : */
52 : /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
53 :
54 : /* XXH_FORCE_NATIVE_FORMAT :
55 : * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
56 : * Results are therefore identical for little-endian and big-endian CPU.
57 : * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
58 : * Should endian-independance be of no importance for your application, you may set the #define below to 1.
59 : * It will improve speed for Big-endian CPU.
60 : * This option has no impact on Little_Endian CPU.
61 : */
62 : #define XXH_FORCE_NATIVE_FORMAT 0
63 :
64 :
65 : /**************************************
66 : * Compiler Specific Options
67 : ***************************************/
68 : #ifdef _MSC_VER /* Visual Studio */
69 : # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
70 : # define FORCE_INLINE static __forceinline
71 : #else
72 : # if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
73 : # ifdef __GNUC__
74 : # define FORCE_INLINE static inline __attribute__((always_inline))
75 : # else
76 : # define FORCE_INLINE static inline
77 : # endif
78 : # else
79 : # define FORCE_INLINE static
80 : # endif /* __STDC_VERSION__ */
81 : #endif
82 :
83 :
84 : /**************************************
85 : * Includes & Memory related functions
86 : ***************************************/
87 : #include "mxxhash.h"
88 : /* Modify the local functions below should you wish to use some other memory routines */
89 : /* for malloc(), free() */
90 : #include <stdlib.h>
/* Thin wrapper over malloc(); swap the body to plug in a custom allocator. */
static void* XXH_malloc(size_t s)
{
    return malloc(s);
}
/* Thin wrapper over free(); counterpart of XXH_malloc(). */
static void XXH_free(void* p)
{
    free(p);
}
93 : /* for memcpy() */
94 : #include <string.h>
/* Thin wrapper over memcpy(); swap the body to plug in a custom copy routine. */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
96 :
97 :
98 : /**************************************
99 : * Basic Types
100 : ***************************************/
101 : #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
102 : # include <stdint.h>
103 : typedef uint8_t BYTE;
104 : typedef uint16_t U16;
105 : typedef uint32_t U32;
106 : typedef int32_t S32;
107 : typedef uint64_t U64;
108 : #else
109 : typedef unsigned char BYTE;
110 : typedef unsigned short U16;
111 : typedef unsigned int U32;
112 : typedef signed int S32;
113 : typedef unsigned long long U64;
114 : #endif
115 :
116 0 : static U32 XXH_read32(const void* memPtr)
117 : {
118 : U32 val32;
119 0 : memcpy(&val32, memPtr, 4);
120 0 : return val32;
121 : }
122 :
123 0 : static U64 XXH_read64(const void* memPtr)
124 : {
125 : U64 val64;
126 0 : memcpy(&val64, memPtr, 8);
127 0 : return val64;
128 : }
129 :
130 :
131 :
132 : /******************************************
133 : * Compiler-specific Functions and Macros
134 : ******************************************/
135 : #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
136 :
137 : /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
/* Fully parenthesize the macro arguments: the previous expansion
 * ((x << r) | (x >> (32 - r))) breaks for expression arguments,
 * e.g. XXH_rotl32(v, a+b) expanded to "x >> (32 - a+b)", which computes
 * the wrong shift count (and can shift by >= the type width: UB). */
# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
145 :
146 : #if defined(_MSC_VER) /* Visual Studio */
147 : # define XXH_swap32 _byteswap_ulong
148 : # define XXH_swap64 _byteswap_uint64
149 : #elif GCC_VERSION >= 403
150 : # define XXH_swap32 __builtin_bswap32
151 : # define XXH_swap64 __builtin_bswap64
152 : #else
/* Portable byte-swap fallbacks; compiled only when neither the MSVC
 * _byteswap_* intrinsics nor GCC's __builtin_bswap* are available. */
static U32 XXH_swap32 (U32 x)
{
    /* Move each byte to its mirrored position and mask off the rest. */
    return ((x << 24) & 0xff000000 ) |
           ((x << 8) & 0x00ff0000 ) |
           ((x >> 8) & 0x0000ff00 ) |
           ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    /* Same idea for 8 bytes: one shift+mask pair per byte lane. */
    return ((x << 56) & 0xff00000000000000ULL) |
           ((x << 40) & 0x00ff000000000000ULL) |
           ((x << 24) & 0x0000ff0000000000ULL) |
           ((x << 8) & 0x000000ff00000000ULL) |
           ((x >> 8) & 0x00000000ff000000ULL) |
           ((x >> 24) & 0x0000000000ff0000ULL) |
           ((x >> 40) & 0x000000000000ff00ULL) |
           ((x >> 56) & 0x00000000000000ffULL);
}
171 : #endif
172 :
173 :
174 : /***************************************
175 : * Architecture Macros
176 : ***************************************/
177 : typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
178 : #ifndef XXH_CPU_LITTLE_ENDIAN /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
179 : static const int one = 1;
180 : # define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&one))
181 : #endif
182 :
183 :
184 : /*****************************
185 : * Memory reads
186 : *****************************/
187 : typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
188 :
189 0 : FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
190 : {
191 0 : if (align==XXH_unaligned)
192 0 : return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
193 : else
194 0 : return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
195 : }
196 :
197 0 : FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
198 : {
199 0 : return XXH_readLE32_align(ptr, endian, XXH_unaligned);
200 : }
201 :
202 0 : FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
203 : {
204 0 : if (align==XXH_unaligned)
205 0 : return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
206 : else
207 0 : return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
208 : }
209 :
210 0 : FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
211 : {
212 0 : return XXH_readLE64_align(ptr, endian, XXH_unaligned);
213 : }
214 :
215 :
216 : /***************************************
217 : * Macros
218 : ***************************************/
219 : #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
220 :
221 :
222 : /***************************************
223 : * Constants
224 : ***************************************/
225 : #define PRIME32_1 2654435761U
226 : #define PRIME32_2 2246822519U
227 : #define PRIME32_3 3266489917U
228 : #define PRIME32_4 668265263U
229 : #define PRIME32_5 374761393U
230 :
231 : #define PRIME64_1 11400714785074694791ULL
232 : #define PRIME64_2 14029467366897019727ULL
233 : #define PRIME64_3 1609587929392839161ULL
234 : #define PRIME64_4 9650029242287828579ULL
235 : #define PRIME64_5 2870177450012600261ULL
236 :
237 :
238 : /*****************************
239 : * Simple Hash Functions
240 : *****************************/
/* One-shot XXH32 core.
 * endian/align are compile-time constants at each instantiation site, so the
 * branches inside XXH_get32bits fold away after inlining.
 * Returns the 32-bit hash of input[0..len-1] for the given seed. */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as an empty input: fake a pointer so p+len arithmetic
     * below stays harmless; no byte is ever dereferenced since len==0. */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16)
    {
        /* Main loop: four independent 32-bit accumulators, 16 bytes/round. */
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        /* Converge the four lanes into a single value. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        /* Short input: skip the lane loop entirely. */
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Consume remaining 4-byte words... */
    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* ...then remaining single bytes. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: mix high bits down so every input bit affects output. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
316 :
317 :
318 0 : unsigned XXH32 (const void* input, size_t len, unsigned seed)
319 : {
320 : #if 0
321 : /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
322 : XXH32_state_t state;
323 : XXH32_reset(&state, seed);
324 : XXH32_update(&state, input, len);
325 : return XXH32_digest(&state);
326 : #else
327 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
328 :
329 : # if !defined(XXH_USE_UNALIGNED_ACCESS)
330 : if ((((size_t)input) & 3) == 0) /* Input is 4-bytes aligned, leverage the speed benefit */
331 : {
332 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
333 : return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
334 : else
335 : return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
336 : }
337 : # endif
338 :
339 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
340 0 : return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
341 : else
342 0 : return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
343 : #endif
344 : }
345 :
/* One-shot XXH64 core: same structure as the 32-bit version but with
 * 64-bit lanes, 32-byte rounds, and an extra per-lane merge step after
 * the main loop. endian/align fold to constants after inlining. */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as empty input; the fake pointer is never dereferenced. */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32)
    {
        /* Main loop: four 64-bit accumulators, 32 bytes per round. */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        /* Merge step (unlike XXH32): fold each scrambled lane into h64. */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Consume remaining 8-byte words. */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* At most one 4-byte word can remain. */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* Remaining single bytes. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
456 :
457 :
458 0 : unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
459 : {
460 : #if 0
461 : /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
462 : XXH64_state_t state;
463 : XXH64_reset(&state, seed);
464 : XXH64_update(&state, input, len);
465 : return XXH64_digest(&state);
466 : #else
467 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
468 :
469 : # if !defined(XXH_USE_UNALIGNED_ACCESS)
470 : if ((((size_t)input) & 7)==0) /* Input is aligned, let's leverage the speed advantage */
471 : {
472 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
473 : return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
474 : else
475 : return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
476 : }
477 : # endif
478 :
479 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
480 0 : return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
481 : else
482 0 : return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
483 : #endif
484 : }
485 :
486 : /****************************************************
487 : * Advanced Hash Functions
488 : ****************************************************/
489 :
490 : /*** Allocation ***/
/* Internal streaming state for XXH32. The public XXH32_state_t is an
 * opaque blob at least this large (checked in XXH32_createState). */
typedef struct
{
    U64 total_len;   /* total bytes fed so far; selects digest path and feeds final length mix */
    U32 seed;
    U32 v1;          /* the four running lane accumulators */
    U32 v2;
    U32 v3;
    U32 v4;
    U32 mem32[4]; /* defined as U32 for alignment */
    U32 memsize;     /* valid bytes currently buffered in mem32 (< 16) */
} XXH_istate32_t;

/* Internal streaming state for XXH64; mirrors XXH_istate32_t with a
 * 32-byte buffer and 64-bit lanes. */
typedef struct
{
    U64 total_len;   /* total bytes fed so far */
    U64 seed;
    U64 v1;          /* the four running lane accumulators */
    U64 v2;
    U64 v3;
    U64 v4;
    U64 mem64[4]; /* defined as U64 for alignment */
    U32 memsize;     /* valid bytes currently buffered in mem64 (< 32) */
} XXH_istate64_t;
514 :
515 :
516 0 : XXH32_state_t* XXH32_createState(void)
517 : {
518 : XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); /* A compilation error here means XXH32_state_t is not large enough */
519 0 : return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
520 : }
521 0 : XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
522 : {
523 0 : XXH_free(statePtr);
524 0 : return XXH_OK;
525 : }
526 :
527 0 : XXH64_state_t* XXH64_createState(void)
528 : {
529 : XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); /* A compilation error here means XXH64_state_t is not large enough */
530 0 : return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
531 : }
532 0 : XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
533 : {
534 0 : XXH_free(statePtr);
535 0 : return XXH_OK;
536 : }
537 :
538 :
539 : /*** Hash feed ***/
540 :
541 0 : XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
542 : {
543 0 : XXH_istate32_t* state = (XXH_istate32_t*) state_in;
544 0 : state->seed = seed;
545 0 : state->v1 = seed + PRIME32_1 + PRIME32_2;
546 0 : state->v2 = seed + PRIME32_2;
547 0 : state->v3 = seed + 0;
548 0 : state->v4 = seed - PRIME32_1;
549 0 : state->total_len = 0;
550 0 : state->memsize = 0;
551 0 : return XXH_OK;
552 : }
553 :
554 0 : XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
555 : {
556 0 : XXH_istate64_t* state = (XXH_istate64_t*) state_in;
557 0 : state->seed = seed;
558 0 : state->v1 = seed + PRIME64_1 + PRIME64_2;
559 0 : state->v2 = seed + PRIME64_2;
560 0 : state->v3 = seed + 0;
561 0 : state->v4 = seed - PRIME64_1;
562 0 : state->total_len = 0;
563 0 : state->memsize = 0;
564 0 : return XXH_OK;
565 : }
566 :
567 :
/* Streaming XXH32: absorb len bytes of input into the state.
 * Data is buffered in state->mem32 until a full 16-byte stripe is
 * available, then processed 16 bytes at a time; the tail is re-buffered. */
FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
    XXH_istate32_t* state = (XXH_istate32_t *) state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16)   /* fill in tmp buffer */
    {
        /* Not enough for a full stripe yet: just buffer and return. */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)   /* some data left from previous update */
    {
        /* Top the buffer up to exactly 16 bytes and run one round on it. */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {
            const U32* p32 = state->mem32;
            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v1 = XXH_rotl32(state->v1, 13);
            state->v1 *= PRIME32_1;
            p32++;
            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v2 = XXH_rotl32(state->v2, 13);
            state->v2 *= PRIME32_1;
            p32++;
            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v3 = XXH_rotl32(state->v3, 13);
            state->v3 *= PRIME32_1;
            p32++;
            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v4 = XXH_rotl32(state->v4, 13);
            state->v4 *= PRIME32_1;
            p32++;
        }
        p += 16-state->memsize;   /* skip the input bytes consumed by the top-up */
        state->memsize = 0;
    }

    /* NOTE(review): bEnd-16 may form a pointer before the input buffer when
     * fewer than 16 bytes remain — works on common platforms but is
     * technically out-of-bounds pointer arithmetic; upstream later rewrote
     * this as p+16 <= bEnd. Confirm before relying on exotic targets. */
    if (p <= bEnd-16)
    {
        /* Bulk loop over full 16-byte stripes, using local lane copies. */
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do
        {
            v1 += XXH_readLE32(p, endian) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_readLE32(p, endian) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_readLE32(p, endian) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_readLE32(p, endian) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        /* Buffer the tail (< 16 bytes) for the next update/digest. */
        XXH_memcpy(state->mem32, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}
656 :
657 0 : XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
658 : {
659 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
660 :
661 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
662 0 : return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
663 : else
664 0 : return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
665 : }
666 :
667 :
668 :
/* Streaming XXH32 finalization: converge the lanes, fold in the buffered
 * tail bytes, then apply the avalanche. Does not modify the state, so
 * callers may digest and keep feeding more data. */
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate32_t* state = (const XXH_istate32_t*) state_in;
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->total_len >= 16)
    {
        /* At least one full stripe was processed: converge the four lanes. */
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    }
    else
    {
        /* Short stream: the lanes were never used. */
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32) state->total_len;

    /* Fold in buffered tail: 4-byte words first... */
    while (p+4<=bEnd)
    {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    /* ...then remaining single bytes. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* Final avalanche (same as the one-shot path). */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
709 :
710 :
711 0 : U32 XXH32_digest (const XXH32_state_t* state_in)
712 : {
713 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
714 :
715 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
716 0 : return XXH32_digest_endian(state_in, XXH_littleEndian);
717 : else
718 0 : return XXH32_digest_endian(state_in, XXH_bigEndian);
719 : }
720 :
721 :
/* Streaming XXH64: absorb len bytes of input into the state.
 * Mirrors XXH32_update_endian with a 32-byte stripe and 64-bit lanes. */
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
    XXH_istate64_t * state = (XXH_istate64_t *) state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32)   /* fill in tmp buffer */
    {
        /* Not enough for a full stripe yet: just buffer and return. */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)   /* some data left from previous update */
    {
        /* Top the buffer up to exactly 32 bytes and run one round on it. */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        {
            const U64* p64 = state->mem64;
            state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v1 = XXH_rotl64(state->v1, 31);
            state->v1 *= PRIME64_1;
            p64++;
            state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v2 = XXH_rotl64(state->v2, 31);
            state->v2 *= PRIME64_1;
            p64++;
            state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v3 = XXH_rotl64(state->v3, 31);
            state->v3 *= PRIME64_1;
            p64++;
            state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v4 = XXH_rotl64(state->v4, 31);
            state->v4 *= PRIME64_1;
            p64++;
        }
        p += 32-state->memsize;   /* skip the input bytes consumed by the top-up */
        state->memsize = 0;
    }

    if (p+32 <= bEnd)
    {
        /* Bulk loop over full 32-byte stripes, using local lane copies. */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do
        {
            v1 += XXH_readLE64(p, endian) * PRIME64_2;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            p+=8;
            v2 += XXH_readLE64(p, endian) * PRIME64_2;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            p+=8;
            v3 += XXH_readLE64(p, endian) * PRIME64_2;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            p+=8;
            v4 += XXH_readLE64(p, endian) * PRIME64_2;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
            p+=8;
        }
        while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        /* Buffer the tail (< 32 bytes) for the next update/digest. */
        XXH_memcpy(state->mem64, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}
810 :
811 0 : XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
812 : {
813 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
814 :
815 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
816 0 : return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
817 : else
818 0 : return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
819 : }
820 :
821 :
822 :
/* Streaming XXH64 finalization: converge and merge the lanes, fold in the
 * buffered tail bytes, then apply the avalanche. Does not modify the state,
 * so callers may digest and keep feeding more data. */
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate64_t * state = (const XXH_istate64_t *) state_in;
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32)
    {
        /* Local copies: the merge below scrambles them, state stays intact. */
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        /* Merge each scrambled lane into h64 (same as the one-shot path). */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64*PRIME64_1 + PRIME64_4;
    }
    else
    {
        /* Short stream: the lanes were never used. */
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    /* Fold in buffered tail: 8-byte words first... */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_readLE64(p, endian);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* ...then at most one 4-byte word... */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* ...then remaining single bytes. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
903 :
904 :
905 0 : unsigned long long XXH64_digest (const XXH64_state_t* state_in)
906 : {
907 0 : XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
908 :
909 0 : if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
910 0 : return XXH64_digest_endian(state_in, XXH_littleEndian);
911 : else
912 0 : return XXH64_digest_endian(state_in, XXH_bigEndian);
913 : }
914 :
915 :
|