/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
89 * Inlining improves performance on small inputs, especially when the length is 90 * expressed as a compile-time constant: 91 * 92 * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html 93 * 94 * It also keeps xxHash symbols private to the unit, so they are not exported. 95 * 96 * Usage: 97 * #define XXH_INLINE_ALL 98 * #include "xxhash.h" 99 * 100 * Do not compile and link xxhash.o as a separate object, as it is not useful. 101 */ 102 #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ 103 && !defined(XXH_INLINE_ALL_31684351384) 104 /* this section should be traversed only once */ 105 # define XXH_INLINE_ALL_31684351384 106 /* give access to the advanced API, required to compile implementations */ 107 # undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ 108 # define XXH_STATIC_LINKING_ONLY 109 /* make all functions private */ 110 # undef XXH_PUBLIC_API 111 # if defined(__GNUC__) 112 # define XXH_PUBLIC_API static __inline __attribute__((unused)) 113 # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) 114 # define XXH_PUBLIC_API static inline 115 # elif defined(_MSC_VER) 116 # define XXH_PUBLIC_API static __inline 117 # else 118 /* note: this version may generate warnings for unused static functions */ 119 # define XXH_PUBLIC_API static 120 # endif 121 122 /* 123 * This part deals with the special case where a unit wants to inline xxHash, 124 * but "xxhash.h" has previously been included without XXH_INLINE_ALL, such 125 * as part of some previously included *.h header file. 126 * Without further action, the new include would just be ignored, 127 * and functions would effectively _not_ be inlined (silent failure). 128 * The following macros solve this situation by prefixing all inlined names, 129 * avoiding naming collision with previous inclusions. 130 */ 131 # ifdef XXH_NAMESPACE 132 # error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported" 133 /* 134 * Note: Alternative: #undef all symbols (it's a pretty large list). 135 * Without #error: it compiles, but functions are actually not inlined. 136 */ 137 # endif 138 # define XXH_NAMESPACE XXH_INLINE_ 139 /* 140 * Some identifiers (enums, type names) are not symbols, but they must 141 * still be renamed to avoid redeclaration. 142 * Alternative solution: do not redeclare them. 143 * However, this requires some #ifdefs, and is a more dispersed action. 
144 * Meanwhile, renaming can be achieved in a single block 145 */ 146 # define XXH_IPREF(Id) XXH_INLINE_ ## Id 147 # define XXH_OK XXH_IPREF(XXH_OK) 148 # define XXH_ERROR XXH_IPREF(XXH_ERROR) 149 # define XXH_errorcode XXH_IPREF(XXH_errorcode) 150 # define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) 151 # define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) 152 # define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) 153 # define XXH32_state_s XXH_IPREF(XXH32_state_s) 154 # define XXH32_state_t XXH_IPREF(XXH32_state_t) 155 # define XXH64_state_s XXH_IPREF(XXH64_state_s) 156 # define XXH64_state_t XXH_IPREF(XXH64_state_t) 157 # define XXH3_state_s XXH_IPREF(XXH3_state_s) 158 # define XXH3_state_t XXH_IPREF(XXH3_state_t) 159 # define XXH128_hash_t XXH_IPREF(XXH128_hash_t) 160 /* Ensure the header is parsed again, even if it was previously included */ 161 # undef XXHASH_H_5627135585666179 162 # undef XXHASH_H_STATIC_13879238742 163 #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ 164 165 166 167 /* **************************************************************** 168 * Stable API 169 *****************************************************************/ 170 #ifndef XXHASH_H_5627135585666179 171 #define XXHASH_H_5627135585666179 1 172 173 174 /*! 175 * @defgroup public Public API 176 * Contains details on the public xxHash functions. 177 * @{ 178 */ 179 /* specific declaration modes for Windows */ 180 #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) 181 # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) 182 # ifdef XXH_EXPORT 183 # define XXH_PUBLIC_API __declspec(dllexport) 184 # elif XXH_IMPORT 185 # define XXH_PUBLIC_API __declspec(dllimport) 186 # endif 187 # else 188 # define XXH_PUBLIC_API /* do nothing */ 189 # endif 190 #endif 191 192 #ifdef XXH_DOXYGEN 193 /*! 194 * @brief Emulate a namespace by transparently prefixing all symbols. 195 * 196 * If you want to include _and expose_ xxHash functions from within your own 197 * library, but also want to avoid symbol collisions with other libraries which 198 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix 199 * any public symbol from xxhash library with the value of XXH_NAMESPACE 200 * (therefore, avoid empty or numeric values). 201 * 202 * Note that no change is required within the calling program as long as it 203 * includes `xxhash.h`: Regular symbol names will be automatically translated 204 * by this header. 
205 */ 206 # define XXH_NAMESPACE /* YOUR NAME HERE */ 207 # undef XXH_NAMESPACE 208 #endif 209 210 #ifdef XXH_NAMESPACE 211 # define XXH_CAT(A,B) A##B 212 # define XXH_NAME2(A,B) XXH_CAT(A,B) 213 # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) 214 /* XXH32 */ 215 # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) 216 # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) 217 # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) 218 # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) 219 # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) 220 # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) 221 # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) 222 # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) 223 # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) 224 /* XXH64 */ 225 # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) 226 # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) 227 # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) 228 # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) 229 # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) 230 # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) 231 # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) 232 # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) 233 # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) 234 /* XXH3_64bits */ 235 # define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) 236 # define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) 237 # define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) 238 # define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) 239 # define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) 240 # define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) 241 # define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) 242 # define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) 243 # define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) 244 # define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) 245 # define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) 246 # define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) 247 /* XXH3_128bits */ 248 # define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) 249 # define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) 250 # define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) 251 # define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) 252 # define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) 253 # define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) 254 # define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) 255 # define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) 256 # define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) 257 # define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) 258 # define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) 259 # define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) 260 # define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, 
XXH128_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  0
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is only useful when xxHash is compiled as a shared library, as it is
 * independent of the version defined in the header.
 *
 * @return `XXH_VERSION_NUMBER` as of when the function was compiled.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/* ****************************
*  Definitions
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;
#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is considered rather weak by today's standards.
 *   The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
 *   systems, and offers true 64/128 bit hash results. It provides a superior
 *   level of dispersion, and greatly reduces the risks of collisions.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
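
/*!
 * Example (illustrative sketch; the function name, buffer and seed value below
 * are arbitrary): one-shot hashing of a memory buffer with XXH32().
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * XXH32_hash_t hashMessage(void)
 * {
 *     const char msg[] = "hello world";
 *     // Hash the whole buffer in a single call, with a seed of 0
 *     return XXH32(msg, strlen(msg), 0);
 * }
 * @endcode
 */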

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bits hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *   #include <stdio.h>
 *   #include <xxhash.h>
 *   #define BUFFER_SIZE 256
 *
 *   // Note: XXH64 and XXH3 use the same interface.
 *   XXH32_hash_t
 *   hashFile(FILE* stream)
 *   {
 *       XXH32_state_t* state;
 *       unsigned char buf[BUFFER_SIZE];
 *       size_t amt;
 *       XXH32_hash_t hash;
 *
 *       state = XXH32_createState();       // Create a state
 *       assert(state != NULL);             // Error check here
 *       XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *       while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *           XXH32_update(state, buf, amt); // Hash the file in chunks
 *       }
 *       hash = XXH32_digest(state);        // Finalize the hash
 *       XXH32_freeState(state);            // Clean up
 *       return hash;
 *   }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
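
/*!
 * Example (illustrative sketch; function and parameter names below are
 * arbitrary): a single state can be reused for many independent hashes by
 * calling XXH32_reset() before each message.
 * @code{.c}
 * void hashMany(XXH32_state_t* state,
 *               const void* const* msgs, const size_t* lens, size_t count,
 *               XXH32_hash_t* out)
 * {
 *     size_t i;
 *     for (i = 0; i < count; i++) {
 *         XXH32_reset(state, 0);             // start a fresh hash
 *         XXH32_update(state, msgs[i], lens[i]);
 *         out[i] = XXH32_digest(state);      // one result per message
 *     }
 * }
 * @endcode
 */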

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/******* Canonical representation *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
561 */ 562 typedef uint64_t XXH64_hash_t; 563 #elif !defined (__VMS) \ 564 && (defined (__cplusplus) \ 565 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) 566 # include <stdint.h> 567 typedef uint64_t XXH64_hash_t; 568 #else 569 # include <limits.h> 570 # if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL 571 /* LP64 ABI says uint64_t is unsigned long */ 572 typedef unsigned long XXH64_hash_t; 573 # else 574 /* the following type must have a width of 64-bit */ 575 typedef unsigned long long XXH64_hash_t; 576 # endif 577 #endif 578 579 /*! 580 * @} 581 * 582 * @defgroup xxh64_family XXH64 family 583 * @ingroup public 584 * @{ 585 * Contains functions used in the classic 64-bit xxHash algorithm. 586 * 587 * @note 588 * XXH3 provides competitive speed for both 32-bit and 64-bit systems, 589 * and offers true 64/128 bit hash results. It provides a superior level of 590 * dispersion, and greatly reduces the risks of collisions. 591 */ 592 593 594 /*! 595 * @brief Calculates the 64-bit hash of @p input using xxHash64. 596 * 597 * This function usually runs faster on 64-bit systems, but slower on 32-bit 598 * systems (see benchmark). 599 * 600 * @param input The block of data to be hashed, at least @p length bytes in size. 601 * @param length The length of @p input, in bytes. 602 * @param seed The 64-bit seed to alter the hash's output predictably. 603 * 604 * @pre 605 * The memory between @p input and @p input + @p length must be valid, 606 * readable, contiguous memory. However, if @p length is `0`, @p input may be 607 * `NULL`. In C++, this also must be *TriviallyCopyable*. 608 * 609 * @return The calculated 64-bit hash. 610 * 611 * @see 612 * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): 613 * Direct equivalents for the other variants of xxHash. 614 * @see 615 * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. 616 */ 617 XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed); 618 619 /******* Streaming *******/ 620 /*! 621 * @brief The opaque state struct for the XXH64 streaming API. 622 * 623 * @see XXH64_state_s for details. 624 */ 625 typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ 626 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); 627 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); 628 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); 629 630 XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed); 631 XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); 632 XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); 633 634 /******* Canonical representation *******/ 635 typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; 636 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); 637 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); 638 639 /*! 
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones,
 * though exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
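
/*!
 * Example (illustrative sketch; the function name and seed constant below are
 * arbitrary): seeded one-shot hashing with XXH3_64bits_withSeed().
 * @code{.c}
 * XXH64_hash_t hashWithSeed(const void* data, size_t len)
 * {
 *     const XXH64_hash_t seed = 0x9E3779B185EBCA87ULL;  // any 64-bit value
 *     // seed == 0 would give the same result as XXH3_64bits(data, len)
 *     return XXH3_64bits_withSeed(data, len, seed);
 * }
 * @endcode
 */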

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of produced hash values depends on secret's entropy.
 * Technically, the secret must look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever unsure about the "randomness" of the blob of bytes,
 * consider relabelling it as a "custom seed" instead,
 * and employ "XXH3_generateSecret()" (see below)
 * to generate a high entropy secret derived from the custom seed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);


/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;  /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
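
/*!
 * Example (illustrative sketch; the function name below is arbitrary): one-shot
 * 128-bit hashing and comparison of two results, using XXH3_128bits() and
 * XXH128_isEqual(), which is declared further below.
 * @code{.c}
 * int sameDigest(const void* a, size_t lenA, const void* b, size_t lenB)
 * {
 *     XXH128_hash_t ha = XXH3_128bits(a, lenA);
 *     XXH128_hash_t hb = XXH3_128bits(b, lenB);
 *     // Equal hashes strongly suggest (but do not prove) equal content
 *     return XXH128_isEqual(ha, hb);
 * }
 * @endcode
 */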

/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have same meaning as their 64-bit counterpart.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* Following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);


/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
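
/*!
 * Example (illustrative sketch; the function names below are arbitrary):
 * serializing a hash in canonical (big endian) form so it can be stored or
 * transmitted portably, then converting it back to a native integer.
 * @code{.c}
 * #include <stdio.h>
 *
 * int writeHash(FILE* out, XXH64_hash_t hash)
 * {
 *     XXH64_canonical_t canon;
 *     XXH64_canonicalFromHash(&canon, hash);   // fixed big-endian byte layout
 *     return fwrite(canon.digest, sizeof(canon.digest), 1, out) == 1;
 * }
 *
 * XXH64_hash_t readHash(const XXH64_canonical_t* canon)
 * {
 *     return XXH64_hashFromCanonical(canon);   // back to a native integer
 * }
 * @endcode
 */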


#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v1;           /*!< First accumulator lane */
   XXH32_hash_t v2;           /*!< Second accumulator lane */
   XXH32_hash_t v3;           /*!< Third accumulator lane */
   XXH32_hash_t v4;           /*!< Fourth accumulator lane */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v1;           /*!< First accumulator lane */
   XXH64_hash_t v2;           /*!< Second accumulator lane */
   XXH64_hash_t v3;           /*!< Third accumulator lane */
   XXH64_hash_t v4;           /*!< Fourth accumulator lane */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it, it may be removed. */
};   /* typedef'd to XXH64_state_t */

#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * @note **This structure has a strict alignment requirement of 64 bytes.** Do
 * not allocate this with `malloc()` or `new`, it will not be sufficiently
 * aligned. Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack
 * allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Do not access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t reserved32;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
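
/*!
 * Example (illustrative sketch; function name, parameters and seed value below
 * are arbitrary): streaming with a stack-allocated state, initialized with
 * XXH3_INITSTATE() because the first reset is seeded.
 * @code{.c}
 * XXH64_hash_t hashChunks(const void* const* chunks, const size_t* lens, size_t n)
 * {
 *     XXH3_state_t state;                       // on the stack, no malloc
 *     size_t i;
 *     XXH3_INITSTATE(&state);                   // required before a seeded reset
 *     XXH3_64bits_reset_withSeed(&state, 42);   // start a seeded streaming hash
 *     for (i = 0; i < n; i++) {
 *         XXH3_64bits_update(&state, chunks[i], lens[i]);
 *     }
 *     return XXH3_64bits_digest(&state);        // no freeState needed
 * }
 * @endcode
 */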


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
 * into an already allocated buffer secretBuffer.
 * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE bytes long.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * this function can be used to generate a secret of proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even stupidly "low entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all expected qualities.
 *
 * Supplying NULL as the customSeed copies the default secret into `secretBuffer`.
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize);


/* simple short-cut to pre-selected XXH3_128bits variant */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in /include directory.
 *
 * xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
1111 * 1112 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler. 1113 */ 1114 # define XXH_NO_LONG_LONG 1115 # undef XXH_NO_LONG_LONG /* don't actually */ 1116 /*! 1117 * @brief Controls how unaligned memory is accessed. 1118 * 1119 * By default, access to unaligned memory is controlled by `memcpy()`, which is 1120 * safe and portable. 1121 * 1122 * Unfortunately, on some target/compiler combinations, the generated assembly 1123 * is sub-optimal. 1124 * 1125 * The below switch allow selection of a different access method 1126 * in the search for improved performance. 1127 * 1128 * @par Possible options: 1129 * 1130 * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` 1131 * @par 1132 * Use `memcpy()`. Safe and portable. Note that most modern compilers will 1133 * eliminate the function call and treat it as an unaligned access. 1134 * 1135 * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))` 1136 * @par 1137 * Depends on compiler extensions and is therefore not portable. 1138 * This method is safe if your compiler supports it, and *generally* as 1139 * fast or faster than `memcpy`. 1140 * 1141 * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast 1142 * @par 1143 * Casts directly and dereferences. This method doesn't depend on the 1144 * compiler, but it violates the C standard as it directly dereferences an 1145 * unaligned pointer. It can generate buggy code on targets which do not 1146 * support unaligned memory accesses, but in some circumstances, it's the 1147 * only known way to get the most performance (example: GCC + ARMv6). 1148 * 1149 * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift 1150 * @par 1151 * Also portable. This can generate the best code on old compilers which don't 1152 * inline small `memcpy()` calls, and it might also be faster on big-endian 1153 * systems which lack a native byteswap instruction. However, some compilers 1154 * will emit literal byteshifts even if the target supports unaligned access. 1155 * 1156 * . 1157 * 1158 * @warning 1159 * Methods 1 and 2 rely on implementation-defined behavior. Use these with 1160 * care, as what works on one compiler/platform/optimization level may cause 1161 * another to read garbage data or even crash. 1162 * 1163 * See https://stackoverflow.com/a/32095106/646947 for details. 1164 * 1165 * Prefer these methods in priority order (0 > 3 > 1 > 2) 1166 */ 1167 # define XXH_FORCE_MEMORY_ACCESS 0 1168 /*! 1169 * @def XXH_ACCEPT_NULL_INPUT_POINTER 1170 * @brief Whether to add explicit `NULL` checks. 1171 * 1172 * If the input pointer is `NULL` and the length is non-zero, xxHash's default 1173 * behavior is to dereference it, triggering a segfault. 1174 * 1175 * When this macro is enabled, xxHash actively checks the input for a null pointer. 1176 * If it is, the result for null input pointers is the same as a zero-length input. 1177 */ 1178 # define XXH_ACCEPT_NULL_INPUT_POINTER 0 1179 /*! 1180 * @def XXH_FORCE_ALIGN_CHECK 1181 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() 1182 * and XXH64() only). 1183 * 1184 * This is an important performance trick for architectures without decent 1185 * unaligned memory access performance. 1186 * 1187 * It checks for input alignment, and when conditions are met, uses a "fast 1188 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically 1189 * faster_ read speed. 1190 * 1191 * The check costs one initial branch per hash, which is generally negligible, 1192 * but not zero. 
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * Align check is automatically disabled on x86, x64 & arm64,
 * which are platforms known to offer good unaligned memory accesses performance.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
 * -fno-inline with GCC or Clang, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH_REROLL
 * @brief Whether to reroll `XXH32_finalize` and `XXH64_finalize`.
 *
 * For performance, `XXH32_finalize` and `XXH64_finalize` use an unrolled loop
 * in the form of a switch statement.
 *
 * This is not always desirable, as it generates larger code, and depending on
 * the architecture, may even be slower.
 *
 * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
 */
#  define XXH_REROLL 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
1254 * @} 1255 */ 1256 1257 #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ 1258 # if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) 1259 # define XXH_FORCE_MEMORY_ACCESS 2 1260 # elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ 1261 (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) 1262 # define XXH_FORCE_MEMORY_ACCESS 1 1263 # endif 1264 #endif 1265 1266 #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ 1267 # define XXH_ACCEPT_NULL_INPUT_POINTER 0 1268 #endif 1269 1270 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ 1271 # if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ 1272 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ 1273 # define XXH_FORCE_ALIGN_CHECK 0 1274 # else 1275 # define XXH_FORCE_ALIGN_CHECK 1 1276 # endif 1277 #endif 1278 1279 #ifndef XXH_NO_INLINE_HINTS 1280 # if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ 1281 || defined(__NO_INLINE__) /* -O0, -fno-inline */ 1282 # define XXH_NO_INLINE_HINTS 1 1283 # else 1284 # define XXH_NO_INLINE_HINTS 0 1285 # endif 1286 #endif 1287 1288 #ifndef XXH_REROLL 1289 # if defined(__OPTIMIZE_SIZE__) 1290 # define XXH_REROLL 1 1291 # else 1292 # define XXH_REROLL 0 1293 # endif 1294 #endif 1295 1296 /*! 1297 * @defgroup impl Implementation 1298 * @{ 1299 */ 1300 1301 1302 /* ************************************* 1303 * Includes & Memory related functions 1304 ***************************************/ 1305 /* 1306 * Modify the local functions below should you wish to use 1307 * different memory routines for malloc() and free() 1308 */ 1309 #include <stdlib.h> 1310 1311 /*! 1312 * @internal 1313 * @brief Modify this function to use a different routine than malloc(). 1314 */ 1315 static void* XXH_malloc(size_t s) { return malloc(s); } 1316 1317 /*! 1318 * @internal 1319 * @brief Modify this function to use a different routine than free(). 1320 */ 1321 static void XXH_free(void* p) { free(p); } 1322 1323 #include <string.h> 1324 1325 /*! 1326 * @internal 1327 * @brief Modify this function to use a different routine than memcpy(). 
1328 */ 1329 static void* XXH_memcpy(void* dest, const void* src, size_t size) 1330 { 1331 return memcpy(dest,src,size); 1332 } 1333 1334 #include <limits.h> /* ULLONG_MAX */ 1335 1336 1337 /* ************************************* 1338 * Compiler Specific Options 1339 ***************************************/ 1340 #ifdef _MSC_VER /* Visual Studio warning fix */ 1341 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ 1342 #endif 1343 1344 #if XXH_NO_INLINE_HINTS /* disable inlining hints */ 1345 # if defined(__GNUC__) 1346 # define XXH_FORCE_INLINE static __attribute__((unused)) 1347 # else 1348 # define XXH_FORCE_INLINE static 1349 # endif 1350 # define XXH_NO_INLINE static 1351 /* enable inlining hints */ 1352 #elif defined(_MSC_VER) /* Visual Studio */ 1353 # define XXH_FORCE_INLINE static __forceinline 1354 # define XXH_NO_INLINE static __declspec(noinline) 1355 #elif defined(__GNUC__) 1356 # define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) 1357 # define XXH_NO_INLINE static __attribute__((noinline)) 1358 #elif defined (__cplusplus) \ 1359 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ 1360 # define XXH_FORCE_INLINE static inline 1361 # define XXH_NO_INLINE static 1362 #else 1363 # define XXH_FORCE_INLINE static 1364 # define XXH_NO_INLINE static 1365 #endif 1366 1367 1368 1369 /* ************************************* 1370 * Debug 1371 ***************************************/ 1372 /*! 1373 * @ingroup tuning 1374 * @def XXH_DEBUGLEVEL 1375 * @brief Sets the debugging level. 1376 * 1377 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the 1378 * compiler's command line options. The value must be a number. 1379 */ 1380 #ifndef XXH_DEBUGLEVEL 1381 # ifdef DEBUGLEVEL /* backwards compat */ 1382 # define XXH_DEBUGLEVEL DEBUGLEVEL 1383 # else 1384 # define XXH_DEBUGLEVEL 0 1385 # endif 1386 #endif 1387 1388 #if (XXH_DEBUGLEVEL>=1) 1389 # include <assert.h> /* note: can still be disabled with NDEBUG */ 1390 # define XXH_ASSERT(c) assert(c) 1391 #else 1392 # define XXH_ASSERT(c) ((void)0) 1393 #endif 1394 1395 /* note: use after variable declarations */ 1396 #define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0) 1397 1398 1399 /* ************************************* 1400 * Basic Types 1401 ***************************************/ 1402 #if !defined (__VMS) \ 1403 && (defined (__cplusplus) \ 1404 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) 1405 # include <stdint.h> 1406 typedef uint8_t xxh_u8; 1407 #else 1408 typedef unsigned char xxh_u8; 1409 #endif 1410 typedef XXH32_hash_t xxh_u32; 1411 1412 #ifdef XXH_OLD_NAMES 1413 # define BYTE xxh_u8 1414 # define U8 xxh_u8 1415 # define U32 xxh_u32 1416 #endif 1417 1418 /* *** Memory access *** */ 1419 1420 /*! 1421 * @internal 1422 * @fn xxh_u32 XXH_read32(const void* ptr) 1423 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. 1424 * 1425 * Affected by @ref XXH_FORCE_MEMORY_ACCESS. 1426 * 1427 * @param ptr The pointer to read from. 1428 * @return The 32-bit native endian integer from the bytes at @p ptr. 1429 */ 1430 1431 /*! 1432 * @internal 1433 * @fn xxh_u32 XXH_readLE32(const void* ptr) 1434 * @brief Reads an unaligned 32-bit little endian integer from @p ptr. 1435 * 1436 * Affected by @ref XXH_FORCE_MEMORY_ACCESS. 1437 * 1438 * @param ptr The pointer to read from. 1439 * @return The 32-bit little endian integer from the bytes at @p ptr. 
1440 */ 1441 1442 /*! 1443 * @internal 1444 * @fn xxh_u32 XXH_readBE32(const void* ptr) 1445 * @brief Reads an unaligned 32-bit big endian integer from @p ptr. 1446 * 1447 * Affected by @ref XXH_FORCE_MEMORY_ACCESS. 1448 * 1449 * @param ptr The pointer to read from. 1450 * @return The 32-bit big endian integer from the bytes at @p ptr. 1451 */ 1452 1453 /*! 1454 * @internal 1455 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) 1456 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. 1457 * 1458 * Affected by @ref XXH_FORCE_MEMORY_ACCESS. 1459 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is 1460 * always @ref XXH_alignment::XXH_unaligned. 1461 * 1462 * @param ptr The pointer to read from. 1463 * @param align Whether @p ptr is aligned. 1464 * @pre 1465 * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte 1466 * aligned. 1467 * @return The 32-bit little endian integer from the bytes at @p ptr. 1468 */ 1469 1470 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 1471 /* 1472 * Manual byteshift. Best for old compilers which don't inline memcpy. 1473 * We actually directly use XXH_readLE32 and XXH_readBE32. 1474 */ 1475 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) 1476 1477 /* 1478 * Force direct memory access. Only works on CPU which support unaligned memory 1479 * access in hardware. 1480 */ 1481 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } 1482 1483 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) 1484 1485 /* 1486 * __pack instructions are safer but compiler specific, hence potentially 1487 * problematic for some compilers. 1488 * 1489 * Currently only defined for GCC and ICC. 1490 */ 1491 #ifdef XXH_OLD_NAMES 1492 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; 1493 #endif 1494 static xxh_u32 XXH_read32(const void* ptr) 1495 { 1496 typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign; 1497 return ((const xxh_unalign*)ptr)->u32; 1498 } 1499 1500 #else 1501 1502 /* 1503 * Portable and safe solution. Generally efficient. 1504 * see: https://stackoverflow.com/a/32095106/646947 1505 */ 1506 static xxh_u32 XXH_read32(const void* memPtr) 1507 { 1508 xxh_u32 val; 1509 memcpy(&val, memPtr, sizeof(val)); 1510 return val; 1511 } 1512 1513 #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ 1514 1515 1516 /* *** Endianess *** */ 1517 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; 1518 1519 /*! 1520 * @ingroup tuning 1521 * @def XXH_CPU_LITTLE_ENDIAN 1522 * @brief Whether the target is little endian. 1523 * 1524 * Defined to 1 if the target is little endian, or 0 if it is big endian. 1525 * It can be defined externally, for example on the compiler command line. 1526 * 1527 * If it is not defined, a runtime check (which is usually constant folded) 1528 * is used instead. 1529 * 1530 * @note 1531 * This is not necessarily defined to an integer constant. 1532 * 1533 * @see XXH_isLittleEndian() for the runtime check. 
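 *
 * For example, a cross-compilation setup that already knows its target could
 * skip detection entirely (illustrative command line):
 *
 *     cc -O3 -DXXH_CPU_LITTLE_ENDIAN=1 -c xxhash.c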
1534 */ 1535 #ifndef XXH_CPU_LITTLE_ENDIAN 1536 /* 1537 * Try to detect endianness automatically, to avoid the nonstandard behavior 1538 * in `XXH_isLittleEndian()` 1539 */ 1540 # if defined(_WIN32) /* Windows is always little endian */ \ 1541 || defined(__LITTLE_ENDIAN__) \ 1542 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) 1543 # define XXH_CPU_LITTLE_ENDIAN 1 1544 # elif defined(__BIG_ENDIAN__) \ 1545 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) 1546 # define XXH_CPU_LITTLE_ENDIAN 0 1547 # else 1548 /*! 1549 * @internal 1550 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. 1551 * 1552 * Most compilers will constant fold this. 1553 */ 1554 static int XXH_isLittleEndian(void) 1555 { 1556 /* 1557 * Portable and well-defined behavior. 1558 * Don't use static: it is detrimental to performance. 1559 */ 1560 const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; 1561 return one.c[0]; 1562 } 1563 # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() 1564 # endif 1565 #endif 1566 1567 1568 1569 1570 /* **************************************** 1571 * Compiler-specific Functions and Macros 1572 ******************************************/ 1573 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) 1574 1575 #ifdef __has_builtin 1576 # define XXH_HAS_BUILTIN(x) __has_builtin(x) 1577 #else 1578 # define XXH_HAS_BUILTIN(x) 0 1579 #endif 1580 1581 /*! 1582 * @internal 1583 * @def XXH_rotl32(x,r) 1584 * @brief 32-bit rotate left. 1585 * 1586 * @param x The 32-bit integer to be rotated. 1587 * @param r The number of bits to rotate. 1588 * @pre 1589 * @p r > 0 && @p r < 32 1590 * @note 1591 * @p x and @p r may be evaluated multiple times. 1592 * @return The rotated result. 1593 */ 1594 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ 1595 && XXH_HAS_BUILTIN(__builtin_rotateleft64) 1596 # define XXH_rotl32 __builtin_rotateleft32 1597 # define XXH_rotl64 __builtin_rotateleft64 1598 /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ 1599 #elif defined(_MSC_VER) 1600 # define XXH_rotl32(x,r) _rotl(x,r) 1601 # define XXH_rotl64(x,r) _rotl64(x,r) 1602 #else 1603 # define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) 1604 # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) 1605 #endif 1606 1607 /*! 1608 * @internal 1609 * @fn xxh_u32 XXH_swap32(xxh_u32 x) 1610 * @brief A 32-bit byteswap. 1611 * 1612 * @param x The 32-bit integer to byteswap. 1613 * @return @p x, byteswapped. 1614 */ 1615 #if defined(_MSC_VER) /* Visual Studio */ 1616 # define XXH_swap32 _byteswap_ulong 1617 #elif XXH_GCC_VERSION >= 403 1618 # define XXH_swap32 __builtin_bswap32 1619 #else 1620 static xxh_u32 XXH_swap32 (xxh_u32 x) 1621 { 1622 return ((x << 24) & 0xff000000 ) | 1623 ((x << 8) & 0x00ff0000 ) | 1624 ((x >> 8) & 0x0000ff00 ) | 1625 ((x >> 24) & 0x000000ff ); 1626 } 1627 #endif 1628 1629 1630 /* *************************** 1631 * Memory reads 1632 *****************************/ 1633 1634 /*! 1635 * @internal 1636 * @brief Enum to indicate whether a pointer is aligned. 1637 */ 1638 typedef enum { 1639 XXH_aligned, /*!< Aligned */ 1640 XXH_unaligned /*!< Possibly unaligned */ 1641 } XXH_alignment; 1642 1643 /* 1644 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. 1645 * 1646 * This is ideal for older compilers which don't inline memcpy. 
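 *
 * It is never selected automatically; request it explicitly, for example with
 * -DXXH_FORCE_MEMORY_ACCESS=3 on the compiler command line.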
1647 */ 1648 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 1649 1650 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) 1651 { 1652 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 1653 return bytePtr[0] 1654 | ((xxh_u32)bytePtr[1] << 8) 1655 | ((xxh_u32)bytePtr[2] << 16) 1656 | ((xxh_u32)bytePtr[3] << 24); 1657 } 1658 1659 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) 1660 { 1661 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 1662 return bytePtr[3] 1663 | ((xxh_u32)bytePtr[2] << 8) 1664 | ((xxh_u32)bytePtr[1] << 16) 1665 | ((xxh_u32)bytePtr[0] << 24); 1666 } 1667 1668 #else 1669 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) 1670 { 1671 return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); 1672 } 1673 1674 static xxh_u32 XXH_readBE32(const void* ptr) 1675 { 1676 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); 1677 } 1678 #endif 1679 1680 XXH_FORCE_INLINE xxh_u32 1681 XXH_readLE32_align(const void* ptr, XXH_alignment align) 1682 { 1683 if (align==XXH_unaligned) { 1684 return XXH_readLE32(ptr); 1685 } else { 1686 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); 1687 } 1688 } 1689 1690 1691 /* ************************************* 1692 * Misc 1693 ***************************************/ 1694 /*! @ingroup public */ 1695 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } 1696 1697 1698 /* ******************************************************************* 1699 * 32-bit hash functions 1700 *********************************************************************/ 1701 /*! 1702 * @} 1703 * @defgroup xxh32_impl XXH32 implementation 1704 * @ingroup impl 1705 * @{ 1706 */ 1707 static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U; /*!< 0b10011110001101110111100110110001 */ 1708 static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U; /*!< 0b10000101111010111100101001110111 */ 1709 static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU; /*!< 0b11000010101100101010111000111101 */ 1710 static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU; /*!< 0b00100111110101001110101100101111 */ 1711 static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /*!< 0b00010110010101100110011110110001 */ 1712 1713 #ifdef XXH_OLD_NAMES 1714 # define PRIME32_1 XXH_PRIME32_1 1715 # define PRIME32_2 XXH_PRIME32_2 1716 # define PRIME32_3 XXH_PRIME32_3 1717 # define PRIME32_4 XXH_PRIME32_4 1718 # define PRIME32_5 XXH_PRIME32_5 1719 #endif 1720 1721 /*! 1722 * @internal 1723 * @brief Normal stripe processing routine. 1724 * 1725 * This shuffles the bits so that any bit from @p input impacts several bits in 1726 * @p acc. 1727 * 1728 * @param acc The accumulator lane. 1729 * @param input The stripe of input to mix. 1730 * @return The mixed accumulator lane. 1731 */ 1732 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) 1733 { 1734 acc += input * XXH_PRIME32_2; 1735 acc = XXH_rotl32(acc, 13); 1736 acc *= XXH_PRIME32_1; 1737 #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE) 1738 /* 1739 * UGLY HACK: 1740 * This inline assembly hack forces acc into a normal register. This is the 1741 * only thing that prevents GCC and Clang from autovectorizing the XXH32 1742 * loop (pragmas and attributes don't work for some resason) without globally 1743 * disabling SSE4.1. 
1744 * 1745 * The reason we want to avoid vectorization is because despite working on 1746 * 4 integers at a time, there are multiple factors slowing XXH32 down on 1747 * SSE4: 1748 * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on 1749 * newer chips!) making it slightly slower to multiply four integers at 1750 * once compared to four integers independently. Even when pmulld was 1751 * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE 1752 * just to multiply unless doing a long operation. 1753 * 1754 * - Four instructions are required to rotate, 1755 * movqda tmp, v // not required with VEX encoding 1756 * pslld tmp, 13 // tmp <<= 13 1757 * psrld v, 19 // x >>= 19 1758 * por v, tmp // x |= tmp 1759 * compared to one for scalar: 1760 * roll v, 13 // reliably fast across the board 1761 * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason 1762 * 1763 * - Instruction level parallelism is actually more beneficial here because 1764 * the SIMD actually serializes this operation: While v1 is rotating, v2 1765 * can load data, while v3 can multiply. SSE forces them to operate 1766 * together. 1767 * 1768 * How this hack works: 1769 * __asm__("" // Declare an assembly block but don't declare any instructions 1770 * : // However, as an Input/Output Operand, 1771 * "+r" // constrain a read/write operand (+) as a general purpose register (r). 1772 * (acc) // and set acc as the operand 1773 * ); 1774 * 1775 * Because of the 'r', the compiler has promised that seed will be in a 1776 * general purpose register and the '+' says that it will be 'read/write', 1777 * so it has to assume it has changed. It is like volatile without all the 1778 * loads and stores. 1779 * 1780 * Since the argument has to be in a normal register (not an SSE register), 1781 * each time XXH32_round is called, it is impossible to vectorize. 1782 */ 1783 __asm__("" : "+r" (acc)); 1784 #endif 1785 return acc; 1786 } 1787 1788 /*! 1789 * @internal 1790 * @brief Mixes all bits to finalize the hash. 1791 * 1792 * The final mix ensures that all input bits have a chance to impact any bit in 1793 * the output digest, resulting in an unbiased distribution. 1794 * 1795 * @param h32 The hash to avalanche. 1796 * @return The avalanched hash. 1797 */ 1798 static xxh_u32 XXH32_avalanche(xxh_u32 h32) 1799 { 1800 h32 ^= h32 >> 15; 1801 h32 *= XXH_PRIME32_2; 1802 h32 ^= h32 >> 13; 1803 h32 *= XXH_PRIME32_3; 1804 h32 ^= h32 >> 16; 1805 return(h32); 1806 } 1807 1808 #define XXH_get32bits(p) XXH_readLE32_align(p, align) 1809 1810 /*! 1811 * @internal 1812 * @brief Processes the last 0-15 bytes of @p ptr. 1813 * 1814 * There may be up to 15 bytes remaining to consume from the input. 1815 * This final stage will digest them to ensure that all input bytes are present 1816 * in the final mix. 1817 * 1818 * @param h32 The hash to finalize. 1819 * @param ptr The pointer to the remaining input. 1820 * @param len The remaining length, modulo 16. 1821 * @param align Whether @p ptr is aligned. 1822 * @return The finalized hash. 
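 *
 * Worked example (sketch): for a 23-byte input, the main loop consumes the
 * first 16 bytes, and the remaining 7 are digested here as one 4-byte step
 * followed by three 1-byte steps, before the final avalanche.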
1823 */ 1824 static xxh_u32 1825 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) 1826 { 1827 #define XXH_PROCESS1 do { \ 1828 h32 += (*ptr++) * XXH_PRIME32_5; \ 1829 h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ 1830 } while (0) 1831 1832 #define XXH_PROCESS4 do { \ 1833 h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ 1834 ptr += 4; \ 1835 h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ 1836 } while (0) 1837 1838 /* Compact rerolled version */ 1839 if (XXH_REROLL) { 1840 len &= 15; 1841 while (len >= 4) { 1842 XXH_PROCESS4; 1843 len -= 4; 1844 } 1845 while (len > 0) { 1846 XXH_PROCESS1; 1847 --len; 1848 } 1849 return XXH32_avalanche(h32); 1850 } else { 1851 switch(len&15) /* or switch(bEnd - p) */ { 1852 case 12: XXH_PROCESS4; 1853 /* fallthrough */ 1854 case 8: XXH_PROCESS4; 1855 /* fallthrough */ 1856 case 4: XXH_PROCESS4; 1857 return XXH32_avalanche(h32); 1858 1859 case 13: XXH_PROCESS4; 1860 /* fallthrough */ 1861 case 9: XXH_PROCESS4; 1862 /* fallthrough */ 1863 case 5: XXH_PROCESS4; 1864 XXH_PROCESS1; 1865 return XXH32_avalanche(h32); 1866 1867 case 14: XXH_PROCESS4; 1868 /* fallthrough */ 1869 case 10: XXH_PROCESS4; 1870 /* fallthrough */ 1871 case 6: XXH_PROCESS4; 1872 XXH_PROCESS1; 1873 XXH_PROCESS1; 1874 return XXH32_avalanche(h32); 1875 1876 case 15: XXH_PROCESS4; 1877 /* fallthrough */ 1878 case 11: XXH_PROCESS4; 1879 /* fallthrough */ 1880 case 7: XXH_PROCESS4; 1881 /* fallthrough */ 1882 case 3: XXH_PROCESS1; 1883 /* fallthrough */ 1884 case 2: XXH_PROCESS1; 1885 /* fallthrough */ 1886 case 1: XXH_PROCESS1; 1887 /* fallthrough */ 1888 case 0: return XXH32_avalanche(h32); 1889 } 1890 XXH_ASSERT(0); 1891 return h32; /* reaching this point is deemed impossible */ 1892 } 1893 } 1894 1895 #ifdef XXH_OLD_NAMES 1896 # define PROCESS1 XXH_PROCESS1 1897 # define PROCESS4 XXH_PROCESS4 1898 #else 1899 # undef XXH_PROCESS1 1900 # undef XXH_PROCESS4 1901 #endif 1902 1903 /*! 1904 * @internal 1905 * @brief The implementation for @ref XXH32(). 1906 * 1907 * @param input, len, seed Directly passed from @ref XXH32(). 1908 * @param align Whether @p input is aligned. 1909 * @return The calculated hash. 1910 */ 1911 XXH_FORCE_INLINE xxh_u32 1912 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) 1913 { 1914 const xxh_u8* bEnd = input + len; 1915 xxh_u32 h32; 1916 1917 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) 1918 if (input==NULL) { 1919 len=0; 1920 bEnd=input=(const xxh_u8*)(size_t)16; 1921 } 1922 #endif 1923 1924 if (len>=16) { 1925 const xxh_u8* const limit = bEnd - 15; 1926 xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; 1927 xxh_u32 v2 = seed + XXH_PRIME32_2; 1928 xxh_u32 v3 = seed + 0; 1929 xxh_u32 v4 = seed - XXH_PRIME32_1; 1930 1931 do { 1932 v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; 1933 v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; 1934 v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; 1935 v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; 1936 } while (input < limit); 1937 1938 h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) 1939 + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); 1940 } else { 1941 h32 = seed + XXH_PRIME32_5; 1942 } 1943 1944 h32 += (xxh_u32)len; 1945 1946 return XXH32_finalize(h32, input, len&15, align); 1947 } 1948 1949 /*! 
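 * One-shot usage sketch (illustrative; `buffer`/`bufferSize` stand for the
 * application's data):
 *
 *     #include "xxhash.h"
 *
 *     XXH32_hash_t const h = XXH32(buffer, bufferSize, 0);   // 0 = seed
 *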
@ingroup xxh32_family */ 1950 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) 1951 { 1952 #if 0 1953 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 1954 XXH32_state_t state; 1955 XXH32_reset(&state, seed); 1956 XXH32_update(&state, (const xxh_u8*)input, len); 1957 return XXH32_digest(&state); 1958 #else 1959 if (XXH_FORCE_ALIGN_CHECK) { 1960 if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ 1961 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); 1962 } } 1963 1964 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); 1965 #endif 1966 } 1967 1968 1969 1970 /******* Hash streaming *******/ 1971 /*! 1972 * @ingroup xxh32_family 1973 */ 1974 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) 1975 { 1976 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); 1977 } 1978 /*! @ingroup xxh32_family */ 1979 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) 1980 { 1981 XXH_free(statePtr); 1982 return XXH_OK; 1983 } 1984 1985 /*! @ingroup xxh32_family */ 1986 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) 1987 { 1988 memcpy(dstState, srcState, sizeof(*dstState)); 1989 } 1990 1991 /*! @ingroup xxh32_family */ 1992 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) 1993 { 1994 XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ 1995 memset(&state, 0, sizeof(state)); 1996 state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; 1997 state.v2 = seed + XXH_PRIME32_2; 1998 state.v3 = seed + 0; 1999 state.v4 = seed - XXH_PRIME32_1; 2000 /* do not write into reserved, planned to be removed in a future version */ 2001 memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); 2002 return XXH_OK; 2003 } 2004 2005 2006 /*! 
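 * Streaming usage sketch (illustrative; `part1`/`part2` stand for consecutive
 * pieces of the input, and error checks are omitted for brevity):
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     XXH32_reset(st, 0);
 *     XXH32_update(st, part1, part1Size);
 *     XXH32_update(st, part2, part2Size);
 *     XXH32_hash_t const h = XXH32_digest(st);  // same value as one-shot XXH32() over the whole input
 *     XXH32_freeState(st);
 *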
@ingroup xxh32_family */ 2007 XXH_PUBLIC_API XXH_errorcode 2008 XXH32_update(XXH32_state_t* state, const void* input, size_t len) 2009 { 2010 if (input==NULL) 2011 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) 2012 return XXH_OK; 2013 #else 2014 return XXH_ERROR; 2015 #endif 2016 2017 { const xxh_u8* p = (const xxh_u8*)input; 2018 const xxh_u8* const bEnd = p + len; 2019 2020 state->total_len_32 += (XXH32_hash_t)len; 2021 state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); 2022 2023 if (state->memsize + len < 16) { /* fill in tmp buffer */ 2024 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); 2025 state->memsize += (XXH32_hash_t)len; 2026 return XXH_OK; 2027 } 2028 2029 if (state->memsize) { /* some data left from previous update */ 2030 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); 2031 { const xxh_u32* p32 = state->mem32; 2032 state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++; 2033 state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++; 2034 state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++; 2035 state->v4 = XXH32_round(state->v4, XXH_readLE32(p32)); 2036 } 2037 p += 16-state->memsize; 2038 state->memsize = 0; 2039 } 2040 2041 if (p <= bEnd-16) { 2042 const xxh_u8* const limit = bEnd - 16; 2043 xxh_u32 v1 = state->v1; 2044 xxh_u32 v2 = state->v2; 2045 xxh_u32 v3 = state->v3; 2046 xxh_u32 v4 = state->v4; 2047 2048 do { 2049 v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4; 2050 v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4; 2051 v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4; 2052 v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4; 2053 } while (p<=limit); 2054 2055 state->v1 = v1; 2056 state->v2 = v2; 2057 state->v3 = v3; 2058 state->v4 = v4; 2059 } 2060 2061 if (p < bEnd) { 2062 XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); 2063 state->memsize = (unsigned)(bEnd-p); 2064 } 2065 } 2066 2067 return XXH_OK; 2068 } 2069 2070 2071 /*! @ingroup xxh32_family */ 2072 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) 2073 { 2074 xxh_u32 h32; 2075 2076 if (state->large_len) { 2077 h32 = XXH_rotl32(state->v1, 1) 2078 + XXH_rotl32(state->v2, 7) 2079 + XXH_rotl32(state->v3, 12) 2080 + XXH_rotl32(state->v4, 18); 2081 } else { 2082 h32 = state->v3 /* == seed */ + XXH_PRIME32_5; 2083 } 2084 2085 h32 += state->total_len_32; 2086 2087 return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); 2088 } 2089 2090 2091 /******* Canonical representation *******/ 2092 2093 /*! 2094 * @ingroup xxh32_family 2095 * The default return values from XXH functions are unsigned 32 and 64 bit 2096 * integers. 2097 * 2098 * The canonical representation uses big endian convention, the same convention 2099 * as human-readable numbers (large digits first). 2100 * 2101 * This way, hash values can be written into a file or buffer, remaining 2102 * comparable across different systems. 2103 * 2104 * The following functions allow transformation of hash values to and from their 2105 * canonical format. 2106 */ 2107 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) 2108 { 2109 XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); 2110 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); 2111 memcpy(dst, &hash, sizeof(*dst)); 2112 } 2113 /*! 
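 * Round-trip sketch (illustrative):
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);                       // stored as big endian bytes
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c);  // h2 == h on any platform
 *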
@ingroup xxh32_family */ 2114 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) 2115 { 2116 return XXH_readBE32(src); 2117 } 2118 2119 2120 #ifndef XXH_NO_LONG_LONG 2121 2122 /* ******************************************************************* 2123 * 64-bit hash functions 2124 *********************************************************************/ 2125 /*! 2126 * @} 2127 * @ingroup impl 2128 * @{ 2129 */ 2130 /******* Memory access *******/ 2131 2132 typedef XXH64_hash_t xxh_u64; 2133 2134 #ifdef XXH_OLD_NAMES 2135 # define U64 xxh_u64 2136 #endif 2137 2138 /*! 2139 * XXH_REROLL_XXH64: 2140 * Whether to reroll the XXH64_finalize() loop. 2141 * 2142 * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a 2143 * performance gain on 64-bit hosts, as only one jump is required. 2144 * 2145 * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit 2146 * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial 2147 * to unroll. The code becomes ridiculously large (the largest function in the 2148 * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is 2149 * also slightly faster because it fits into cache better and is more likely 2150 * to be inlined by the compiler. 2151 * 2152 * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. 2153 */ 2154 #ifndef XXH_REROLL_XXH64 2155 # if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \ 2156 || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \ 2157 || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \ 2158 || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \ 2159 || defined(__mips64__) || defined(__mips64)) /* mips64 */ \ 2160 || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */ 2161 # define XXH_REROLL_XXH64 1 2162 # else 2163 # define XXH_REROLL_XXH64 0 2164 # endif 2165 #endif /* !defined(XXH_REROLL_XXH64) */ 2166 2167 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 2168 /* 2169 * Manual byteshift. Best for old compilers which don't inline memcpy. 2170 * We actually directly use XXH_readLE64 and XXH_readBE64. 2171 */ 2172 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) 2173 2174 /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ 2175 static xxh_u64 XXH_read64(const void* memPtr) 2176 { 2177 return *(const xxh_u64*) memPtr; 2178 } 2179 2180 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) 2181 2182 /* 2183 * __pack instructions are safer, but compiler specific, hence potentially 2184 * problematic for some compilers. 2185 * 2186 * Currently only defined for GCC and ICC. 2187 */ 2188 #ifdef XXH_OLD_NAMES 2189 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; 2190 #endif 2191 static xxh_u64 XXH_read64(const void* ptr) 2192 { 2193 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64; 2194 return ((const xxh_unalign64*)ptr)->u64; 2195 } 2196 2197 #else 2198 2199 /* 2200 * Portable and safe solution. Generally efficient. 
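 * Compilers that inline memcpy() recognize this fixed-size copy and typically
 * lower it to a single (possibly unaligned) load, so no library call is emitted.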
2201 * see: https://stackoverflow.com/a/32095106/646947 2202 */ 2203 static xxh_u64 XXH_read64(const void* memPtr) 2204 { 2205 xxh_u64 val; 2206 memcpy(&val, memPtr, sizeof(val)); 2207 return val; 2208 } 2209 2210 #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ 2211 2212 #if defined(_MSC_VER) /* Visual Studio */ 2213 # define XXH_swap64 _byteswap_uint64 2214 #elif XXH_GCC_VERSION >= 403 2215 # define XXH_swap64 __builtin_bswap64 2216 #else 2217 static xxh_u64 XXH_swap64(xxh_u64 x) 2218 { 2219 return ((x << 56) & 0xff00000000000000ULL) | 2220 ((x << 40) & 0x00ff000000000000ULL) | 2221 ((x << 24) & 0x0000ff0000000000ULL) | 2222 ((x << 8) & 0x000000ff00000000ULL) | 2223 ((x >> 8) & 0x00000000ff000000ULL) | 2224 ((x >> 24) & 0x0000000000ff0000ULL) | 2225 ((x >> 40) & 0x000000000000ff00ULL) | 2226 ((x >> 56) & 0x00000000000000ffULL); 2227 } 2228 #endif 2229 2230 2231 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ 2232 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 2233 2234 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) 2235 { 2236 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 2237 return bytePtr[0] 2238 | ((xxh_u64)bytePtr[1] << 8) 2239 | ((xxh_u64)bytePtr[2] << 16) 2240 | ((xxh_u64)bytePtr[3] << 24) 2241 | ((xxh_u64)bytePtr[4] << 32) 2242 | ((xxh_u64)bytePtr[5] << 40) 2243 | ((xxh_u64)bytePtr[6] << 48) 2244 | ((xxh_u64)bytePtr[7] << 56); 2245 } 2246 2247 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) 2248 { 2249 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 2250 return bytePtr[7] 2251 | ((xxh_u64)bytePtr[6] << 8) 2252 | ((xxh_u64)bytePtr[5] << 16) 2253 | ((xxh_u64)bytePtr[4] << 24) 2254 | ((xxh_u64)bytePtr[3] << 32) 2255 | ((xxh_u64)bytePtr[2] << 40) 2256 | ((xxh_u64)bytePtr[1] << 48) 2257 | ((xxh_u64)bytePtr[0] << 56); 2258 } 2259 2260 #else 2261 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) 2262 { 2263 return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); 2264 } 2265 2266 static xxh_u64 XXH_readBE64(const void* ptr) 2267 { 2268 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); 2269 } 2270 #endif 2271 2272 XXH_FORCE_INLINE xxh_u64 2273 XXH_readLE64_align(const void* ptr, XXH_alignment align) 2274 { 2275 if (align==XXH_unaligned) 2276 return XXH_readLE64(ptr); 2277 else 2278 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); 2279 } 2280 2281 2282 /******* xxh64 *******/ 2283 /*! 
2284 * @} 2285 * @defgroup xxh64_impl XXH64 implementation 2286 * @ingroup impl 2287 * @{ 2288 */ 2289 static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL; /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ 2290 static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ 2291 static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL; /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ 2292 static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ 2293 static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL; /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ 2294 2295 #ifdef XXH_OLD_NAMES 2296 # define PRIME64_1 XXH_PRIME64_1 2297 # define PRIME64_2 XXH_PRIME64_2 2298 # define PRIME64_3 XXH_PRIME64_3 2299 # define PRIME64_4 XXH_PRIME64_4 2300 # define PRIME64_5 XXH_PRIME64_5 2301 #endif 2302 2303 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) 2304 { 2305 acc += input * XXH_PRIME64_2; 2306 acc = XXH_rotl64(acc, 31); 2307 acc *= XXH_PRIME64_1; 2308 return acc; 2309 } 2310 2311 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) 2312 { 2313 val = XXH64_round(0, val); 2314 acc ^= val; 2315 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; 2316 return acc; 2317 } 2318 2319 static xxh_u64 XXH64_avalanche(xxh_u64 h64) 2320 { 2321 h64 ^= h64 >> 33; 2322 h64 *= XXH_PRIME64_2; 2323 h64 ^= h64 >> 29; 2324 h64 *= XXH_PRIME64_3; 2325 h64 ^= h64 >> 32; 2326 return h64; 2327 } 2328 2329 2330 #define XXH_get64bits(p) XXH_readLE64_align(p, align) 2331 2332 static xxh_u64 2333 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) 2334 { 2335 #define XXH_PROCESS1_64 do { \ 2336 h64 ^= (*ptr++) * XXH_PRIME64_5; \ 2337 h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; \ 2338 } while (0) 2339 2340 #define XXH_PROCESS4_64 do { \ 2341 h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; \ 2342 ptr += 4; \ 2343 h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; \ 2344 } while (0) 2345 2346 #define XXH_PROCESS8_64 do { \ 2347 xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \ 2348 ptr += 8; \ 2349 h64 ^= k1; \ 2350 h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; \ 2351 } while (0) 2352 2353 /* Rerolled version for 32-bit targets is faster and much smaller. 
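 * Either way the result is identical; for instance, with 27 trailing bytes,
 * both paths digest them as three 8-byte steps followed by three 1-byte steps.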
*/ 2354 if (XXH_REROLL || XXH_REROLL_XXH64) { 2355 len &= 31; 2356 while (len >= 8) { 2357 XXH_PROCESS8_64; 2358 len -= 8; 2359 } 2360 if (len >= 4) { 2361 XXH_PROCESS4_64; 2362 len -= 4; 2363 } 2364 while (len > 0) { 2365 XXH_PROCESS1_64; 2366 --len; 2367 } 2368 return XXH64_avalanche(h64); 2369 } else { 2370 switch(len & 31) { 2371 case 24: XXH_PROCESS8_64; 2372 /* fallthrough */ 2373 case 16: XXH_PROCESS8_64; 2374 /* fallthrough */ 2375 case 8: XXH_PROCESS8_64; 2376 return XXH64_avalanche(h64); 2377 2378 case 28: XXH_PROCESS8_64; 2379 /* fallthrough */ 2380 case 20: XXH_PROCESS8_64; 2381 /* fallthrough */ 2382 case 12: XXH_PROCESS8_64; 2383 /* fallthrough */ 2384 case 4: XXH_PROCESS4_64; 2385 return XXH64_avalanche(h64); 2386 2387 case 25: XXH_PROCESS8_64; 2388 /* fallthrough */ 2389 case 17: XXH_PROCESS8_64; 2390 /* fallthrough */ 2391 case 9: XXH_PROCESS8_64; 2392 XXH_PROCESS1_64; 2393 return XXH64_avalanche(h64); 2394 2395 case 29: XXH_PROCESS8_64; 2396 /* fallthrough */ 2397 case 21: XXH_PROCESS8_64; 2398 /* fallthrough */ 2399 case 13: XXH_PROCESS8_64; 2400 /* fallthrough */ 2401 case 5: XXH_PROCESS4_64; 2402 XXH_PROCESS1_64; 2403 return XXH64_avalanche(h64); 2404 2405 case 26: XXH_PROCESS8_64; 2406 /* fallthrough */ 2407 case 18: XXH_PROCESS8_64; 2408 /* fallthrough */ 2409 case 10: XXH_PROCESS8_64; 2410 XXH_PROCESS1_64; 2411 XXH_PROCESS1_64; 2412 return XXH64_avalanche(h64); 2413 2414 case 30: XXH_PROCESS8_64; 2415 /* fallthrough */ 2416 case 22: XXH_PROCESS8_64; 2417 /* fallthrough */ 2418 case 14: XXH_PROCESS8_64; 2419 /* fallthrough */ 2420 case 6: XXH_PROCESS4_64; 2421 XXH_PROCESS1_64; 2422 XXH_PROCESS1_64; 2423 return XXH64_avalanche(h64); 2424 2425 case 27: XXH_PROCESS8_64; 2426 /* fallthrough */ 2427 case 19: XXH_PROCESS8_64; 2428 /* fallthrough */ 2429 case 11: XXH_PROCESS8_64; 2430 XXH_PROCESS1_64; 2431 XXH_PROCESS1_64; 2432 XXH_PROCESS1_64; 2433 return XXH64_avalanche(h64); 2434 2435 case 31: XXH_PROCESS8_64; 2436 /* fallthrough */ 2437 case 23: XXH_PROCESS8_64; 2438 /* fallthrough */ 2439 case 15: XXH_PROCESS8_64; 2440 /* fallthrough */ 2441 case 7: XXH_PROCESS4_64; 2442 /* fallthrough */ 2443 case 3: XXH_PROCESS1_64; 2444 /* fallthrough */ 2445 case 2: XXH_PROCESS1_64; 2446 /* fallthrough */ 2447 case 1: XXH_PROCESS1_64; 2448 /* fallthrough */ 2449 case 0: return XXH64_avalanche(h64); 2450 } 2451 } 2452 /* impossible to reach */ 2453 XXH_ASSERT(0); 2454 return 0; /* unreachable, but some compilers complain without it */ 2455 } 2456 2457 #ifdef XXH_OLD_NAMES 2458 # define PROCESS1_64 XXH_PROCESS1_64 2459 # define PROCESS4_64 XXH_PROCESS4_64 2460 # define PROCESS8_64 XXH_PROCESS8_64 2461 #else 2462 # undef XXH_PROCESS1_64 2463 # undef XXH_PROCESS4_64 2464 # undef XXH_PROCESS8_64 2465 #endif 2466 2467 XXH_FORCE_INLINE xxh_u64 2468 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) 2469 { 2470 const xxh_u8* bEnd = input + len; 2471 xxh_u64 h64; 2472 2473 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) 2474 if (input==NULL) { 2475 len=0; 2476 bEnd=input=(const xxh_u8*)(size_t)32; 2477 } 2478 #endif 2479 2480 if (len>=32) { 2481 const xxh_u8* const limit = bEnd - 32; 2482 xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; 2483 xxh_u64 v2 = seed + XXH_PRIME64_2; 2484 xxh_u64 v3 = seed + 0; 2485 xxh_u64 v4 = seed - XXH_PRIME64_1; 2486 2487 do { 2488 v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; 2489 v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; 2490 v3 = XXH64_round(v3, XXH_get64bits(input)); 
input+=8; 2491 v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; 2492 } while (input<=limit); 2493 2494 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); 2495 h64 = XXH64_mergeRound(h64, v1); 2496 h64 = XXH64_mergeRound(h64, v2); 2497 h64 = XXH64_mergeRound(h64, v3); 2498 h64 = XXH64_mergeRound(h64, v4); 2499 2500 } else { 2501 h64 = seed + XXH_PRIME64_5; 2502 } 2503 2504 h64 += (xxh_u64) len; 2505 2506 return XXH64_finalize(h64, input, len, align); 2507 } 2508 2509 2510 /*! @ingroup xxh64_family */ 2511 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed) 2512 { 2513 #if 0 2514 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 2515 XXH64_state_t state; 2516 XXH64_reset(&state, seed); 2517 XXH64_update(&state, (const xxh_u8*)input, len); 2518 return XXH64_digest(&state); 2519 #else 2520 if (XXH_FORCE_ALIGN_CHECK) { 2521 if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ 2522 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); 2523 } } 2524 2525 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); 2526 2527 #endif 2528 } 2529 2530 /******* Hash Streaming *******/ 2531 2532 /*! @ingroup xxh64_family*/ 2533 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) 2534 { 2535 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); 2536 } 2537 /*! @ingroup xxh64_family */ 2538 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) 2539 { 2540 XXH_free(statePtr); 2541 return XXH_OK; 2542 } 2543 2544 /*! @ingroup xxh64_family */ 2545 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) 2546 { 2547 memcpy(dstState, srcState, sizeof(*dstState)); 2548 } 2549 2550 /*! @ingroup xxh64_family */ 2551 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) 2552 { 2553 XXH64_state_t state; /* use a local state to memcpy() in order to avoid strict-aliasing warnings */ 2554 memset(&state, 0, sizeof(state)); 2555 state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; 2556 state.v2 = seed + XXH_PRIME64_2; 2557 state.v3 = seed + 0; 2558 state.v4 = seed - XXH_PRIME64_1; 2559 /* do not write into reserved64, might be removed in a future version */ 2560 memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64)); 2561 return XXH_OK; 2562 } 2563 2564 /*! 
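 * Chunked-update sketch (illustrative; `readChunk()`, `chunk` and `seed` are
 * hypothetical caller-side names, not part of xxHash):
 *
 *     XXH64_state_t* const st = XXH64_createState();
 *     XXH64_reset(st, seed);
 *     for (;;) {
 *         size_t const n = readChunk(chunk, sizeof(chunk));
 *         if (n == 0) break;
 *         XXH64_update(st, chunk, n);
 *     }
 *     XXH64_hash_t const h = XXH64_digest(st);
 *     XXH64_freeState(st);
 *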
@ingroup xxh64_family */ 2565 XXH_PUBLIC_API XXH_errorcode 2566 XXH64_update (XXH64_state_t* state, const void* input, size_t len) 2567 { 2568 if (input==NULL) 2569 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) 2570 return XXH_OK; 2571 #else 2572 return XXH_ERROR; 2573 #endif 2574 2575 { const xxh_u8* p = (const xxh_u8*)input; 2576 const xxh_u8* const bEnd = p + len; 2577 2578 state->total_len += len; 2579 2580 if (state->memsize + len < 32) { /* fill in tmp buffer */ 2581 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); 2582 state->memsize += (xxh_u32)len; 2583 return XXH_OK; 2584 } 2585 2586 if (state->memsize) { /* tmp buffer is full */ 2587 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); 2588 state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0)); 2589 state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1)); 2590 state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2)); 2591 state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3)); 2592 p += 32 - state->memsize; 2593 state->memsize = 0; 2594 } 2595 2596 if (p+32 <= bEnd) { 2597 const xxh_u8* const limit = bEnd - 32; 2598 xxh_u64 v1 = state->v1; 2599 xxh_u64 v2 = state->v2; 2600 xxh_u64 v3 = state->v3; 2601 xxh_u64 v4 = state->v4; 2602 2603 do { 2604 v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8; 2605 v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8; 2606 v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8; 2607 v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8; 2608 } while (p<=limit); 2609 2610 state->v1 = v1; 2611 state->v2 = v2; 2612 state->v3 = v3; 2613 state->v4 = v4; 2614 } 2615 2616 if (p < bEnd) { 2617 XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); 2618 state->memsize = (unsigned)(bEnd-p); 2619 } 2620 } 2621 2622 return XXH_OK; 2623 } 2624 2625 2626 /*! @ingroup xxh64_family */ 2627 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state) 2628 { 2629 xxh_u64 h64; 2630 2631 if (state->total_len >= 32) { 2632 xxh_u64 const v1 = state->v1; 2633 xxh_u64 const v2 = state->v2; 2634 xxh_u64 const v3 = state->v3; 2635 xxh_u64 const v4 = state->v4; 2636 2637 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); 2638 h64 = XXH64_mergeRound(h64, v1); 2639 h64 = XXH64_mergeRound(h64, v2); 2640 h64 = XXH64_mergeRound(h64, v3); 2641 h64 = XXH64_mergeRound(h64, v4); 2642 } else { 2643 h64 = state->v3 /*seed*/ + XXH_PRIME64_5; 2644 } 2645 2646 h64 += (xxh_u64) state->total_len; 2647 2648 return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); 2649 } 2650 2651 2652 /******* Canonical representation *******/ 2653 2654 /*! @ingroup xxh64_family */ 2655 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) 2656 { 2657 XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); 2658 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); 2659 memcpy(dst, &hash, sizeof(*dst)); 2660 } 2661 2662 /*! @ingroup xxh64_family */ 2663 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) 2664 { 2665 return XXH_readBE64(src); 2666 } 2667 2668 2669 2670 /* ********************************************************************* 2671 * XXH3 2672 * New generation hash designed for speed on small keys and vectorization 2673 ************************************************************************ */ 2674 /*! 
2675 * @} 2676 * @defgroup xxh3_impl XXH3 implementation 2677 * @ingroup impl 2678 * @{ 2679 */ 2680 2681 /* === Compiler specifics === */ 2682 2683 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ 2684 # define XXH_RESTRICT restrict 2685 #else 2686 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */ 2687 # define XXH_RESTRICT /* disable */ 2688 #endif 2689 2690 #if (defined(__GNUC__) && (__GNUC__ >= 3)) \ 2691 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ 2692 || defined(__clang__) 2693 # define XXH_likely(x) __builtin_expect(x, 1) 2694 # define XXH_unlikely(x) __builtin_expect(x, 0) 2695 #else 2696 # define XXH_likely(x) (x) 2697 # define XXH_unlikely(x) (x) 2698 #endif 2699 2700 #if defined(__GNUC__) 2701 # if defined(__AVX2__) 2702 # include <immintrin.h> 2703 # elif defined(__SSE2__) 2704 # include <emmintrin.h> 2705 # elif defined(__ARM_NEON__) || defined(__ARM_NEON) 2706 # define inline __inline__ /* circumvent a clang bug */ 2707 # include <arm_neon.h> 2708 # undef inline 2709 # endif 2710 #elif defined(_MSC_VER) 2711 # include <intrin.h> 2712 #endif 2713 2714 /* 2715 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while 2716 * remaining a true 64-bit/128-bit hash function. 2717 * 2718 * This is done by prioritizing a subset of 64-bit operations that can be 2719 * emulated without too many steps on the average 32-bit machine. 2720 * 2721 * For example, these two lines seem similar, and run equally fast on 64-bit: 2722 * 2723 * xxh_u64 x; 2724 * x ^= (x >> 47); // good 2725 * x ^= (x >> 13); // bad 2726 * 2727 * However, to a 32-bit machine, there is a major difference. 2728 * 2729 * x ^= (x >> 47) looks like this: 2730 * 2731 * x.lo ^= (x.hi >> (47 - 32)); 2732 * 2733 * while x ^= (x >> 13) looks like this: 2734 * 2735 * // note: funnel shifts are not usually cheap. 2736 * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); 2737 * x.hi ^= (x.hi >> 13); 2738 * 2739 * The first one is significantly faster than the second, simply because the 2740 * shift is larger than 32. This means: 2741 * - All the bits we need are in the upper 32 bits, so we can ignore the lower 2742 * 32 bits in the shift. 2743 * - The shift result will always fit in the lower 32 bits, and therefore, 2744 * we can ignore the upper 32 bits in the xor. 2745 * 2746 * Thanks to this optimization, XXH3 only requires these features to be efficient: 2747 * 2748 * - Usable unaligned access 2749 * - A 32-bit or 64-bit ALU 2750 * - If 32-bit, a decent ADC instruction 2751 * - A 32 or 64-bit multiply with a 64-bit result 2752 * - For the 128-bit variant, a decent byteswap helps short inputs. 2753 * 2754 * The first two are already required by XXH32, and almost all 32-bit and 64-bit 2755 * platforms which can run XXH32 can run XXH3 efficiently. 2756 * 2757 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one 2758 * notable exception. 2759 * 2760 * First of all, Thumb-1 lacks support for the UMULL instruction which 2761 * performs the important long multiply. This means numerous __aeabi_lmul 2762 * calls. 2763 * 2764 * Second of all, the 8 functional registers are just not enough. 2765 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need 2766 * Lo registers, and this shuffling results in thousands more MOVs than A32. 2767 * 2768 * A32 and T32 don't have this limitation. 
They can access all 14 registers, 2769 * do a 32->64 multiply with UMULL, and the flexible operand allowing free 2770 * shifts is helpful, too. 2771 * 2772 * Therefore, we do a quick sanity check. 2773 * 2774 * If compiling Thumb-1 for a target which supports ARM instructions, we will 2775 * emit a warning, as it is not a "sane" platform to compile for. 2776 * 2777 * Usually, if this happens, it is because of an accident and you probably need 2778 * to specify -march, as you likely meant to compile for a newer architecture. 2779 * 2780 * Credit: large sections of the vectorial and asm source code paths 2781 * have been contributed by @easyaspi314 2782 */ 2783 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) 2784 # warning "XXH3 is highly inefficient without ARM or Thumb-2." 2785 #endif 2786 2787 /* ========================================== 2788 * Vectorization detection 2789 * ========================================== */ 2790 2791 #ifdef XXH_DOXYGEN 2792 /*! 2793 * @ingroup tuning 2794 * @brief Overrides the vectorization implementation chosen for XXH3. 2795 * 2796 * Can be defined to 0 to disable SIMD or any of the values mentioned in 2797 * @ref XXH_VECTOR_TYPE. 2798 * 2799 * If this is not defined, it uses predefined macros to determine the best 2800 * implementation. 2801 */ 2802 # define XXH_VECTOR XXH_SCALAR 2803 /*! 2804 * @ingroup tuning 2805 * @brief Possible values for @ref XXH_VECTOR. 2806 * 2807 * Note that these are actually implemented as macros. 2808 * 2809 * If this is not defined, it is detected automatically. 2810 * @ref XXH_X86DISPATCH overrides this. 2811 */ 2812 enum XXH_VECTOR_TYPE /* fake enum */ { 2813 XXH_SCALAR = 0, /*!< Portable scalar version */ 2814 XXH_SSE2 = 1, /*!< 2815 * SSE2 for Pentium 4, Opteron, all x86_64. 2816 * 2817 * @note SSE2 is also guaranteed on Windows 10, macOS, and 2818 * Android x86. 2819 */ 2820 XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ 2821 XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ 2822 XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */ 2823 XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ 2824 }; 2825 /*! 2826 * @ingroup tuning 2827 * @brief Selects the minimum alignment for XXH3's accumulators. 2828 * 2829 * When using SIMD, this should match the alignment reqired for said vector 2830 * type, so, for example, 32 for AVX2. 2831 * 2832 * Default: Auto detected. 
2833 */ 2834 # define XXH_ACC_ALIGN 8 2835 #endif 2836 2837 /* Actual definition */ 2838 #ifndef XXH_DOXYGEN 2839 # define XXH_SCALAR 0 2840 # define XXH_SSE2 1 2841 # define XXH_AVX2 2 2842 # define XXH_AVX512 3 2843 # define XXH_NEON 4 2844 # define XXH_VSX 5 2845 #endif 2846 2847 #ifndef XXH_VECTOR /* can be defined on command line */ 2848 # if defined(__AVX512F__) 2849 # define XXH_VECTOR XXH_AVX512 2850 # elif defined(__AVX2__) 2851 # define XXH_VECTOR XXH_AVX2 2852 # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) 2853 # define XXH_VECTOR XXH_SSE2 2854 # elif defined(__GNUC__) /* msvc support maybe later */ \ 2855 && (defined(__ARM_NEON__) || defined(__ARM_NEON)) \ 2856 && (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \ 2857 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) 2858 # define XXH_VECTOR XXH_NEON 2859 # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ 2860 || (defined(__s390x__) && defined(__VEC__)) \ 2861 && defined(__GNUC__) /* TODO: IBM XL */ 2862 # define XXH_VECTOR XXH_VSX 2863 # else 2864 # define XXH_VECTOR XXH_SCALAR 2865 # endif 2866 #endif 2867 2868 /* 2869 * Controls the alignment of the accumulator, 2870 * for compatibility with aligned vector loads, which are usually faster. 2871 */ 2872 #ifndef XXH_ACC_ALIGN 2873 # if defined(XXH_X86DISPATCH) 2874 # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ 2875 # elif XXH_VECTOR == XXH_SCALAR /* scalar */ 2876 # define XXH_ACC_ALIGN 8 2877 # elif XXH_VECTOR == XXH_SSE2 /* sse2 */ 2878 # define XXH_ACC_ALIGN 16 2879 # elif XXH_VECTOR == XXH_AVX2 /* avx2 */ 2880 # define XXH_ACC_ALIGN 32 2881 # elif XXH_VECTOR == XXH_NEON /* neon */ 2882 # define XXH_ACC_ALIGN 16 2883 # elif XXH_VECTOR == XXH_VSX /* vsx */ 2884 # define XXH_ACC_ALIGN 16 2885 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */ 2886 # define XXH_ACC_ALIGN 64 2887 # endif 2888 #endif 2889 2890 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ 2891 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 2892 # define XXH_SEC_ALIGN XXH_ACC_ALIGN 2893 #else 2894 # define XXH_SEC_ALIGN 8 2895 #endif 2896 2897 /* 2898 * UGLY HACK: 2899 * GCC usually generates the best code with -O3 for xxHash. 2900 * 2901 * However, when targeting AVX2, it is overzealous in its unrolling resulting 2902 * in code roughly 3/4 the speed of Clang. 2903 * 2904 * There are other issues, such as GCC splitting _mm256_loadu_si256 into 2905 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which 2906 * only applies to Sandy and Ivy Bridge... which don't even support AVX2. 2907 * 2908 * That is why when compiling the AVX2 version, it is recommended to use either 2909 * -O2 -mavx2 -march=haswell 2910 * or 2911 * -O2 -mavx2 -mno-avx256-split-unaligned-load 2912 * for decent performance, or to use Clang instead. 2913 * 2914 * Fortunately, we can control the first one with a pragma that forces GCC into 2915 * -O2, but the other one we can't control without "failed to inline always 2916 * inline function due to target mismatch" warnings. 
2917 */ 2918 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ 2919 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ 2920 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ 2921 # pragma GCC push_options 2922 # pragma GCC optimize("-O2") 2923 #endif 2924 2925 2926 #if XXH_VECTOR == XXH_NEON 2927 /* 2928 * NEON's setup for vmlal_u32 is a little more complicated than it is on 2929 * SSE2, AVX2, and VSX. 2930 * 2931 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast. 2932 * 2933 * To do the same operation, the 128-bit 'Q' register needs to be split into 2934 * two 64-bit 'D' registers, performing this operation:: 2935 * 2936 * [ a | b ] 2937 * | '---------. .--------' | 2938 * | x | 2939 * | .---------' '--------. | 2940 * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ] 2941 * 2942 * Due to significant changes in aarch64, the fastest method for aarch64 is 2943 * completely different than the fastest method for ARMv7-A. 2944 * 2945 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying 2946 * D11 will modify the high half of Q5. This is similar to how modifying AH 2947 * will only affect bits 8-15 of AX on x86. 2948 * 2949 * VZIP takes two registers, and puts even lanes in one register and odd lanes 2950 * in the other. 2951 * 2952 * On ARMv7-A, this strangely modifies both parameters in place instead of 2953 * taking the usual 3-operand form. 2954 * 2955 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the 2956 * lower and upper halves of the Q register to end up with the high and low 2957 * halves where we want - all in one instruction. 2958 * 2959 * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] } 2960 * 2961 * Unfortunately we need inline assembly for this: Instructions modifying two 2962 * registers at once is not possible in GCC or Clang's IR, and they have to 2963 * create a copy. 2964 * 2965 * aarch64 requires a different approach. 2966 * 2967 * In order to make it easier to write a decent compiler for aarch64, many 2968 * quirks were removed, such as conditional execution. 2969 * 2970 * NEON was also affected by this. 2971 * 2972 * aarch64 cannot access the high bits of a Q-form register, and writes to a 2973 * D-form register zero the high bits, similar to how writes to W-form scalar 2974 * registers (or DWORD registers on x86_64) work. 2975 * 2976 * The formerly free vget_high intrinsics now require a vext (with a few 2977 * exceptions) 2978 * 2979 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent 2980 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one 2981 * operand. 2982 * 2983 * The equivalent of the VZIP.32 on the lower and upper halves would be this 2984 * mess: 2985 * 2986 * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] } 2987 * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] } 2988 * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] } 2989 * 2990 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN): 2991 * 2992 * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32); 2993 * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF); 2994 * 2995 * This is available on ARMv7-A, but is less efficient than a single VZIP.32. 2996 */ 2997 2998 /*! 
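 * Helper used by the NEON accumulation code below; it applies whichever of the
 * two splitting strategies described above is available.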
2999 * Function-like macro: 3000 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi) 3001 * { 3002 * outLo = (uint32x2_t)(in & 0xFFFFFFFF); 3003 * outHi = (uint32x2_t)(in >> 32); 3004 * in = UNDEFINED; 3005 * } 3006 */ 3007 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \ 3008 && defined(__GNUC__) \ 3009 && !defined(__aarch64__) && !defined(__arm64__) 3010 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ 3011 do { \ 3012 /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \ 3013 /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \ 3014 /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \ 3015 __asm__("vzip.32 %e0, %f0" : "+w" (in)); \ 3016 (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \ 3017 (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \ 3018 } while (0) 3019 # else 3020 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ 3021 do { \ 3022 (outLo) = vmovn_u64 (in); \ 3023 (outHi) = vshrn_n_u64 ((in), 32); \ 3024 } while (0) 3025 # endif 3026 #endif /* XXH_VECTOR == XXH_NEON */ 3027 3028 /* 3029 * VSX and Z Vector helpers. 3030 * 3031 * This is very messy, and any pull requests to clean this up are welcome. 3032 * 3033 * There are a lot of problems with supporting VSX and s390x, due to 3034 * inconsistent intrinsics, spotty coverage, and multiple endiannesses. 3035 */ 3036 #if XXH_VECTOR == XXH_VSX 3037 # if defined(__s390x__) 3038 # include <s390intrin.h> 3039 # else 3040 /* gcc's altivec.h can have the unwanted consequence to unconditionally 3041 * #define bool, vector, and pixel keywords, 3042 * with bad consequences for programs already using these keywords for other purposes. 3043 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined. 3044 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler, 3045 * but it seems that, in some cases, it isn't. 3046 * Force the build macro to be defined, so that keywords are not altered. 3047 */ 3048 # if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__) 3049 # define __APPLE_ALTIVEC__ 3050 # endif 3051 # include <altivec.h> 3052 # endif 3053 3054 typedef __vector unsigned long long xxh_u64x2; 3055 typedef __vector unsigned char xxh_u8x16; 3056 typedef __vector unsigned xxh_u32x4; 3057 3058 # ifndef XXH_VSX_BE 3059 # if defined(__BIG_ENDIAN__) \ 3060 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) 3061 # define XXH_VSX_BE 1 3062 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ 3063 # warning "-maltivec=be is not recommended. Please use native endianness." 3064 # define XXH_VSX_BE 1 3065 # else 3066 # define XXH_VSX_BE 0 3067 # endif 3068 # endif /* !defined(XXH_VSX_BE) */ 3069 3070 # if XXH_VSX_BE 3071 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) 3072 # define XXH_vec_revb vec_revb 3073 # else 3074 /*! 3075 * A polyfill for POWER9's vec_revb(). 3076 */ 3077 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) 3078 { 3079 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 3080 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; 3081 return vec_perm(val, val, vByteSwap); 3082 } 3083 # endif 3084 # endif /* XXH_VSX_BE */ 3085 3086 /*! 3087 * Performs an unaligned vector load and byte swaps it on big endian. 
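 * (The memcpy() below is the same safe unaligned-load idiom used by
 * XXH_read32() and XXH_read64() above.)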
3088 */ 3089 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) 3090 { 3091 xxh_u64x2 ret; 3092 memcpy(&ret, ptr, sizeof(xxh_u64x2)); 3093 # if XXH_VSX_BE 3094 ret = XXH_vec_revb(ret); 3095 # endif 3096 return ret; 3097 } 3098 3099 /* 3100 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC 3101 * 3102 * These intrinsics weren't added until GCC 8, despite existing for a while, 3103 * and they are endian dependent. Also, their meaning swap depending on version. 3104 * */ 3105 # if defined(__s390x__) 3106 /* s390x is always big endian, no issue on this platform */ 3107 # define XXH_vec_mulo vec_mulo 3108 # define XXH_vec_mule vec_mule 3109 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) 3110 /* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ 3111 # define XXH_vec_mulo __builtin_altivec_vmulouw 3112 # define XXH_vec_mule __builtin_altivec_vmuleuw 3113 # else 3114 /* gcc needs inline assembly */ 3115 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ 3116 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) 3117 { 3118 xxh_u64x2 result; 3119 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); 3120 return result; 3121 } 3122 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) 3123 { 3124 xxh_u64x2 result; 3125 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); 3126 return result; 3127 } 3128 # endif /* XXH_vec_mulo, XXH_vec_mule */ 3129 #endif /* XXH_VECTOR == XXH_VSX */ 3130 3131 3132 /* prefetch 3133 * can be disabled, by declaring XXH_NO_PREFETCH build macro */ 3134 #if defined(XXH_NO_PREFETCH) 3135 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ 3136 #else 3137 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ 3138 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ 3139 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) 3140 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) 3141 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) 3142 # else 3143 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ 3144 # endif 3145 #endif /* XXH_NO_PREFETCH */ 3146 3147 3148 /* ========================================== 3149 * XXH3 default settings 3150 * ========================================== */ 3151 3152 #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ 3153 3154 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) 3155 # error "default keyset is not large enough" 3156 #endif 3157 3158 /*! Pseudorandom secret taken directly from FARSH. 
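 * Callers who prefer to supply their own entropy can instead pass a buffer of
 * at least XXH3_SECRET_SIZE_MIN bytes to the `*_withSecret()` variants
 * declared earlier in this header.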
*/ 3159 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { 3160 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, 3161 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 3162 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, 3163 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, 3164 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 3165 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, 3166 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, 3167 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 3168 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, 3169 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, 3170 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 3171 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, 3172 }; 3173 3174 3175 #ifdef XXH_OLD_NAMES 3176 # define kSecret XXH3_kSecret 3177 #endif 3178 3179 #ifdef XXH_DOXYGEN 3180 /*! 3181 * @brief Calculates a 32-bit to 64-bit long multiply. 3182 * 3183 * Implemented as a macro. 3184 * 3185 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't 3186 * need to (but it shouldn't need to anyways, it is about 7 instructions to do 3187 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we 3188 * use that instead of the normal method. 3189 * 3190 * If you are compiling for platforms like Thumb-1 and don't have a better option, 3191 * you may also want to write your own long multiply routine here. 3192 * 3193 * @param x, y Numbers to be multiplied 3194 * @return 64-bit product of the low 32 bits of @p x and @p y. 3195 */ 3196 XXH_FORCE_INLINE xxh_u64 3197 XXH_mult32to64(xxh_u64 x, xxh_u64 y) 3198 { 3199 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); 3200 } 3201 #elif defined(_MSC_VER) && defined(_M_IX86) 3202 # include <intrin.h> 3203 # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) 3204 #else 3205 /* 3206 * Downcast + upcast is usually better than masking on older compilers like 3207 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. 3208 * 3209 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands 3210 * and perform a full 64x64 multiply -- entirely redundant on 32-bit. 3211 */ 3212 # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) 3213 #endif 3214 3215 /*! 3216 * @brief Calculates a 64->128-bit long multiply. 3217 * 3218 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar 3219 * version. 3220 * 3221 * @param lhs, rhs The 64-bit integers to be multiplied 3222 * @return The 128-bit result represented in an @ref XXH128_hash_t. 3223 */ 3224 static XXH128_hash_t 3225 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) 3226 { 3227 /* 3228 * GCC/Clang __uint128_t method. 3229 * 3230 * On most 64-bit targets, GCC and Clang define a __uint128_t type. 3231 * This is usually the best way as it usually uses a native long 64-bit 3232 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. 3233 * 3234 * Usually. 
3235 * 3236 * Despite being a 32-bit platform, Clang (and emscripten) define this type 3237 * despite not having the arithmetic for it. This results in a laggy 3238 * compiler builtin call which calculates a full 128-bit multiply. 3239 * In that case it is best to use the portable one. 3240 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 3241 */ 3242 #if defined(__GNUC__) && !defined(__wasm__) \ 3243 && defined(__SIZEOF_INT128__) \ 3244 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) 3245 3246 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; 3247 XXH128_hash_t r128; 3248 r128.low64 = (xxh_u64)(product); 3249 r128.high64 = (xxh_u64)(product >> 64); 3250 return r128; 3251 3252 /* 3253 * MSVC for x64's _umul128 method. 3254 * 3255 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); 3256 * 3257 * This compiles to single operand MUL on x64. 3258 */ 3259 #elif defined(_M_X64) || defined(_M_IA64) 3260 3261 #ifndef _MSC_VER 3262 # pragma intrinsic(_umul128) 3263 #endif 3264 xxh_u64 product_high; 3265 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); 3266 XXH128_hash_t r128; 3267 r128.low64 = product_low; 3268 r128.high64 = product_high; 3269 return r128; 3270 3271 #else 3272 /* 3273 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. 3274 * 3275 * This is a fast and simple grade school multiply, which is shown below 3276 * with base 10 arithmetic instead of base 0x100000000. 3277 * 3278 * 9 3 // D2 lhs = 93 3279 * x 7 5 // D2 rhs = 75 3280 * ---------- 3281 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 3282 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 3283 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 3284 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 3285 * --------- 3286 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 3287 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 3288 * --------- 3289 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 3290 * 3291 * The reasons for adding the products like this are: 3292 * 1. It avoids manual carry tracking. Just like how 3293 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. 3294 * This avoids a lot of complexity. 3295 * 3296 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL 3297 * instruction available in ARM's Digital Signal Processing extension 3298 * in 32-bit ARMv6 and later, which is shown below: 3299 * 3300 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) 3301 * { 3302 * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; 3303 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); 3304 * *RdHi = (xxh_u32)(product >> 32); 3305 * } 3306 * 3307 * This instruction was designed for efficient long multiplication, and 3308 * allows this to be calculated in only 4 instructions at speeds 3309 * comparable to some 64-bit ALUs. 3310 * 3311 * 3. It isn't terrible on other platforms. Usually this will be a couple 3312 * of 32-bit ADD/ADCs. 3313 */ 3314 3315 /* First calculate all of the cross products. */ 3316 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); 3317 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); 3318 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); 3319 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); 3320 3321 /* Now add the products together. These will never overflow. 
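 * A quick bound shows why, this time in base 2^32: each partial product is at
 * most (2^32 - 1)^2, and the two extra 32-bit terms add at most 2*(2^32 - 1),
 * so the worst case is
 *   (2^32 - 1)^2 + 2*(2^32 - 1) = 2^64 - 1 = UINT64_MAX,
 * which still fits in an xxh_u64.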
*/ 3322 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; 3323 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; 3324 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); 3325 3326 XXH128_hash_t r128; 3327 r128.low64 = lower; 3328 r128.high64 = upper; 3329 return r128; 3330 #endif 3331 } 3332 3333 /*! 3334 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. 3335 * 3336 * The reason for the separate function is to prevent passing too many structs 3337 * around by value. This will hopefully inline the multiply, but we don't force it. 3338 * 3339 * @param lhs, rhs The 64-bit integers to multiply 3340 * @return The low 64 bits of the product XOR'd by the high 64 bits. 3341 * @see XXH_mult64to128() 3342 */ 3343 static xxh_u64 3344 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) 3345 { 3346 XXH128_hash_t product = XXH_mult64to128(lhs, rhs); 3347 return product.low64 ^ product.high64; 3348 } 3349 3350 /*! Seems to produce slightly better code on GCC for some reason. */ 3351 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) 3352 { 3353 XXH_ASSERT(0 <= shift && shift < 64); 3354 return v64 ^ (v64 >> shift); 3355 } 3356 3357 /* 3358 * This is a fast avalanche stage, 3359 * suitable when input bits are already partially mixed 3360 */ 3361 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) 3362 { 3363 h64 = XXH_xorshift64(h64, 37); 3364 h64 *= 0x165667919E3779F9ULL; 3365 h64 = XXH_xorshift64(h64, 32); 3366 return h64; 3367 } 3368 3369 /* 3370 * This is a stronger avalanche, 3371 * inspired by Pelle Evensen's rrmxmx 3372 * preferable when input has not been previously mixed 3373 */ 3374 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) 3375 { 3376 /* this mix is inspired by Pelle Evensen's rrmxmx */ 3377 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); 3378 h64 *= 0x9FB21C651E98DF25ULL; 3379 h64 ^= (h64 >> 35) + len ; 3380 h64 *= 0x9FB21C651E98DF25ULL; 3381 return XXH_xorshift64(h64, 28); 3382 } 3383 3384 3385 /* ========================================== 3386 * Short keys 3387 * ========================================== 3388 * One of the shortcomings of XXH32 and XXH64 was that their performance was 3389 * sub-optimal on short lengths. It used an iterative algorithm which strongly 3390 * favored lengths that were a multiple of 4 or 8. 3391 * 3392 * Instead of iterating over individual inputs, we use a set of single shot 3393 * functions which piece together a range of lengths and operate in constant time. 3394 * 3395 * Additionally, the number of multiplies has been significantly reduced. This 3396 * reduces latency, especially when emulating 64-bit multiplies on 32-bit. 3397 * 3398 * Depending on the platform, this may or may not be faster than XXH32, but it 3399 * is almost guaranteed to be faster than XXH64. 3400 */ 3401 3402 /* 3403 * At very short lengths, there isn't enough input to fully hide secrets, or use 3404 * the entire secret. 3405 * 3406 * There is also only a limited amount of mixing we can do before significantly 3407 * impacting performance. 3408 * 3409 * Therefore, we use different sections of the secret and always mix two secret 3410 * samples with an XOR. This should have no effect on performance on the 3411 * seedless or withSeed variants because everything _should_ be constant folded 3412 * by modern compilers. 3413 * 3414 * The XOR mixing hides individual parts of the secret and increases entropy. 3415 * 3416 * This adds an extra layer of strength for custom secrets. 
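 *
 * Concretely, the kernels below share one outline (sketch; the secret offsets
 * and the avalanche stage differ per length class):
 *
 *   bitflip = (readLE(secret + o1) ^ readLE(secret + o2)) + or - seed;
 *   keyed   = gathered_input ^ bitflip;
 *   return  avalanche(keyed);
 *
 * The 9-16 byte kernel applies the idea twice (one bitflip per 8-byte half)
 * and additionally folds the 64x64->128-bit product of the two keyed halves
 * into the accumulator before avalanching.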
3417 */ 3418 XXH_FORCE_INLINE XXH64_hash_t 3419 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 3420 { 3421 XXH_ASSERT(input != NULL); 3422 XXH_ASSERT(1 <= len && len <= 3); 3423 XXH_ASSERT(secret != NULL); 3424 /* 3425 * len = 1: combined = { input[0], 0x01, input[0], input[0] } 3426 * len = 2: combined = { input[1], 0x02, input[0], input[1] } 3427 * len = 3: combined = { input[2], 0x03, input[0], input[1] } 3428 */ 3429 { xxh_u8 const c1 = input[0]; 3430 xxh_u8 const c2 = input[len >> 1]; 3431 xxh_u8 const c3 = input[len - 1]; 3432 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) 3433 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); 3434 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; 3435 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; 3436 return XXH64_avalanche(keyed); 3437 } 3438 } 3439 3440 XXH_FORCE_INLINE XXH64_hash_t 3441 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 3442 { 3443 XXH_ASSERT(input != NULL); 3444 XXH_ASSERT(secret != NULL); 3445 XXH_ASSERT(4 <= len && len <= 8); /* len==8 is dispatched here by XXH3_len_0to16_64b() */ 3446 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; 3447 { xxh_u32 const input1 = XXH_readLE32(input); 3448 xxh_u32 const input2 = XXH_readLE32(input + len - 4); 3449 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; 3450 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); 3451 xxh_u64 const keyed = input64 ^ bitflip; 3452 return XXH3_rrmxmx(keyed, len); 3453 } 3454 } 3455 3456 XXH_FORCE_INLINE XXH64_hash_t 3457 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 3458 { 3459 XXH_ASSERT(input != NULL); 3460 XXH_ASSERT(secret != NULL); 3461 XXH_ASSERT(9 <= len && len <= 16); 3462 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; 3463 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; 3464 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; 3465 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; 3466 xxh_u64 const acc = len 3467 + XXH_swap64(input_lo) + input_hi 3468 + XXH3_mul128_fold64(input_lo, input_hi); 3469 return XXH3_avalanche(acc); 3470 } 3471 } 3472 3473 XXH_FORCE_INLINE XXH64_hash_t 3474 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 3475 { 3476 XXH_ASSERT(len <= 16); 3477 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); 3478 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); 3479 if (len) return XXH3_len_1to3_64b(input, len, secret, seed); 3480 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); 3481 } 3482 } 3483 3484 /* 3485 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to 3486 * multiplication by zero, affecting hashes of lengths 17 to 240. 3487 * 3488 * However, they are very unlikely. 3489 * 3490 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all 3491 * unseeded non-cryptographic hashes, it does not attempt to defend itself 3492 * against specially crafted inputs, only random inputs.
3493 * 3494 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes 3495 * cancelling out the secret is taken an arbitrary number of times (addressed 3496 * in XXH3_accumulate_512), this collision is very unlikely with random inputs 3497 * and/or proper seeding: 3498 * 3499 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a 3500 * function that is only called up to 16 times per hash with up to 240 bytes of 3501 * input. 3502 * 3503 * This is not too bad for a non-cryptographic hash function, especially with 3504 * only 64 bit outputs. 3505 * 3506 * The 128-bit variant (which trades some speed for strength) is NOT affected 3507 * by this, although it is always a good idea to use a proper seed if you care 3508 * about strength. 3509 */ 3510 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, 3511 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) 3512 { 3513 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ 3514 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ 3515 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ 3516 /* 3517 * UGLY HACK: 3518 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in 3519 * slower code. 3520 * 3521 * By forcing seed64 into a register, we disrupt the cost model and 3522 * cause it to scalarize. See `XXH32_round()` 3523 * 3524 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, 3525 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on 3526 * GCC 9.2, despite both emitting scalar code. 3527 * 3528 * GCC generates much better scalar code than Clang for the rest of XXH3, 3529 * which is why finding a more optimal codepath is an interest. 3530 */ 3531 __asm__ ("" : "+r" (seed64)); 3532 #endif 3533 { xxh_u64 const input_lo = XXH_readLE64(input); 3534 xxh_u64 const input_hi = XXH_readLE64(input+8); 3535 return XXH3_mul128_fold64( 3536 input_lo ^ (XXH_readLE64(secret) + seed64), 3537 input_hi ^ (XXH_readLE64(secret+8) - seed64) 3538 ); 3539 } 3540 } 3541 3542 /* For mid range keys, XXH3 uses a Mum-hash variant. 
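 * Each 16-byte chunk is passed through XXH3_mix16B() above, i.e. roughly
 * (schematic names):
 *
 *   fold64( (readLE64(in)   ^ (readLE64(sec)   + seed))
 *         * (readLE64(in+8) ^ (readLE64(sec+8) - seed)) )
 *
 * a 64x64->128-bit multiply whose high and low halves are XOR-folded together
 * (the "Mum" step); the per-chunk results are summed and avalanched at the end.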
*/ 3543 XXH_FORCE_INLINE XXH64_hash_t 3544 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, 3545 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 3546 XXH64_hash_t seed) 3547 { 3548 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 3549 XXH_ASSERT(16 < len && len <= 128); 3550 3551 { xxh_u64 acc = len * XXH_PRIME64_1; 3552 if (len > 32) { 3553 if (len > 64) { 3554 if (len > 96) { 3555 acc += XXH3_mix16B(input+48, secret+96, seed); 3556 acc += XXH3_mix16B(input+len-64, secret+112, seed); 3557 } 3558 acc += XXH3_mix16B(input+32, secret+64, seed); 3559 acc += XXH3_mix16B(input+len-48, secret+80, seed); 3560 } 3561 acc += XXH3_mix16B(input+16, secret+32, seed); 3562 acc += XXH3_mix16B(input+len-32, secret+48, seed); 3563 } 3564 acc += XXH3_mix16B(input+0, secret+0, seed); 3565 acc += XXH3_mix16B(input+len-16, secret+16, seed); 3566 3567 return XXH3_avalanche(acc); 3568 } 3569 } 3570 3571 #define XXH3_MIDSIZE_MAX 240 3572 3573 XXH_NO_INLINE XXH64_hash_t 3574 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, 3575 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 3576 XXH64_hash_t seed) 3577 { 3578 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 3579 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); 3580 3581 #define XXH3_MIDSIZE_STARTOFFSET 3 3582 #define XXH3_MIDSIZE_LASTOFFSET 17 3583 3584 { xxh_u64 acc = len * XXH_PRIME64_1; 3585 int const nbRounds = (int)len / 16; 3586 int i; 3587 for (i=0; i<8; i++) { 3588 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); 3589 } 3590 acc = XXH3_avalanche(acc); 3591 XXH_ASSERT(nbRounds >= 8); 3592 #if defined(__clang__) /* Clang */ \ 3593 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ 3594 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ 3595 /* 3596 * UGLY HACK: 3597 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. 3598 * Everywhere else, it uses scalar code. 3599 * 3600 * For 64->128-bit multiplies, even if the NEON code were 100% optimal, it 3601 * would still be slower than UMAAL (see XXH_mult64to128). 3602 * 3603 * Unfortunately, Clang doesn't handle the long multiplies properly and 3604 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then 3605 * scalarized into an ugly mess of VMOV.32 instructions. 3606 * 3607 * Such messes are difficult to avoid without turning autovectorization 3608 * off completely, but they are usually relatively minor and/or not 3609 * worth fixing. 3610 * 3611 * This loop is the easiest to fix, as unlike XXH32, this pragma 3612 * _actually works_ because it is a loop vectorization instead of an 3613 * SLP vectorization.
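 * (The loop vectorizer is the pass that turns whole loop iterations into
 *  vector iterations and honors the pragma below; the SLP vectorizer only
 *  packs adjacent scalar statements and is not affected by it.)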
3614 */ 3615 #pragma clang loop vectorize(disable) 3616 #endif 3617 for (i=8 ; i < nbRounds; i++) { 3618 acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); 3619 } 3620 /* last bytes */ 3621 acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); 3622 return XXH3_avalanche(acc); 3623 } 3624 } 3625 3626 3627 /* ======= Long Keys ======= */ 3628 3629 #define XXH_STRIPE_LEN 64 3630 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ 3631 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) 3632 3633 #ifdef XXH_OLD_NAMES 3634 # define STRIPE_LEN XXH_STRIPE_LEN 3635 # define ACC_NB XXH_ACC_NB 3636 #endif 3637 3638 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) 3639 { 3640 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); 3641 memcpy(dst, &v64, sizeof(v64)); 3642 } 3643 3644 /* Several intrinsic functions below are supposed to accept __int64 as argument, 3645 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . 3646 * However, several environments do not define __int64 type, 3647 * requiring a workaround. 3648 */ 3649 #if !defined (__VMS) \ 3650 && (defined (__cplusplus) \ 3651 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) 3652 typedef int64_t xxh_i64; 3653 #else 3654 /* the following type must have a width of 64-bit */ 3655 typedef long long xxh_i64; 3656 #endif 3657 3658 /* 3659 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. 3660 * 3661 * It is a hardened version of UMAC, based off of FARSH's implementation. 3662 * 3663 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD 3664 * implementations, and it is ridiculously fast. 3665 * 3666 * We harden it by mixing the original input to the accumulators as well as the product. 3667 * 3668 * This means that in the (relatively likely) case of a multiply by zero, the 3669 * original input is preserved. 3670 * 3671 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve 3672 * cross-pollination, as otherwise the upper and lower halves would be 3673 * essentially independent. 3674 * 3675 * This doesn't matter on 64-bit hashes since they all get merged together in 3676 * the end, so we skip the extra step. 3677 * 3678 * Both XXH3_64bits and XXH3_128bits use this subroutine. 
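 *
 * Per 64-bit lane, every variant below computes the same thing as the scalar
 * reference (XXH3_accumulate_512_scalar(), further down), roughly:
 *
 *   data_key    = input64[i] ^ secret64[i];
 *   acc[i ^ 1] += input64[i];                       (the pair swap)
 *   acc[i]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);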
3679 */ 3680 3681 #if (XXH_VECTOR == XXH_AVX512) \ 3682 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) 3683 3684 #ifndef XXH_TARGET_AVX512 3685 # define XXH_TARGET_AVX512 /* disable attribute target */ 3686 #endif 3687 3688 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 3689 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, 3690 const void* XXH_RESTRICT input, 3691 const void* XXH_RESTRICT secret) 3692 { 3693 XXH_ALIGN(64) __m512i* const xacc = (__m512i *) acc; 3694 XXH_ASSERT((((size_t)acc) & 63) == 0); 3695 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); 3696 3697 { 3698 /* data_vec = input[0]; */ 3699 __m512i const data_vec = _mm512_loadu_si512 (input); 3700 /* key_vec = secret[0]; */ 3701 __m512i const key_vec = _mm512_loadu_si512 (secret); 3702 /* data_key = data_vec ^ key_vec; */ 3703 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); 3704 /* data_key_lo = data_key >> 32; */ 3705 __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); 3706 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ 3707 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); 3708 /* xacc[0] += swap(data_vec); */ 3709 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); 3710 __m512i const sum = _mm512_add_epi64(*xacc, data_swap); 3711 /* xacc[0] += product; */ 3712 *xacc = _mm512_add_epi64(product, sum); 3713 } 3714 } 3715 3716 /* 3717 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. 3718 * 3719 * Multiplication isn't perfect, as explained by Google in HighwayHash: 3720 * 3721 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to 3722 * // varying degrees. In descending order of goodness, bytes 3723 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. 3724 * // As expected, the upper and lower bytes are much worse. 3725 * 3726 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 3727 * 3728 * Since our algorithm uses a pseudorandom secret to add some variance into the 3729 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. 3730 * 3731 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid 3732 * extraction. 3733 * 3734 * Both XXH3_64bits and XXH3_128bits use this subroutine. 
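 *
 * Per 64-bit lane, the operation matches the scalar reference
 * (XXH3_scrambleAcc_scalar(), further down):
 *
 *   acc[i] ^= acc[i] >> 47;
 *   acc[i] ^= secret64[i];
 *   acc[i] *= XXH_PRIME32_1;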
3735 */ 3736 3737 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 3738 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 3739 { 3740 XXH_ASSERT((((size_t)acc) & 63) == 0); 3741 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); 3742 { XXH_ALIGN(64) __m512i* const xacc = (__m512i*) acc; 3743 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); 3744 3745 /* xacc[0] ^= (xacc[0] >> 47) */ 3746 __m512i const acc_vec = *xacc; 3747 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); 3748 __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted); 3749 /* xacc[0] ^= secret; */ 3750 __m512i const key_vec = _mm512_loadu_si512 (secret); 3751 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); 3752 3753 /* xacc[0] *= XXH_PRIME32_1; */ 3754 __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); 3755 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); 3756 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); 3757 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); 3758 } 3759 } 3760 3761 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 3762 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) 3763 { 3764 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); 3765 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); 3766 XXH_ASSERT(((size_t)customSecret & 63) == 0); 3767 (void)(&XXH_writeLE64); 3768 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); 3769 __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64); 3770 3771 XXH_ALIGN(64) const __m512i* const src = (const __m512i*) XXH3_kSecret; 3772 XXH_ALIGN(64) __m512i* const dest = ( __m512i*) customSecret; 3773 int i; 3774 for (i=0; i < nbRounds; ++i) { 3775 /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*', 3776 * this will warn "discards ‘const’ qualifier". */ 3777 union { 3778 XXH_ALIGN(64) const __m512i* cp; 3779 XXH_ALIGN(64) void* p; 3780 } remote_const_void; 3781 remote_const_void.cp = src + i; 3782 dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); 3783 } } 3784 } 3785 3786 #endif 3787 3788 #if (XXH_VECTOR == XXH_AVX2) \ 3789 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) 3790 3791 #ifndef XXH_TARGET_AVX2 3792 # define XXH_TARGET_AVX2 /* disable attribute target */ 3793 #endif 3794 3795 XXH_FORCE_INLINE XXH_TARGET_AVX2 void 3796 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, 3797 const void* XXH_RESTRICT input, 3798 const void* XXH_RESTRICT secret) 3799 { 3800 XXH_ASSERT((((size_t)acc) & 31) == 0); 3801 { XXH_ALIGN(32) __m256i* const xacc = (__m256i *) acc; 3802 /* Unaligned. This is mainly for pointer arithmetic, and because 3803 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ 3804 const __m256i* const xinput = (const __m256i *) input; 3805 /* Unaligned. This is mainly for pointer arithmetic, and because 3806 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ 3807 const __m256i* const xsecret = (const __m256i *) secret; 3808 3809 size_t i; 3810 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { 3811 /* data_vec = xinput[i]; */ 3812 __m256i const data_vec = _mm256_loadu_si256 (xinput+i); 3813 /* key_vec = xsecret[i]; */ 3814 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); 3815 /* data_key = data_vec ^ key_vec; */ 3816 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); 3817 /* data_key_lo = data_key >> 32; */ 3818 __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); 3819 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ 3820 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); 3821 /* xacc[i] += swap(data_vec); */ 3822 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); 3823 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); 3824 /* xacc[i] += product; */ 3825 xacc[i] = _mm256_add_epi64(product, sum); 3826 } } 3827 } 3828 3829 XXH_FORCE_INLINE XXH_TARGET_AVX2 void 3830 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 3831 { 3832 XXH_ASSERT((((size_t)acc) & 31) == 0); 3833 { XXH_ALIGN(32) __m256i* const xacc = (__m256i*) acc; 3834 /* Unaligned. This is mainly for pointer arithmetic, and because 3835 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ 3836 const __m256i* const xsecret = (const __m256i *) secret; 3837 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); 3838 3839 size_t i; 3840 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { 3841 /* xacc[i] ^= (xacc[i] >> 47) */ 3842 __m256i const acc_vec = xacc[i]; 3843 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); 3844 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); 3845 /* xacc[i] ^= xsecret; */ 3846 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); 3847 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); 3848 3849 /* xacc[i] *= XXH_PRIME32_1; */ 3850 __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); 3851 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); 3852 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); 3853 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); 3854 } 3855 } 3856 } 3857 3858 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) 3859 { 3860 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); 3861 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); 3862 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); 3863 (void)(&XXH_writeLE64); 3864 XXH_PREFETCH(customSecret); 3865 { __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64, -(xxh_i64)seed64, (xxh_i64)seed64); 3866 3867 XXH_ALIGN(64) const __m256i* const src = (const __m256i*) XXH3_kSecret; 3868 XXH_ALIGN(64) __m256i* dest = ( __m256i*) customSecret; 3869 3870 # if defined(__GNUC__) || defined(__clang__) 3871 /* 3872 * On GCC & Clang, marking 'dest' as modified will cause the compiler: 3873 * - do not extract the secret from sse registers in the internal loop 3874 * - use less common registers, and avoid pushing these reg into stack 3875 * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with 3876 * customSecret, and on aarch64, this prevented LDP from merging two 3877 * loads together for free. Putting the loads together before the stores 3878 * properly generates LDP. 
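 *
 * The empty asm statement below emits no instruction: the "+r" constraint
 * merely tells the compiler that 'dest' is read and rewritten, so the pointer
 * has to be treated as opaque (a common optimization-barrier idiom).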
3879 */ 3880 __asm__("" : "+r" (dest)); 3881 # endif 3882 3883 /* GCC -O2 need unroll loop manually */ 3884 dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed); 3885 dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed); 3886 dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed); 3887 dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed); 3888 dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed); 3889 dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed); 3890 } 3891 } 3892 3893 #endif 3894 3895 /* x86dispatch always generates SSE2 */ 3896 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) 3897 3898 #ifndef XXH_TARGET_SSE2 3899 # define XXH_TARGET_SSE2 /* disable attribute target */ 3900 #endif 3901 3902 XXH_FORCE_INLINE XXH_TARGET_SSE2 void 3903 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, 3904 const void* XXH_RESTRICT input, 3905 const void* XXH_RESTRICT secret) 3906 { 3907 /* SSE2 is just a half-scale version of the AVX2 version. */ 3908 XXH_ASSERT((((size_t)acc) & 15) == 0); 3909 { XXH_ALIGN(16) __m128i* const xacc = (__m128i *) acc; 3910 /* Unaligned. This is mainly for pointer arithmetic, and because 3911 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ 3912 const __m128i* const xinput = (const __m128i *) input; 3913 /* Unaligned. This is mainly for pointer arithmetic, and because 3914 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ 3915 const __m128i* const xsecret = (const __m128i *) secret; 3916 3917 size_t i; 3918 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { 3919 /* data_vec = xinput[i]; */ 3920 __m128i const data_vec = _mm_loadu_si128 (xinput+i); 3921 /* key_vec = xsecret[i]; */ 3922 __m128i const key_vec = _mm_loadu_si128 (xsecret+i); 3923 /* data_key = data_vec ^ key_vec; */ 3924 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); 3925 /* data_key_lo = data_key >> 32; */ 3926 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); 3927 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ 3928 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); 3929 /* xacc[i] += swap(data_vec); */ 3930 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); 3931 __m128i const sum = _mm_add_epi64(xacc[i], data_swap); 3932 /* xacc[i] += product; */ 3933 xacc[i] = _mm_add_epi64(product, sum); 3934 } } 3935 } 3936 3937 XXH_FORCE_INLINE XXH_TARGET_SSE2 void 3938 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 3939 { 3940 XXH_ASSERT((((size_t)acc) & 15) == 0); 3941 { XXH_ALIGN(16) __m128i* const xacc = (__m128i*) acc; 3942 /* Unaligned. This is mainly for pointer arithmetic, and because 3943 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ 3944 const __m128i* const xsecret = (const __m128i *) secret; 3945 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); 3946 3947 size_t i; 3948 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { 3949 /* xacc[i] ^= (xacc[i] >> 47) */ 3950 __m128i const acc_vec = xacc[i]; 3951 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); 3952 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); 3953 /* xacc[i] ^= xsecret[i]; */ 3954 __m128i const key_vec = _mm_loadu_si128 (xsecret+i); 3955 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); 3956 3957 /* xacc[i] *= XXH_PRIME32_1; */ 3958 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); 3959 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); 3960 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); 3961 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); 3962 } 3963 } 3964 } 3965 3966 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) 3967 { 3968 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); 3969 (void)(&XXH_writeLE64); 3970 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); 3971 3972 # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 3973 // MSVC 32bit mode does not support _mm_set_epi64x before 2015 3974 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, -(xxh_i64)seed64 }; 3975 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); 3976 # else 3977 __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64); 3978 # endif 3979 int i; 3980 3981 XXH_ALIGN(64) const float* const src = (float const*) XXH3_kSecret; 3982 XXH_ALIGN(XXH_SEC_ALIGN) __m128i* dest = (__m128i*) customSecret; 3983 # if defined(__GNUC__) || defined(__clang__) 3984 /* 3985 * On GCC & Clang, marking 'dest' as modified will cause the compiler: 3986 * - do not extract the secret from sse registers in the internal loop 3987 * - use less common registers, and avoid pushing these reg into stack 3988 */ 3989 __asm__("" : "+r" (dest)); 3990 # endif 3991 3992 for (i=0; i < nbRounds; ++i) { 3993 dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src+i*4)), seed); 3994 } } 3995 } 3996 3997 #endif 3998 3999 #if (XXH_VECTOR == XXH_NEON) 4000 4001 XXH_FORCE_INLINE void 4002 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, 4003 const void* XXH_RESTRICT input, 4004 const void* XXH_RESTRICT secret) 4005 { 4006 XXH_ASSERT((((size_t)acc) & 15) == 0); 4007 { 4008 XXH_ALIGN(16) uint64x2_t* const xacc = (uint64x2_t *) acc; 4009 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. 
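 * (Dereferencing a vector-typed pointer lets the compiler assume the type's
 *  natural alignment and emit alignment-hinted loads, which fault on
 *  unaligned addresses on ARMv7; vld1q_u8() on a plain byte pointer makes no
 *  such promise.)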
*/ 4010 uint8_t const* const xinput = (const uint8_t *) input; 4011 uint8_t const* const xsecret = (const uint8_t *) secret; 4012 4013 size_t i; 4014 for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) { 4015 /* data_vec = xinput[i]; */ 4016 uint8x16_t data_vec = vld1q_u8(xinput + (i * 16)); 4017 /* key_vec = xsecret[i]; */ 4018 uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); 4019 uint64x2_t data_key; 4020 uint32x2_t data_key_lo, data_key_hi; 4021 /* xacc[i] += swap(data_vec); */ 4022 uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); 4023 uint64x2_t const swapped = vextq_u64(data64, data64, 1); 4024 xacc[i] = vaddq_u64 (xacc[i], swapped); 4025 /* data_key = data_vec ^ key_vec; */ 4026 data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); 4027 /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); 4028 * data_key_hi = (uint32x2_t) (data_key >> 32); 4029 * data_key = UNDEFINED; */ 4030 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); 4031 /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ 4032 xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); 4033 4034 } 4035 } 4036 } 4037 4038 XXH_FORCE_INLINE void 4039 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 4040 { 4041 XXH_ASSERT((((size_t)acc) & 15) == 0); 4042 4043 { uint64x2_t* xacc = (uint64x2_t*) acc; 4044 uint8_t const* xsecret = (uint8_t const*) secret; 4045 uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1); 4046 4047 size_t i; 4048 for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) { 4049 /* xacc[i] ^= (xacc[i] >> 47); */ 4050 uint64x2_t acc_vec = xacc[i]; 4051 uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47); 4052 uint64x2_t data_vec = veorq_u64 (acc_vec, shifted); 4053 4054 /* xacc[i] ^= xsecret[i]; */ 4055 uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); 4056 uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec)); 4057 4058 /* xacc[i] *= XXH_PRIME32_1 */ 4059 uint32x2_t data_key_lo, data_key_hi; 4060 /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF); 4061 * data_key_hi = (uint32x2_t) (xacc[i] >> 32); 4062 * xacc[i] = UNDEFINED; */ 4063 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); 4064 { /* 4065 * prod_hi = (data_key >> 32) * XXH_PRIME32_1; 4066 * 4067 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will 4068 * incorrectly "optimize" this: 4069 * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b)); 4070 * shifted = vshll_n_u32(tmp, 32); 4071 * to this: 4072 * tmp = "vmulq_u64"(a, b); // no such thing! 4073 * shifted = vshlq_n_u64(tmp, 32); 4074 * 4075 * However, unlike SSE, Clang lacks a 64-bit multiply routine 4076 * for NEON, and it scalarizes two 64-bit multiplies instead. 4077 * 4078 * vmull_u32 has the same timing as vmul_u32, and it avoids 4079 * this bug completely. 
4080 * See https://bugs.llvm.org/show_bug.cgi?id=39967 4081 */ 4082 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime); 4083 /* xacc[i] = prod_hi << 32; */ 4084 xacc[i] = vshlq_n_u64(prod_hi, 32); 4085 /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */ 4086 xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime); 4087 } 4088 } } 4089 } 4090 4091 #endif 4092 4093 #if (XXH_VECTOR == XXH_VSX) 4094 4095 XXH_FORCE_INLINE void 4096 XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, 4097 const void* XXH_RESTRICT input, 4098 const void* XXH_RESTRICT secret) 4099 { 4100 xxh_u64x2* const xacc = (xxh_u64x2*) acc; /* presumed aligned */ 4101 xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */ 4102 xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */ 4103 xxh_u64x2 const v32 = { 32, 32 }; 4104 size_t i; 4105 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { 4106 /* data_vec = xinput[i]; */ 4107 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i); 4108 /* key_vec = xsecret[i]; */ 4109 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); 4110 xxh_u64x2 const data_key = data_vec ^ key_vec; 4111 /* shuffled = (data_key << 32) | (data_key >> 32); */ 4112 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); 4113 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ 4114 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); 4115 xacc[i] += product; 4116 4117 /* swap high and low halves */ 4118 #ifdef __s390x__ 4119 xacc[i] += vec_permi(data_vec, data_vec, 2); 4120 #else 4121 xacc[i] += vec_xxpermdi(data_vec, data_vec, 2); 4122 #endif 4123 } 4124 } 4125 4126 XXH_FORCE_INLINE void 4127 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 4128 { 4129 XXH_ASSERT((((size_t)acc) & 15) == 0); 4130 4131 { xxh_u64x2* const xacc = (xxh_u64x2*) acc; 4132 const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret; 4133 /* constants */ 4134 xxh_u64x2 const v32 = { 32, 32 }; 4135 xxh_u64x2 const v47 = { 47, 47 }; 4136 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; 4137 size_t i; 4138 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { 4139 /* xacc[i] ^= (xacc[i] >> 47); */ 4140 xxh_u64x2 const acc_vec = xacc[i]; 4141 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); 4142 4143 /* xacc[i] ^= xsecret[i]; */ 4144 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); 4145 xxh_u64x2 const data_key = data_vec ^ key_vec; 4146 4147 /* xacc[i] *= XXH_PRIME32_1 */ 4148 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ 4149 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); 4150 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ 4151 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); 4152 xacc[i] = prod_odd + (prod_even << v32); 4153 } } 4154 } 4155 4156 #endif 4157 4158 /* scalar variants - universal */ 4159 4160 XXH_FORCE_INLINE void 4161 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, 4162 const void* XXH_RESTRICT input, 4163 const void* XXH_RESTRICT secret) 4164 { 4165 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ 4166 const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */ 4167 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ 4168 size_t i; 4169 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); 4170 for (i=0; i < 
XXH_ACC_NB; i++) { 4171 xxh_u64 const data_val = XXH_readLE64(xinput + 8*i); 4172 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8); 4173 xacc[i ^ 1] += data_val; /* swap adjacent lanes */ 4174 xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); 4175 } 4176 } 4177 4178 XXH_FORCE_INLINE void 4179 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 4180 { 4181 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ 4182 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ 4183 size_t i; 4184 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); 4185 for (i=0; i < XXH_ACC_NB; i++) { 4186 xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i); 4187 xxh_u64 acc64 = xacc[i]; 4188 acc64 = XXH_xorshift64(acc64, 47); 4189 acc64 ^= key64; 4190 acc64 *= XXH_PRIME32_1; 4191 xacc[i] = acc64; 4192 } 4193 } 4194 4195 XXH_FORCE_INLINE void 4196 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) 4197 { 4198 /* 4199 * We need a separate pointer for the hack below, 4200 * which requires a non-const pointer. 4201 * Any decent compiler will optimize this out otherwise. 4202 */ 4203 const xxh_u8* kSecretPtr = XXH3_kSecret; 4204 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); 4205 4206 #if defined(__clang__) && defined(__aarch64__) 4207 /* 4208 * UGLY HACK: 4209 * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are 4210 * placed sequentially, in order, at the top of the unrolled loop. 4211 * 4212 * While MOVK is great for generating constants (2 cycles for a 64-bit 4213 * constant compared to 4 cycles for LDR), long MOVK chains stall the 4214 * integer pipelines: 4215 * I L S 4216 * MOVK 4217 * MOVK 4218 * MOVK 4219 * MOVK 4220 * ADD 4221 * SUB STR 4222 * STR 4223 * By forcing loads from memory (as the asm line causes Clang to assume 4224 * that XXH3_kSecretPtr has been changed), the pipelines are used more 4225 * efficiently: 4226 * I L S 4227 * LDR 4228 * ADD LDR 4229 * SUB STR 4230 * STR 4231 * XXH3_64bits_withSeed, len == 256, Snapdragon 835 4232 * without hack: 2654.4 MB/s 4233 * with hack: 3202.9 MB/s 4234 */ 4235 __asm__("" : "+r" (kSecretPtr)); 4236 #endif 4237 /* 4238 * Note: in debug mode, this overrides the asm optimization 4239 * and Clang will emit MOVK chains again. 4240 */ 4241 XXH_ASSERT(kSecretPtr == XXH3_kSecret); 4242 4243 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; 4244 int i; 4245 for (i=0; i < nbRounds; i++) { 4246 /* 4247 * The asm hack causes Clang to assume that kSecretPtr aliases with 4248 * customSecret, and on aarch64, this prevented LDP from merging two 4249 * loads together for free. Putting the loads together before the stores 4250 * properly generates LDP. 
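 *
 * The net effect of this loop, and of every XXH3_initCustomSecret_*()
 * variant above, is simply, per 16-byte chunk:
 *
 *   customSecret[16*i    .. 16*i+7 ] = kSecret[16*i    .. 16*i+7 ] + seed64
 *   customSecret[16*i+8  .. 16*i+15] = kSecret[16*i+8  .. 16*i+15] - seed64
 *
 * with both halves handled as little-endian 64-bit values.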
4251 */ 4252 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; 4253 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; 4254 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); 4255 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); 4256 } } 4257 } 4258 4259 4260 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*); 4261 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); 4262 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); 4263 4264 4265 #if (XXH_VECTOR == XXH_AVX512) 4266 4267 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512 4268 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 4269 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 4270 4271 #elif (XXH_VECTOR == XXH_AVX2) 4272 4273 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2 4274 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 4275 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 4276 4277 #elif (XXH_VECTOR == XXH_SSE2) 4278 4279 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2 4280 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 4281 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 4282 4283 #elif (XXH_VECTOR == XXH_NEON) 4284 4285 #define XXH3_accumulate_512 XXH3_accumulate_512_neon 4286 #define XXH3_scrambleAcc XXH3_scrambleAcc_neon 4287 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 4288 4289 #elif (XXH_VECTOR == XXH_VSX) 4290 4291 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx 4292 #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx 4293 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 4294 4295 #else /* scalar */ 4296 4297 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar 4298 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar 4299 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 4300 4301 #endif 4302 4303 4304 4305 #ifndef XXH_PREFETCH_DIST 4306 # ifdef __clang__ 4307 # define XXH_PREFETCH_DIST 320 4308 # else 4309 # if (XXH_VECTOR == XXH_AVX512) 4310 # define XXH_PREFETCH_DIST 512 4311 # else 4312 # define XXH_PREFETCH_DIST 384 4313 # endif 4314 # endif /* __clang__ */ 4315 #endif /* XXH_PREFETCH_DIST */ 4316 4317 /* 4318 * XXH3_accumulate() 4319 * Loops over XXH3_accumulate_512(). 
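 * A stripe is XXH_STRIPE_LEN (64) input bytes; the secret pointer advances by
 * XXH_SECRET_CONSUME_RATE (8) bytes per stripe, so stripe n reads secret
 * bytes [8*n, 8*n + 64).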
4320 * Assumption: nbStripes will not overflow the secret size 4321 */ 4322 XXH_FORCE_INLINE void 4323 XXH3_accumulate( xxh_u64* XXH_RESTRICT acc, 4324 const xxh_u8* XXH_RESTRICT input, 4325 const xxh_u8* XXH_RESTRICT secret, 4326 size_t nbStripes, 4327 XXH3_f_accumulate_512 f_acc512) 4328 { 4329 size_t n; 4330 for (n = 0; n < nbStripes; n++ ) { 4331 const xxh_u8* const in = input + n*XXH_STRIPE_LEN; 4332 XXH_PREFETCH(in + XXH_PREFETCH_DIST); 4333 f_acc512(acc, 4334 in, 4335 secret + n*XXH_SECRET_CONSUME_RATE); 4336 } 4337 } 4338 4339 XXH_FORCE_INLINE void 4340 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, 4341 const xxh_u8* XXH_RESTRICT input, size_t len, 4342 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 4343 XXH3_f_accumulate_512 f_acc512, 4344 XXH3_f_scrambleAcc f_scramble) 4345 { 4346 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; 4347 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; 4348 size_t const nb_blocks = (len - 1) / block_len; 4349 4350 size_t n; 4351 4352 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); 4353 4354 for (n = 0; n < nb_blocks; n++) { 4355 XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512); 4356 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); 4357 } 4358 4359 /* last partial block */ 4360 XXH_ASSERT(len > XXH_STRIPE_LEN); 4361 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; 4362 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); 4363 XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512); 4364 4365 /* last stripe */ 4366 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; 4367 #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ 4368 f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); 4369 } } 4370 } 4371 4372 XXH_FORCE_INLINE xxh_u64 4373 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) 4374 { 4375 return XXH3_mul128_fold64( 4376 acc[0] ^ XXH_readLE64(secret), 4377 acc[1] ^ XXH_readLE64(secret+8) ); 4378 } 4379 4380 static XXH64_hash_t 4381 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) 4382 { 4383 xxh_u64 result64 = start; 4384 size_t i = 0; 4385 4386 for (i = 0; i < 4; i++) { 4387 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); 4388 #if defined(__clang__) /* Clang */ \ 4389 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ 4390 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ 4391 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ 4392 /* 4393 * UGLY HACK: 4394 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as 4395 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. 
4396 * XXH3_64bits, len == 256, Snapdragon 835: 4397 * without hack: 2063.7 MB/s 4398 * with hack: 2560.7 MB/s 4399 */ 4400 __asm__("" : "+r" (result64)); 4401 #endif 4402 } 4403 4404 return XXH3_avalanche(result64); 4405 } 4406 4407 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ 4408 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } 4409 4410 XXH_FORCE_INLINE XXH64_hash_t 4411 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, 4412 const void* XXH_RESTRICT secret, size_t secretSize, 4413 XXH3_f_accumulate_512 f_acc512, 4414 XXH3_f_scrambleAcc f_scramble) 4415 { 4416 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; 4417 4418 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble); 4419 4420 /* converge into final hash */ 4421 XXH_STATIC_ASSERT(sizeof(acc) == 64); 4422 /* do not align on 8, so that the secret is different from the accumulator */ 4423 #define XXH_SECRET_MERGEACCS_START 11 4424 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); 4425 return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); 4426 } 4427 4428 /* 4429 * It's important for performance that XXH3_hashLong is not inlined. 4430 */ 4431 XXH_NO_INLINE XXH64_hash_t 4432 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, 4433 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) 4434 { 4435 (void)seed64; 4436 return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc); 4437 } 4438 4439 /* 4440 * It's important for performance that XXH3_hashLong is not inlined. 4441 * Since the function is not inlined, the compiler may not be able to understand that, 4442 * in some scenarios, its `secret` argument is actually a compile time constant. 4443 * This variant enforces that the compiler can detect that, 4444 * and uses this opportunity to streamline the generated code for better performance. 4445 */ 4446 XXH_NO_INLINE XXH64_hash_t 4447 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, 4448 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) 4449 { 4450 (void)seed64; (void)secret; (void)secretLen; 4451 return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc); 4452 } 4453 4454 /* 4455 * XXH3_hashLong_64b_withSeed(): 4456 * Generate a custom key based on alteration of default XXH3_kSecret with the seed, 4457 * and then use this key for long mode hashing. 4458 * 4459 * This operation is decently fast but nonetheless costs a little bit of time. 4460 * Try to avoid it whenever possible (typically when seed==0). 4461 * 4462 * It's important for performance that XXH3_hashLong is not inlined. Not sure 4463 * why (uop cache maybe?), but the difference is large and easily measurable. 
4464 */ 4465 XXH_FORCE_INLINE XXH64_hash_t 4466 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, 4467 XXH64_hash_t seed, 4468 XXH3_f_accumulate_512 f_acc512, 4469 XXH3_f_scrambleAcc f_scramble, 4470 XXH3_f_initCustomSecret f_initSec) 4471 { 4472 if (seed == 0) 4473 return XXH3_hashLong_64b_internal(input, len, 4474 XXH3_kSecret, sizeof(XXH3_kSecret), 4475 f_acc512, f_scramble); 4476 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; 4477 f_initSec(secret, seed); 4478 return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), 4479 f_acc512, f_scramble); 4480 } 4481 } 4482 4483 /* 4484 * It's important for performance that XXH3_hashLong is not inlined. 4485 */ 4486 XXH_NO_INLINE XXH64_hash_t 4487 XXH3_hashLong_64b_withSeed(const void* input, size_t len, 4488 XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen) 4489 { 4490 (void)secret; (void)secretLen; 4491 return XXH3_hashLong_64b_withSeed_internal(input, len, seed, 4492 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); 4493 } 4494 4495 4496 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, 4497 XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); 4498 4499 XXH_FORCE_INLINE XXH64_hash_t 4500 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, 4501 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, 4502 XXH3_hashLong64_f f_hashLong) 4503 { 4504 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); 4505 /* 4506 * If an action is to be taken if `secretLen` condition is not respected, 4507 * it should be done here. 4508 * For now, it's a contract pre-condition. 4509 * Adding a check and a branch here would cost performance at every hash. 4510 * Also, note that function signature doesn't offer room to return an error. 4511 */ 4512 if (len <= 16) 4513 return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); 4514 if (len <= 128) 4515 return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 4516 if (len <= XXH3_MIDSIZE_MAX) 4517 return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 4518 return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); 4519 } 4520 4521 4522 /* === Public entry point === */ 4523 4524 /*! @ingroup xxh3_family */ 4525 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) 4526 { 4527 return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); 4528 } 4529 4530 /*! @ingroup xxh3_family */ 4531 XXH_PUBLIC_API XXH64_hash_t 4532 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) 4533 { 4534 return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); 4535 } 4536 4537 /*! @ingroup xxh3_family */ 4538 XXH_PUBLIC_API XXH64_hash_t 4539 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) 4540 { 4541 return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); 4542 } 4543 4544 4545 /* === XXH3 streaming === */ 4546 4547 /* 4548 * Malloc's a pointer that is always aligned to align. 4549 * 4550 * This must be freed with `XXH_alignedFree()`. 4551 * 4552 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte 4553 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2 4554 * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON. 
4555 * 4556 * This underalignment previously caused a rather obvious crash which went 4557 * completely unnoticed due to XXH3_createState() not actually being tested. 4558 * Credit to RedSpah for noticing this bug. 4559 * 4560 * The alignment is done manually: Functions like posix_memalign or _mm_malloc 4561 * are avoided: To maintain portability, we would have to write a fallback 4562 * like this anyways, and besides, testing for the existence of library 4563 * functions without relying on external build tools is impossible. 4564 * 4565 * The method is simple: Overallocate, manually align, and store the offset 4566 * to the original behind the returned pointer. 4567 * 4568 * Align must be a power of 2 and 8 <= align <= 128. 4569 */ 4570 static void* XXH_alignedMalloc(size_t s, size_t align) 4571 { 4572 XXH_ASSERT(align <= 128 && align >= 8); /* range check */ 4573 XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ 4574 XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ 4575 { /* Overallocate to make room for manual realignment and an offset byte */ 4576 xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); 4577 if (base != NULL) { 4578 /* 4579 * Get the offset needed to align this pointer. 4580 * 4581 * Even if the returned pointer is aligned, there will always be 4582 * at least one byte to store the offset to the original pointer. 4583 */ 4584 size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ 4585 /* Add the offset for the now-aligned pointer */ 4586 xxh_u8* ptr = base + offset; 4587 4588 XXH_ASSERT((size_t)ptr % align == 0); 4589 4590 /* Store the offset immediately before the returned pointer. */ 4591 ptr[-1] = (xxh_u8)offset; 4592 return ptr; 4593 } 4594 return NULL; 4595 } 4596 } 4597 /* 4598 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass 4599 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. 4600 */ 4601 static void XXH_alignedFree(void* p) 4602 { 4603 if (p != NULL) { 4604 xxh_u8* ptr = (xxh_u8*)p; 4605 /* Get the offset byte we added in XXH_malloc. */ 4606 xxh_u8 offset = ptr[-1]; 4607 /* Free the original malloc'd pointer */ 4608 xxh_u8* base = ptr - offset; 4609 XXH_free(base); 4610 } 4611 } 4612 /*! @ingroup xxh3_family */ 4613 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) 4614 { 4615 XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); 4616 if (state==NULL) return NULL; 4617 XXH3_INITSTATE(state); 4618 return state; 4619 } 4620 4621 /*! @ingroup xxh3_family */ 4622 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) 4623 { 4624 XXH_alignedFree(statePtr); 4625 return XXH_OK; 4626 } 4627 4628 /*! 
@ingroup xxh3_family */ 4629 XXH_PUBLIC_API void 4630 XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) 4631 { 4632 memcpy(dst_state, src_state, sizeof(*dst_state)); 4633 } 4634 4635 static void 4636 XXH3_reset_internal(XXH3_state_t* statePtr, 4637 XXH64_hash_t seed, 4638 const void* secret, size_t secretSize) 4639 { 4640 size_t const initStart = offsetof(XXH3_state_t, bufferedSize); 4641 size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; 4642 XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); 4643 XXH_ASSERT(statePtr != NULL); 4644 /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ 4645 memset((char*)statePtr + initStart, 0, initLength); 4646 statePtr->acc[0] = XXH_PRIME32_3; 4647 statePtr->acc[1] = XXH_PRIME64_1; 4648 statePtr->acc[2] = XXH_PRIME64_2; 4649 statePtr->acc[3] = XXH_PRIME64_3; 4650 statePtr->acc[4] = XXH_PRIME64_4; 4651 statePtr->acc[5] = XXH_PRIME32_2; 4652 statePtr->acc[6] = XXH_PRIME64_5; 4653 statePtr->acc[7] = XXH_PRIME32_1; 4654 statePtr->seed = seed; 4655 statePtr->extSecret = (const unsigned char*)secret; 4656 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); 4657 statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; 4658 statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; 4659 } 4660 4661 /*! @ingroup xxh3_family */ 4662 XXH_PUBLIC_API XXH_errorcode 4663 XXH3_64bits_reset(XXH3_state_t* statePtr) 4664 { 4665 if (statePtr == NULL) return XXH_ERROR; 4666 XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); 4667 return XXH_OK; 4668 } 4669 4670 /*! @ingroup xxh3_family */ 4671 XXH_PUBLIC_API XXH_errorcode 4672 XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) 4673 { 4674 if (statePtr == NULL) return XXH_ERROR; 4675 XXH3_reset_internal(statePtr, 0, secret, secretSize); 4676 if (secret == NULL) return XXH_ERROR; 4677 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; 4678 return XXH_OK; 4679 } 4680 4681 /*! 
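 * Starts a new streaming session keyed by a 64-bit seed. A minimal sketch of
 * seeded streaming (the `read_chunk` function and `buf` buffer are
 * hypothetical stand-ins for the caller's input source):
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st != NULL && XXH3_64bits_reset_withSeed(st, 0x1234ULL) == XXH_OK) {
 *         size_t n;
 *         while ((n = read_chunk(buf, sizeof(buf))) > 0)
 *             XXH3_64bits_update(st, buf, n);
 *         {   XXH64_hash_t const h = XXH3_64bits_digest(st);
 *             (void)h;  // matches XXH3_64bits_withSeed() over the same bytes and seed
 *         }
 *     }
 *     XXH3_freeState(st);
 *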
@ingroup xxh3_family */ 4682 XXH_PUBLIC_API XXH_errorcode 4683 XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) 4684 { 4685 if (statePtr == NULL) return XXH_ERROR; 4686 if (seed==0) return XXH3_64bits_reset(statePtr); 4687 if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed); 4688 XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); 4689 return XXH_OK; 4690 } 4691 4692 /* Note : when XXH3_consumeStripes() is invoked, 4693 * there must be a guarantee that at least one more byte must be consumed from input 4694 * so that the function can blindly consume all stripes using the "normal" secret segment */ 4695 XXH_FORCE_INLINE void 4696 XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, 4697 size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, 4698 const xxh_u8* XXH_RESTRICT input, size_t nbStripes, 4699 const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, 4700 XXH3_f_accumulate_512 f_acc512, 4701 XXH3_f_scrambleAcc f_scramble) 4702 { 4703 XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */ 4704 XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); 4705 if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) { 4706 /* need a scrambling operation */ 4707 size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr; 4708 size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock; 4709 XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512); 4710 f_scramble(acc, secret + secretLimit); 4711 XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512); 4712 *nbStripesSoFarPtr = nbStripesAfterBlock; 4713 } else { 4714 XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512); 4715 *nbStripesSoFarPtr += nbStripes; 4716 } 4717 } 4718 4719 /* 4720 * Both XXH3_64bits_update and XXH3_128bits_update use this routine. 4721 */ 4722 XXH_FORCE_INLINE XXH_errorcode 4723 XXH3_update(XXH3_state_t* state, 4724 const xxh_u8* input, size_t len, 4725 XXH3_f_accumulate_512 f_acc512, 4726 XXH3_f_scrambleAcc f_scramble) 4727 { 4728 if (input==NULL) 4729 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) 4730 return XXH_OK; 4731 #else 4732 return XXH_ERROR; 4733 #endif 4734 4735 { const xxh_u8* const bEnd = input + len; 4736 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; 4737 4738 state->totalLen += len; 4739 XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); 4740 4741 if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ 4742 XXH_memcpy(state->buffer + state->bufferedSize, input, len); 4743 state->bufferedSize += (XXH32_hash_t)len; 4744 return XXH_OK; 4745 } 4746 /* total input is now > XXH3_INTERNALBUFFER_SIZE */ 4747 4748 #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) 4749 XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ 4750 4751 /* 4752 * Internal buffer is partially filled (always, except at beginning) 4753 * Complete it, then consume it. 
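 * For instance (numbers purely illustrative): with bufferedSize == 100, the
 * next (XXH3_INTERNALBUFFER_SIZE - 100) input bytes first complete the
 * buffer, which is then consumed as XXH3_INTERNALBUFFER_STRIPES stripes of
 * XXH_STRIPE_LEN bytes each, before the rest of the input is processed.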
4754 */ 4755 if (state->bufferedSize) { 4756 size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; 4757 XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); 4758 input += loadSize; 4759 XXH3_consumeStripes(state->acc, 4760 &state->nbStripesSoFar, state->nbStripesPerBlock, 4761 state->buffer, XXH3_INTERNALBUFFER_STRIPES, 4762 secret, state->secretLimit, 4763 f_acc512, f_scramble); 4764 state->bufferedSize = 0; 4765 } 4766 XXH_ASSERT(input < bEnd); 4767 4768 /* Consume input by a multiple of internal buffer size */ 4769 if (input+XXH3_INTERNALBUFFER_SIZE < bEnd) { 4770 const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; 4771 do { 4772 XXH3_consumeStripes(state->acc, 4773 &state->nbStripesSoFar, state->nbStripesPerBlock, 4774 input, XXH3_INTERNALBUFFER_STRIPES, 4775 secret, state->secretLimit, 4776 f_acc512, f_scramble); 4777 input += XXH3_INTERNALBUFFER_SIZE; 4778 } while (input<limit); 4779 /* for last partial stripe */ 4780 memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); 4781 } 4782 XXH_ASSERT(input < bEnd); 4783 4784 /* Some remaining input (always) : buffer it */ 4785 XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); 4786 state->bufferedSize = (XXH32_hash_t)(bEnd-input); 4787 } 4788 4789 return XXH_OK; 4790 } 4791 4792 /*! @ingroup xxh3_family */ 4793 XXH_PUBLIC_API XXH_errorcode 4794 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len) 4795 { 4796 return XXH3_update(state, (const xxh_u8*)input, len, 4797 XXH3_accumulate_512, XXH3_scrambleAcc); 4798 } 4799 4800 4801 XXH_FORCE_INLINE void 4802 XXH3_digest_long (XXH64_hash_t* acc, 4803 const XXH3_state_t* state, 4804 const unsigned char* secret) 4805 { 4806 /* 4807 * Digest on a local copy. This way, the state remains unaltered, and it can 4808 * continue ingesting more input afterwards. 4809 */ 4810 memcpy(acc, state->acc, sizeof(state->acc)); 4811 if (state->bufferedSize >= XXH_STRIPE_LEN) { 4812 size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; 4813 size_t nbStripesSoFar = state->nbStripesSoFar; 4814 XXH3_consumeStripes(acc, 4815 &nbStripesSoFar, state->nbStripesPerBlock, 4816 state->buffer, nbStripes, 4817 secret, state->secretLimit, 4818 XXH3_accumulate_512, XXH3_scrambleAcc); 4819 /* last stripe */ 4820 XXH3_accumulate_512(acc, 4821 state->buffer + state->bufferedSize - XXH_STRIPE_LEN, 4822 secret + state->secretLimit - XXH_SECRET_LASTACC_START); 4823 } else { /* bufferedSize < XXH_STRIPE_LEN */ 4824 xxh_u8 lastStripe[XXH_STRIPE_LEN]; 4825 size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; 4826 XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ 4827 memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); 4828 memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); 4829 XXH3_accumulate_512(acc, 4830 lastStripe, 4831 secret + state->secretLimit - XXH_SECRET_LASTACC_START); 4832 } 4833 } 4834 4835 /*! @ingroup xxh3_family */ 4836 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) 4837 { 4838 const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret; 4839 if (state->totalLen > XXH3_MIDSIZE_MAX) { 4840 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; 4841 XXH3_digest_long(acc, state, secret); 4842 return XXH3_mergeAccs(acc, 4843 secret + XXH_SECRET_MERGEACCS_START, 4844 (xxh_u64)state->totalLen * XXH_PRIME64_1); 4845 } 4846 /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ 4847 if (state->seed) 4848 return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); 4849 return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), 4850 secret, state->secretLimit + XXH_STRIPE_LEN); 4851 } 4852 4853 4854 #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) 4855 4856 /*! @ingroup xxh3_family */ 4857 XXH_PUBLIC_API void 4858 XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize) 4859 { 4860 XXH_ASSERT(secretBuffer != NULL); 4861 if (customSeedSize == 0) { 4862 memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); 4863 return; 4864 } 4865 XXH_ASSERT(customSeed != NULL); 4866 4867 { size_t const segmentSize = sizeof(XXH128_hash_t); 4868 size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize; 4869 XXH128_canonical_t scrambler; 4870 XXH64_hash_t seeds[12]; 4871 size_t segnb; 4872 XXH_ASSERT(nbSegments == 12); 4873 XXH_ASSERT(segmentSize * nbSegments == XXH_SECRET_DEFAULT_SIZE); /* exact multiple */ 4874 XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); 4875 4876 /* 4877 * Copy customSeed to seeds[], truncating or repeating as necessary. 4878 */ 4879 { size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds)); 4880 size_t filled = toFill; 4881 memcpy(seeds, customSeed, toFill); 4882 while (filled < sizeof(seeds)) { 4883 toFill = XXH_MIN(filled, sizeof(seeds) - filled); 4884 memcpy((char*)seeds + filled, seeds, toFill); 4885 filled += toFill; 4886 } } 4887 4888 /* generate secret */ 4889 memcpy(secretBuffer, &scrambler, sizeof(scrambler)); 4890 for (segnb=1; segnb < nbSegments; segnb++) { 4891 size_t const segmentStart = segnb * segmentSize; 4892 XXH128_canonical_t segment; 4893 XXH128_canonicalFromHash(&segment, 4894 XXH128(&scrambler, sizeof(scrambler), XXH_readLE64(seeds + segnb) + segnb) ); 4895 memcpy((char*)secretBuffer + segmentStart, &segment, sizeof(segment)); 4896 } } 4897 } 4898 4899 4900 /* ========================================== 4901 * XXH3 128 bits (a.k.a XXH128) 4902 * ========================================== 4903 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, 4904 * even without counting the significantly larger output size. 4905 * 4906 * For example, extra steps are taken to avoid the seed-dependent collisions 4907 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). 4908 * 4909 * This strength naturally comes at the cost of some speed, especially on short 4910 * lengths. Note that longer hashes are about as fast as the 64-bit version 4911 * due to it using only a slight modification of the 64-bit loop. 4912 * 4913 * XXH128 is also more oriented towards 64-bit machines. It is still extremely 4914 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). 4915 */ 4916 4917 XXH_FORCE_INLINE XXH128_hash_t 4918 XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4919 { 4920 /* A doubled version of 1to3_64b with different constants. 
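 * The low half keys combinedl with the first 8 secret bytes, while the high
 * half keys a byte-swapped, rotated copy (combinedh) with the next 8 secret
 * bytes, so the two 64-bit lanes avalanche independently.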
*/ 4921 XXH_ASSERT(input != NULL); 4922 XXH_ASSERT(1 <= len && len <= 3); 4923 XXH_ASSERT(secret != NULL); 4924 /* 4925 * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } 4926 * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } 4927 * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } 4928 */ 4929 { xxh_u8 const c1 = input[0]; 4930 xxh_u8 const c2 = input[len >> 1]; 4931 xxh_u8 const c3 = input[len - 1]; 4932 xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) 4933 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); 4934 xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); 4935 xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; 4936 xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; 4937 xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; 4938 xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; 4939 XXH128_hash_t h128; 4940 h128.low64 = XXH64_avalanche(keyed_lo); 4941 h128.high64 = XXH64_avalanche(keyed_hi); 4942 return h128; 4943 } 4944 } 4945 4946 XXH_FORCE_INLINE XXH128_hash_t 4947 XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4948 { 4949 XXH_ASSERT(input != NULL); 4950 XXH_ASSERT(secret != NULL); 4951 XXH_ASSERT(4 <= len && len <= 8); 4952 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; 4953 { xxh_u32 const input_lo = XXH_readLE32(input); 4954 xxh_u32 const input_hi = XXH_readLE32(input + len - 4); 4955 xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); 4956 xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; 4957 xxh_u64 const keyed = input_64 ^ bitflip; 4958 4959 /* Shift len to the left to ensure it is even, this avoids even multiplies. */ 4960 XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); 4961 4962 m128.high64 += (m128.low64 << 1); 4963 m128.low64 ^= (m128.high64 >> 3); 4964 4965 m128.low64 = XXH_xorshift64(m128.low64, 35); 4966 m128.low64 *= 0x9FB21C651E98DF25ULL; 4967 m128.low64 = XXH_xorshift64(m128.low64, 28); 4968 m128.high64 = XXH3_avalanche(m128.high64); 4969 return m128; 4970 } 4971 } 4972 4973 XXH_FORCE_INLINE XXH128_hash_t 4974 XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4975 { 4976 XXH_ASSERT(input != NULL); 4977 XXH_ASSERT(secret != NULL); 4978 XXH_ASSERT(9 <= len && len <= 16); 4979 { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; 4980 xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; 4981 xxh_u64 const input_lo = XXH_readLE64(input); 4982 xxh_u64 input_hi = XXH_readLE64(input + len - 8); 4983 XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); 4984 /* 4985 * Put len in the middle of m128 to ensure that the length gets mixed to 4986 * both the low and high bits in the 128x64 multiply below. 4987 */ 4988 m128.low64 += (xxh_u64)(len - 1) << 54; 4989 input_hi ^= bitfliph; 4990 /* 4991 * Add the high 32 bits of input_hi to the high 32 bits of m128, then 4992 * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to 4993 * the high 64 bits of m128. 4994 * 4995 * The best approach to this operation is different on 32-bit and 64-bit. 4996 */ 4997 if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ 4998 /* 4999 * 32-bit optimized version, which is more readable. 
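 * It keeps the upper 32 bits of input_hi in place and multiplies only the
 * lower 32 bits by XXH_PRIME32_2, adding both terms to m128.high64.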
5000 * 5001 * On 32-bit, it removes an ADC and delays a dependency between the two 5002 * halves of m128.high64, but it generates an extra mask on 64-bit. 5003 */ 5004 m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); 5005 } else { 5006 /* 5007 * 64-bit optimized (albeit more confusing) version. 5008 * 5009 * Uses some properties of addition and multiplication to remove the mask: 5010 * 5011 * Let: 5012 * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) 5013 * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) 5014 * c = XXH_PRIME32_2 5015 * 5016 * a + (b * c) 5017 * Inverse Property: x + y - x == y 5018 * a + (b * (1 + c - 1)) 5019 * Distributive Property: x * (y + z) == (x * y) + (x * z) 5020 * a + (b * 1) + (b * (c - 1)) 5021 * Identity Property: x * 1 == x 5022 * a + b + (b * (c - 1)) 5023 * 5024 * Substitute a, b, and c: 5025 * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) 5026 * 5027 * Since input_hi.hi + input_hi.lo == input_hi, we get this: 5028 * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) 5029 */ 5030 m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); 5031 } 5032 /* m128 ^= XXH_swap64(m128 >> 64); */ 5033 m128.low64 ^= XXH_swap64(m128.high64); 5034 5035 { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ 5036 XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); 5037 h128.high64 += m128.high64 * XXH_PRIME64_2; 5038 5039 h128.low64 = XXH3_avalanche(h128.low64); 5040 h128.high64 = XXH3_avalanche(h128.high64); 5041 return h128; 5042 } } 5043 } 5044 5045 /* 5046 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN 5047 */ 5048 XXH_FORCE_INLINE XXH128_hash_t 5049 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 5050 { 5051 XXH_ASSERT(len <= 16); 5052 { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); 5053 if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); 5054 if (len) return XXH3_len_1to3_128b(input, len, secret, seed); 5055 { XXH128_hash_t h128; 5056 xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); 5057 xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); 5058 h128.low64 = XXH64_avalanche(seed ^ bitflipl); 5059 h128.high64 = XXH64_avalanche( seed ^ bitfliph); 5060 return h128; 5061 } } 5062 } 5063 5064 /* 5065 * A bit slower than XXH3_mix16B, but handles multiply by zero better. 
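 * Even if one of the internal multiplies degenerates (e.g. an operand that
 * happens to be zero), the raw 16 bytes of each input are still XORed, as a
 * pair of 64-bit additions, into the opposite accumulator lane, so neither
 * half of the 128-bit state can lose an input's contribution entirely.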
5066 */ 5067 XXH_FORCE_INLINE XXH128_hash_t 5068 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, 5069 const xxh_u8* secret, XXH64_hash_t seed) 5070 { 5071 acc.low64 += XXH3_mix16B (input_1, secret+0, seed); 5072 acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); 5073 acc.high64 += XXH3_mix16B (input_2, secret+16, seed); 5074 acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); 5075 return acc; 5076 } 5077 5078 5079 XXH_FORCE_INLINE XXH128_hash_t 5080 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, 5081 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 5082 XXH64_hash_t seed) 5083 { 5084 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 5085 XXH_ASSERT(16 < len && len <= 128); 5086 5087 { XXH128_hash_t acc; 5088 acc.low64 = len * XXH_PRIME64_1; 5089 acc.high64 = 0; 5090 if (len > 32) { 5091 if (len > 64) { 5092 if (len > 96) { 5093 acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); 5094 } 5095 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); 5096 } 5097 acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); 5098 } 5099 acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); 5100 { XXH128_hash_t h128; 5101 h128.low64 = acc.low64 + acc.high64; 5102 h128.high64 = (acc.low64 * XXH_PRIME64_1) 5103 + (acc.high64 * XXH_PRIME64_4) 5104 + ((len - seed) * XXH_PRIME64_2); 5105 h128.low64 = XXH3_avalanche(h128.low64); 5106 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); 5107 return h128; 5108 } 5109 } 5110 } 5111 5112 XXH_NO_INLINE XXH128_hash_t 5113 XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, 5114 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 5115 XXH64_hash_t seed) 5116 { 5117 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 5118 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); 5119 5120 { XXH128_hash_t acc; 5121 int const nbRounds = (int)len / 32; 5122 int i; 5123 acc.low64 = len * XXH_PRIME64_1; 5124 acc.high64 = 0; 5125 for (i=0; i<4; i++) { 5126 acc = XXH128_mix32B(acc, 5127 input + (32 * i), 5128 input + (32 * i) + 16, 5129 secret + (32 * i), 5130 seed); 5131 } 5132 acc.low64 = XXH3_avalanche(acc.low64); 5133 acc.high64 = XXH3_avalanche(acc.high64); 5134 XXH_ASSERT(nbRounds >= 4); 5135 for (i=4 ; i < nbRounds; i++) { 5136 acc = XXH128_mix32B(acc, 5137 input + (32 * i), 5138 input + (32 * i) + 16, 5139 secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)), 5140 seed); 5141 } 5142 /* last bytes */ 5143 acc = XXH128_mix32B(acc, 5144 input + len - 16, 5145 input + len - 32, 5146 secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, 5147 0ULL - seed); 5148 5149 { XXH128_hash_t h128; 5150 h128.low64 = acc.low64 + acc.high64; 5151 h128.high64 = (acc.low64 * XXH_PRIME64_1) 5152 + (acc.high64 * XXH_PRIME64_4) 5153 + ((len - seed) * XXH_PRIME64_2); 5154 h128.low64 = XXH3_avalanche(h128.low64); 5155 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); 5156 return h128; 5157 } 5158 } 5159 } 5160 5161 XXH_FORCE_INLINE XXH128_hash_t 5162 XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, 5163 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 5164 XXH3_f_accumulate_512 f_acc512, 5165 XXH3_f_scrambleAcc f_scramble) 5166 { 5167 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; 5168 5169 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble); 5170 5171 /* converge into final hash */ 5172 
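    /*
     * Both halves are derived from the same accumulators but with distinct key
     * material: low64 merges with the secret at XXH_SECRET_MERGEACCS_START and
     * folds in len * XXH_PRIME64_1, while high64 merges with a slice taken
     * from the end of the secret and folds in ~(len * XXH_PRIME64_2).
     */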
XXH_STATIC_ASSERT(sizeof(acc) == 64); 5173 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); 5174 { XXH128_hash_t h128; 5175 h128.low64 = XXH3_mergeAccs(acc, 5176 secret + XXH_SECRET_MERGEACCS_START, 5177 (xxh_u64)len * XXH_PRIME64_1); 5178 h128.high64 = XXH3_mergeAccs(acc, 5179 secret + secretSize 5180 - sizeof(acc) - XXH_SECRET_MERGEACCS_START, 5181 ~((xxh_u64)len * XXH_PRIME64_2)); 5182 return h128; 5183 } 5184 } 5185 5186 /* 5187 * It's important for performance that XXH3_hashLong is not inlined. 5188 */ 5189 XXH_NO_INLINE XXH128_hash_t 5190 XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, 5191 XXH64_hash_t seed64, 5192 const void* XXH_RESTRICT secret, size_t secretLen) 5193 { 5194 (void)seed64; (void)secret; (void)secretLen; 5195 return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), 5196 XXH3_accumulate_512, XXH3_scrambleAcc); 5197 } 5198 5199 /* 5200 * It's important for performance that XXH3_hashLong is not inlined. 5201 */ 5202 XXH_NO_INLINE XXH128_hash_t 5203 XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, 5204 XXH64_hash_t seed64, 5205 const void* XXH_RESTRICT secret, size_t secretLen) 5206 { 5207 (void)seed64; 5208 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, 5209 XXH3_accumulate_512, XXH3_scrambleAcc); 5210 } 5211 5212 XXH_FORCE_INLINE XXH128_hash_t 5213 XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, 5214 XXH64_hash_t seed64, 5215 XXH3_f_accumulate_512 f_acc512, 5216 XXH3_f_scrambleAcc f_scramble, 5217 XXH3_f_initCustomSecret f_initSec) 5218 { 5219 if (seed64 == 0) 5220 return XXH3_hashLong_128b_internal(input, len, 5221 XXH3_kSecret, sizeof(XXH3_kSecret), 5222 f_acc512, f_scramble); 5223 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; 5224 f_initSec(secret, seed64); 5225 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), 5226 f_acc512, f_scramble); 5227 } 5228 } 5229 5230 /* 5231 * It's important for performance that XXH3_hashLong is not inlined. 5232 */ 5233 XXH_NO_INLINE XXH128_hash_t 5234 XXH3_hashLong_128b_withSeed(const void* input, size_t len, 5235 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) 5236 { 5237 (void)secret; (void)secretLen; 5238 return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, 5239 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); 5240 } 5241 5242 typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, 5243 XXH64_hash_t, const void* XXH_RESTRICT, size_t); 5244 5245 XXH_FORCE_INLINE XXH128_hash_t 5246 XXH3_128bits_internal(const void* input, size_t len, 5247 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, 5248 XXH3_hashLong128_f f_hl128) 5249 { 5250 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); 5251 /* 5252 * If an action is to be taken if `secret` conditions are not respected, 5253 * it should be done here. 5254 * For now, it's a contract pre-condition. 5255 * Adding a check and a branch here would cost performance at every hash. 
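 * Also, note that the function signature doesn't offer room to return an error.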
5256 */ 5257 if (len <= 16) 5258 return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); 5259 if (len <= 128) 5260 return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 5261 if (len <= XXH3_MIDSIZE_MAX) 5262 return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 5263 return f_hl128(input, len, seed64, secret, secretLen); 5264 } 5265 5266 5267 /* === Public XXH128 API === */ 5268 5269 /*! @ingroup xxh3_family */ 5270 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) 5271 { 5272 return XXH3_128bits_internal(input, len, 0, 5273 XXH3_kSecret, sizeof(XXH3_kSecret), 5274 XXH3_hashLong_128b_default); 5275 } 5276 5277 /*! @ingroup xxh3_family */ 5278 XXH_PUBLIC_API XXH128_hash_t 5279 XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) 5280 { 5281 return XXH3_128bits_internal(input, len, 0, 5282 (const xxh_u8*)secret, secretSize, 5283 XXH3_hashLong_128b_withSecret); 5284 } 5285 5286 /*! @ingroup xxh3_family */ 5287 XXH_PUBLIC_API XXH128_hash_t 5288 XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) 5289 { 5290 return XXH3_128bits_internal(input, len, seed, 5291 XXH3_kSecret, sizeof(XXH3_kSecret), 5292 XXH3_hashLong_128b_withSeed); 5293 } 5294 5295 /*! @ingroup xxh3_family */ 5296 XXH_PUBLIC_API XXH128_hash_t 5297 XXH128(const void* input, size_t len, XXH64_hash_t seed) 5298 { 5299 return XXH3_128bits_withSeed(input, len, seed); 5300 } 5301 5302 5303 /* === XXH3 128-bit streaming === */ 5304 5305 /* 5306 * All the functions are actually the same as for the 64-bit streaming variant. 5307 * The only difference is the finalization routine. 5308 */ 5309 5310 /*! @ingroup xxh3_family */ 5311 XXH_PUBLIC_API XXH_errorcode 5312 XXH3_128bits_reset(XXH3_state_t* statePtr) 5313 { 5314 if (statePtr == NULL) return XXH_ERROR; 5315 XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); 5316 return XXH_OK; 5317 } 5318 5319 /*! @ingroup xxh3_family */ 5320 XXH_PUBLIC_API XXH_errorcode 5321 XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) 5322 { 5323 if (statePtr == NULL) return XXH_ERROR; 5324 XXH3_reset_internal(statePtr, 0, secret, secretSize); 5325 if (secret == NULL) return XXH_ERROR; 5326 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; 5327 return XXH_OK; 5328 } 5329 5330 /*! @ingroup xxh3_family */ 5331 XXH_PUBLIC_API XXH_errorcode 5332 XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) 5333 { 5334 if (statePtr == NULL) return XXH_ERROR; 5335 if (seed==0) return XXH3_128bits_reset(statePtr); 5336 if (seed != statePtr->seed) XXH3_initCustomSecret(statePtr->customSecret, seed); 5337 XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); 5338 return XXH_OK; 5339 } 5340 5341 /*! @ingroup xxh3_family */ 5342 XXH_PUBLIC_API XXH_errorcode 5343 XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) 5344 { 5345 return XXH3_update(state, (const xxh_u8*)input, len, 5346 XXH3_accumulate_512, XXH3_scrambleAcc); 5347 } 5348 5349 /*! @ingroup xxh3_family */ 5350 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) 5351 { 5352 const unsigned char* const secret = (state->extSecret == NULL) ?
state->customSecret : state->extSecret; 5353 if (state->totalLen > XXH3_MIDSIZE_MAX) { 5354 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; 5355 XXH3_digest_long(acc, state, secret); 5356 XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); 5357 { XXH128_hash_t h128; 5358 h128.low64 = XXH3_mergeAccs(acc, 5359 secret + XXH_SECRET_MERGEACCS_START, 5360 (xxh_u64)state->totalLen * XXH_PRIME64_1); 5361 h128.high64 = XXH3_mergeAccs(acc, 5362 secret + state->secretLimit + XXH_STRIPE_LEN 5363 - sizeof(acc) - XXH_SECRET_MERGEACCS_START, 5364 ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); 5365 return h128; 5366 } 5367 } 5368 /* len <= XXH3_MIDSIZE_MAX : short code */ 5369 if (state->seed) 5370 return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); 5371 return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), 5372 secret, state->secretLimit + XXH_STRIPE_LEN); 5373 } 5374 5375 /* 128-bit utility functions */ 5376 5377 #include <string.h> /* memcmp, memcpy */ 5378 5379 /* return : 1 is equal, 0 if different */ 5380 /*! @ingroup xxh3_family */ 5381 XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) 5382 { 5383 /* note : XXH128_hash_t is compact, it has no padding byte */ 5384 return !(memcmp(&h1, &h2, sizeof(h1))); 5385 } 5386 5387 /* This prototype is compatible with stdlib's qsort(). 5388 * return : >0 if *h128_1 > *h128_2 5389 * <0 if *h128_1 < *h128_2 5390 * =0 if *h128_1 == *h128_2 */ 5391 /*! @ingroup xxh3_family */ 5392 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) 5393 { 5394 XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; 5395 XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; 5396 int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); 5397 /* note : bets that, in most cases, hash values are different */ 5398 if (hcmp) return hcmp; 5399 return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); 5400 } 5401 5402 5403 /*====== Canonical representation ======*/ 5404 /*! @ingroup xxh3_family */ 5405 XXH_PUBLIC_API void 5406 XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) 5407 { 5408 XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); 5409 if (XXH_CPU_LITTLE_ENDIAN) { 5410 hash.high64 = XXH_swap64(hash.high64); 5411 hash.low64 = XXH_swap64(hash.low64); 5412 } 5413 memcpy(dst, &hash.high64, sizeof(hash.high64)); 5414 memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); 5415 } 5416 5417 /*! @ingroup xxh3_family */ 5418 XXH_PUBLIC_API XXH128_hash_t 5419 XXH128_hashFromCanonical(const XXH128_canonical_t* src) 5420 { 5421 XXH128_hash_t h; 5422 h.high64 = XXH_readBE64(src); 5423 h.low64 = XXH_readBE64(src->digest + 8); 5424 return h; 5425 } 5426 5427 /* Pop our optimization override from above */ 5428 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ 5429 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ 5430 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ 5431 # pragma GCC pop_options 5432 #endif 5433 5434 #endif /* XXH_NO_LONG_LONG */ 5435 5436 /*! 5437 * @} 5438 */ 5439 #endif /* XXH_IMPLEMENTATION */ 5440 5441 5442 #if defined (__cplusplus) 5443 } 5444 #endif
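
/*
 * Illustrative one-shot usage of the public XXH3/XXH128 entry points defined
 * above (a minimal sketch, not part of the original header; `data` and `size`
 * stand for whatever buffer the caller wants to hash):
 *
 *     XXH64_hash_t  const h64  = XXH3_64bits(data, size);
 *     XXH128_hash_t const h128 = XXH128(data, size, 0xABCDULL);  // seeded 128-bit
 *     (void)h64;
 *
 *     {   XXH128_canonical_t canon;
 *         XXH128_canonicalFromHash(&canon, h128);        // fixed big-endian layout
 *         {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *             int const same = XXH128_isEqual(back, h128);  // 1 : round-trip is lossless
 *             (void)same;
 *         }
 *     }
 */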