
Merge pull request #5366 from ThomasWaldmann/upgrade-xxhash-1.1

upgrade bundled xxhash to 0.7.4
TW, 4 years ago
commit 2333047728
2 changed files with 331 additions and 196 deletions
  1. src/borg/algorithms/xxh64/xxhash.c (+1 -1)
  2. src/borg/algorithms/xxh64/xxhash.h (+330 -195)

+ 1 - 1
src/borg/algorithms/xxh64/xxhash.c

@@ -1,6 +1,6 @@
 /*
  * xxHash - Extremely Fast Hash algorithm
- * Copyright (C) 2012-present, Yann Collet
+ * Copyright (C) 2012-2020 Yann Collet
  *
  * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
  *

+ 330 - 195
src/borg/algorithms/xxh64/xxhash.h

@@ -1,7 +1,7 @@
 /*
  * xxHash - Extremely Fast Hash algorithm
  * Header File
- * Copyright (C) 2012-present, Yann Collet.
+ * Copyright (C) 2012-2020 Yann Collet
  *
  * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
  *
@@ -221,7 +221,7 @@ extern "C" {
 ***************************************/
 #define XXH_VERSION_MAJOR    0
 #define XXH_VERSION_MINOR    7
-#define XXH_VERSION_RELEASE  3
+#define XXH_VERSION_RELEASE  4
 #define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
 XXH_PUBLIC_API unsigned XXH_versionNumber (void);
 
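For context: callers can check which bundled version they got at runtime via XXH_versionNumber(). A minimal sketch (the main() harness is illustrative, not part of the patch):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        /* MAJOR*100*100 + MINOR*100 + RELEASE: v0.7.4 encodes as 704 */
        printf("xxhash version number: %u\n", XXH_versionNumber());
        return 0;
    }
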
@@ -260,6 +260,10 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
  *  The memory between input & input+length must be valid (allocated and read-accessible).
  *  "seed" can be used to alter the result predictably.
  *  Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results. It provides a superior level of
+ * dispersion, and greatly reduces the risks of collisions.
  */
 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
 
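For reference, one-shot XXH32 usage looks like this (hypothetical harness, not part of the patch):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "hello world";
        /* the seed (here 0) alters the result predictably */
        XXH32_hash_t const h = XXH32(msg, sizeof(msg) - 1, 0);
        printf("XXH32 = %08x\n", (unsigned)h);
        return 0;
    }
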
@@ -342,8 +346,13 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
  * Returns the 64-bit hash of sequence of length @length stored at memory
  * address @input.
  * @seed can be used to alter the result predictably.
+ *
  * This function usually runs faster on 64-bit systems, but slower on 32-bit
  * systems (see benchmark).
+ *
+ * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results. It provides a superior level of
+ * dispersion, and greatly reduces the risks of collisions.
  */
 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed);
 
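And the 64-bit one-shot equivalent (same illustrative harness):

    #include <stdio.h>
    #include "xxhash.h"

    int main(void)
    {
        const char msg[] = "hello world";
        XXH64_hash_t const h = XXH64(msg, sizeof(msg) - 1, 0 /* seed */);
        printf("XXH64 = %016llx\n", (unsigned long long)h);
        return 0;
    }
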
@@ -358,7 +367,7 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void*
 XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
 
 /*******   Canonical representation   *******/
-typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
 
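The sizeof-based digest width above is the same 8 bytes as before, so nothing changes on disk; canonical form remains the fixed big-endian byte order for storage and interchange. A sketch (helper name made up):

    #include <string.h>
    #include "xxhash.h"

    /* serialize a hash in a fixed byte order, then recover it on any platform */
    static void store_and_load(XXH64_hash_t h, unsigned char out[8])
    {
        XXH64_canonical_t canon;
        XXH64_canonicalFromHash(&canon, h);                  /* big-endian layout */
        memcpy(out, canon.digest, sizeof(canon.digest));     /* safe to write to disk */
        XXH64_hash_t const back = XXH64_hashFromCanonical(&canon);  /* back == h */
        (void)back;
    }
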
@@ -439,11 +448,11 @@ struct XXH64_state_s {
  * It benefits greatly from SIMD and 64-bit arithmetic, but does not require it.
  *
  * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run
- * XXH3 at usable speeds, even if XXH64 runs slowly. Further details are
+ * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are
  * explained in the implementation.
  *
- * Optimized implementations are provided for AVX2, SSE2, NEON, POWER8, ZVector,
- * and scalar targets. This can be controlled with the XXH_VECTOR macro.
+ * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
+ * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro.
  *
  * XXH3 offers 2 variants, _64bits and _128bits.
  * When only 64 bits are needed, prefer calling the _64bits variant, as it
@@ -461,6 +470,10 @@ struct XXH64_state_s {
  * ephemeral data (local sessions).
  *
  * Avoid storing values in long-term storage until the algorithm is finalized.
+ * XXH3's return values will be officially finalized upon reaching v0.8.0.
+ *
+ * After which, return values of XXH3 and XXH128 will no longer change in
+ * future versions.
  *
  * The API supports one-shot hashing, streaming mode, and custom secrets.
  */
@@ -479,6 +492,8 @@ struct XXH64_state_s {
 #  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
 #  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
 #  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+
+#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
 #endif
 
 /* XXH3_64bits():
@@ -486,20 +501,6 @@ struct XXH64_state_s {
  * It's the fastest variant. */
 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
 
-/*
- * XXH3_64bits_withSecret():
- * It's possible to provide any blob of bytes as a "secret" to generate the hash.
- * This makes it more difficult for an external actor to prepare an intentional
- * collision.
- * The secret *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
- * It should consist of random bytes.
- * Avoid trivial sequences, such as repeating sequences and especially '\0',
- * as this can cancel out itself.
- * Failure to respect these conditions will result in a poor quality hash.
- */
-#define XXH3_SECRET_SIZE_MIN 136
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
-
 /*
  * XXH3_64bits_withSeed():
  * This variant generates a custom secret on the fly based on the default
@@ -509,6 +510,22 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len,
  */
 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
 
+/*
+ * XXH3_64bits_withSecret():
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the hash highly depends on the secret's entropy.
+ * Technically, the secret must look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever unsure about the "randomness" of the blob of bytes,
+ * consider relabelling it as a "custom seed" instead,
+ * and employ "XXH3_generateSecret()" (see below)
+ * to generate a high quality secret derived from this custom seed.
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
+
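As a usage sketch (helper name is made up): the seeded variant needs no secret management, while the secret-based variant above trades setup cost for stronger collision resistance:

    #include "xxhash.h"

    /* hypothetical helper: per-application seeded hashing */
    static XXH64_hash_t hash_with_seed(const void* data, size_t len, XXH64_hash_t seed)
    {
        /* seed == 0 matches plain XXH3_64bits(data, len) */
        return XXH3_64bits_withSeed(data, len, seed);
    }
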
 
 /* streaming 64-bit */
 
@@ -533,26 +550,25 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, X
 
 typedef struct XXH3_state_s XXH3_state_t;
 
-#define XXH3_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
 #define XXH3_INTERNALBUFFER_SIZE 256
+#define XXH3_SECRET_DEFAULT_SIZE 192
 struct XXH3_state_s {
    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
-  /* used to store a custom secret generated from the seed. Makes state larger.
-   * Design might change */
+  /* used to store a custom secret generated from a seed */
    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
    XXH32_hash_t bufferedSize;
-   XXH32_hash_t nbStripesPerBlock;
-   XXH32_hash_t nbStripesSoFar;
-   XXH32_hash_t secretLimit;
    XXH32_hash_t reserved32;
-   XXH32_hash_t reserved32_2;
+   size_t nbStripesPerBlock;
+   size_t nbStripesSoFar;
+   size_t secretLimit;
    XXH64_hash_t totalLen;
    XXH64_hash_t seed;
    XXH64_hash_t reserved64;
-   /* note: there is some padding after due to alignment on 64 bytes */
-   const unsigned char* secret;
-};   /* typedef'd to XXH3_state_t */
+   const unsigned char* extSecret;  /* reference to external secret;
+                                     * if == NULL, use .customSecret instead */
+   /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
 
 #undef XXH_ALIGN_MEMBER
 
@@ -581,9 +597,12 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
 /*
  * XXH3_64bits_reset_withSecret():
- * `secret` is referenced, and must outlive the hash streaming session, so
- * be careful when using stack arrays.
- * `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`.
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of the hash depends on secret's entropy,
+ * meaning that the secret should look like a bunch of random bytes.
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
  */
 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
 
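The streaming variants follow the usual create/reset/update/digest pattern; a sketch hashing a FILE* in chunks (helper name made up, error handling minimal):

    #include <stdio.h>
    #include "xxhash.h"

    static XXH64_hash_t hash_stream(FILE* f)
    {
        XXH3_state_t* const st = XXH3_createState();
        char buf[4096];
        size_t n;
        XXH64_hash_t h = 0;
        if (st == NULL) return 0;
        if (XXH3_64bits_reset(st) == XXH_OK) {
            while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                (void)XXH3_64bits_update(st, buf, n);
            h = XXH3_64bits_digest(st);
        }
        XXH3_freeState(st);
        return h;
    }
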
@@ -643,20 +662,56 @@ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
  * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
  *
  * return: >0 if *h128_1  > *h128_2
- *         <0 if *h128_1  < *h128_2
  *         =0 if *h128_1 == *h128_2
+ *         <0 if *h128_1  < *h128_2
  */
 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
 
 
 /*******   Canonical representation   *******/
-typedef struct { unsigned char digest[16]; } XXH128_canonical_t;
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
 XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
 XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
 
 
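Since XXH128_cmp follows the qsort()/bsearch() contract, ordering 128-bit hashes is a one-liner; a sketch with a made-up helper name:

    #include <stdlib.h>
    #include "xxhash.h"

    static void sort_hashes(XXH128_hash_t* hashes, size_t count)
    {
        /* XXH128_cmp has the int(const void*, const void*) signature qsort expects */
        qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
    }
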
+/* ===   Experimental API   === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
+ * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
+ * into an already allocated buffer secretBuffer.
+ * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE bytes long.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
+ * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
+ * are part of this list. They all accept a `secret` parameter
+ * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so
+ * this function can be used to generate a secret of proper quality.
+ *
+ * customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even a stupidly "low entropy" source such as a bunch of zeroes.
+ * The resulting `secret` will nonetheless provide all expected qualities.
+ *
+ * Supplying NULL as the customSeed copies the default secret into `secretBuffer`.
+ * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize);
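Putting the pieces together, a sketch of the intended workflow (helper name is made up): derive a proper secret once from any seed material, then reuse it with the `_withSecret()` variants:

    #include "xxhash.h"

    static XXH64_hash_t hash_with_custom_seed(const void* data, size_t len,
                                              const void* customSeed, size_t customSeedSize)
    {
        /* XXH3_SECRET_DEFAULT_SIZE (192) satisfies XXH3_SECRET_SIZE_MIN (136) */
        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
        XXH3_generateSecret(secret, customSeed, customSeedSize);
        return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
    }
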
+
+
 #endif  /* XXH_NO_LONG_LONG */
 
+
 #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
 #  define XXH_IMPLEMENTATION
 #endif
@@ -733,7 +788,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t*
 #endif
 
 /*!
- *XXH_ACCEPT_NULL_INPUT_POINTER:
+ * XXH_ACCEPT_NULL_INPUT_POINTER:
  * If the input pointer is NULL, xxHash's default behavior is to dereference it,
  * triggering a segfault.
  * When this macro is enabled, xxHash actively checks the input for a null pointer.
@@ -745,16 +800,26 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t*
 
 /*!
  * XXH_FORCE_ALIGN_CHECK:
- * This is a minor performance trick, only useful with lots of very small keys.
- * It means: check for aligned/unaligned input.
- * The check costs one initial branch per hash;
- * Set it to 0 when the input is guaranteed to be aligned or when alignment
- * doesn't matter for performance.
+ * This is an important performance trick
+ * for architectures without decent unaligned memory access performance.
+ * It checks for input alignment, and when conditions are met,
+ * uses a "fast path" employing direct 32-bit/64-bit read,
+ * resulting in _dramatically faster_ read speed.
  *
- * This option does not affect XXH3.
+ * The check costs one initial branch per hash, which is generally negligible, but not zero.
+ * Moreover, it's not useful to generate binary for an additional code path
+ * if memory access uses the same instruction for both aligned and unaligned addresses.
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * Align check is automatically disabled on x86, x64 & arm64,
+ * which are platforms known to offer good unaligned memory access performance.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
  */
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
+#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
+   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)  /* visual */
 #    define XXH_FORCE_ALIGN_CHECK 0
 #  else
 #    define XXH_FORCE_ALIGN_CHECK 1
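When a target handles unaligned loads well but is not in the list above, the check can be disabled externally; a sketch (the define only matters where the implementation is compiled, e.g. xxhash.c here):

    /* either on the command line:  cc -DXXH_FORCE_ALIGN_CHECK=0 -c xxhash.c */
    /* or in-source, before pulling in the implementation: */
    #define XXH_FORCE_ALIGN_CHECK 0
    #define XXH_INLINE_ALL   /* pulls the implementation into this unit */
    #include "xxhash.h"
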
@@ -813,8 +878,10 @@ XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t*
  * routines for malloc() and free()
  */
 #include <stdlib.h>
+
 static void* XXH_malloc(size_t s) { return malloc(s); }
-static void  XXH_free  (void* p)  { free(p); }
+static void XXH_free(void* p) { free(p); }
+
 /*! and for memcpy() */
 #include <string.h>
 static void* XXH_memcpy(void* dest, const void* src, size_t size)
@@ -832,26 +899,27 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
 #  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
 #endif
 
-#if XXH_NO_INLINE_HINTS /* disable inlining hints */
-#  define XXH_FORCE_INLINE static
+#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
+#  if defined(__GNUC__)
+#    define XXH_FORCE_INLINE static __attribute__((unused))
+#  else
+#    define XXH_FORCE_INLINE static
+#  endif
 #  define XXH_NO_INLINE static
-#elif defined(_MSC_VER)    /* Visual Studio */
+/* enable inlining hints */
+#elif defined(_MSC_VER)  /* Visual Studio */
 #  define XXH_FORCE_INLINE static __forceinline
 #  define XXH_NO_INLINE static __declspec(noinline)
+#elif defined(__GNUC__)
+#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+#  define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined (__cplusplus) \
+  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
+#  define XXH_FORCE_INLINE static inline
+#  define XXH_NO_INLINE static
 #else
-#  if defined (__cplusplus) \
-    || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#    ifdef __GNUC__
-#      define XXH_FORCE_INLINE static inline __attribute__((always_inline))
-#      define XXH_NO_INLINE static __attribute__((noinline))
-#    else
-#      define XXH_FORCE_INLINE static inline
-#      define XXH_NO_INLINE static
-#    endif
-#  else
-#    define XXH_FORCE_INLINE static
-#    define XXH_NO_INLINE static
-#  endif /* __STDC_VERSION__ */
+#  define XXH_FORCE_INLINE static
+#  define XXH_NO_INLINE static
 #endif
 
 
@@ -860,14 +928,18 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
 *  Debug
 ***************************************/
 /*
- * DEBUGLEVEL is expected to be defined externally, typically via the compiler's
- * command line options. The value must be a number.
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
  */
-#ifndef DEBUGLEVEL
-#  define DEBUGLEVEL 0
+#ifndef XXH_DEBUGLEVEL
+#  ifdef DEBUGLEVEL /* backwards compat */
+#    define XXH_DEBUGLEVEL DEBUGLEVEL
+#  else
+#    define XXH_DEBUGLEVEL 0
+#  endif
 #endif
 
-#if (DEBUGLEVEL>=1)
+#if (XXH_DEBUGLEVEL>=1)
 #  include <assert.h>   /* note: can still be disabled with NDEBUG */
 #  define XXH_ASSERT(c)   assert(c)
 #else
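The rename keeps the knob project-prefixed; a sketch of enabling the assertions (the old spelling still works through the fallback above):

    /* on the command line:  cc -DXXH_DEBUGLEVEL=1 -c xxhash.c */
    /* or in-source, before the implementation: */
    #define XXH_DEBUGLEVEL 1   /* turns XXH_ASSERT() into assert() */
    #define XXH_INLINE_ALL
    #include "xxhash.h"
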
@@ -875,7 +947,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
 #endif
 
 /* note: use after variable declarations */
-#define XXH_STATIC_ASSERT(c)  { enum { XXH_sa = 1/(int)(!!(c)) }; }
+#define XXH_STATIC_ASSERT(c)  do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0)
 
 
 /* *************************************
@@ -885,12 +957,17 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size)
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
 # include <stdint.h>
-  typedef uint8_t  xxh_u8;
+  typedef uint8_t xxh_u8;
 #else
-  typedef unsigned char      xxh_u8;
+  typedef unsigned char xxh_u8;
 #endif
 typedef XXH32_hash_t xxh_u32;
 
+#ifdef XXH_OLD_NAMES
+#  define BYTE xxh_u8
+#  define U8   xxh_u8
+#  define U32  xxh_u32
+#endif
 
 /* ***   Memory access   *** */
 
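These aliases only exist when the implementation section is compiled; a sketch of a legacy translation unit opting in (assumes XXH_INLINE_ALL to pull the implementation in):

    #define XXH_OLD_NAMES    /* restore the historical unprefixed names */
    #define XXH_INLINE_ALL
    #include "xxhash.h"

    static U32  a;   /* alias for xxh_u32 */
    static BYTE b;   /* alias for xxh_u8  */
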
@@ -915,8 +992,14 @@ static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr;
  *
  * Currently only defined for GCC and ICC.
  */
+#ifdef XXH_OLD_NAMES
 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
-static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
+    return ((const xxh_unalign*)ptr)->u32;
+}
 
 #else
 
@@ -958,10 +1041,13 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
 #    define XXH_CPU_LITTLE_ENDIAN 0
 #  else
+/*
+ * runtime test, presumed to simplify to a constant by compiler
+ */
 static int XXH_isLittleEndian(void)
 {
     /*
-     * Nonstandard, but well-defined behavior in practice.
+     * Portable and well-defined behavior.
      * Don't use static: it is detrimental to performance.
      */
     const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
@@ -979,12 +1065,14 @@ static int XXH_isLittleEndian(void)
 ******************************************/
 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
 
-#ifndef __has_builtin
-#  define __has_builtin(x) 0
+#ifdef __has_builtin
+#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#  define XXH_HAS_BUILTIN(x) 0
 #endif
 
-#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) \
-                               && __has_builtin(__builtin_rotateleft64)
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
 #  define XXH_rotl32 __builtin_rotateleft32
 #  define XXH_rotl64 __builtin_rotateleft64
 /* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
@@ -1073,17 +1161,25 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
 /* *******************************************************************
 *  32-bit hash functions
 *********************************************************************/
-static const xxh_u32 PRIME32_1 = 0x9E3779B1U;   /* 0b10011110001101110111100110110001 */
-static const xxh_u32 PRIME32_2 = 0x85EBCA77U;   /* 0b10000101111010111100101001110111 */
-static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU;   /* 0b11000010101100101010111000111101 */
-static const xxh_u32 PRIME32_4 = 0x27D4EB2FU;   /* 0b00100111110101001110101100101111 */
-static const xxh_u32 PRIME32_5 = 0x165667B1U;   /* 0b00010110010101100110011110110001 */
+static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U;   /* 0b10011110001101110111100110110001 */
+static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U;   /* 0b10000101111010111100101001110111 */
+static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU;   /* 0b11000010101100101010111000111101 */
+static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU;   /* 0b00100111110101001110101100101111 */
+static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U;   /* 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+#  define PRIME32_1 XXH_PRIME32_1
+#  define PRIME32_2 XXH_PRIME32_2
+#  define PRIME32_3 XXH_PRIME32_3
+#  define PRIME32_4 XXH_PRIME32_4
+#  define PRIME32_5 XXH_PRIME32_5
+#endif
 
 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
 {
-    acc += input * PRIME32_2;
+    acc += input * XXH_PRIME32_2;
     acc  = XXH_rotl32(acc, 13);
-    acc *= PRIME32_1;
+    acc *= XXH_PRIME32_1;
 #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
     /*
      * UGLY HACK:
@@ -1139,9 +1235,9 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
 {
     h32 ^= h32 >> 15;
-    h32 *= PRIME32_2;
+    h32 *= XXH_PRIME32_2;
     h32 ^= h32 >> 13;
-    h32 *= PRIME32_3;
+    h32 *= XXH_PRIME32_3;
     h32 ^= h32 >> 16;
     return(h32);
 }
@@ -1151,64 +1247,66 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32)
 static xxh_u32
 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
 {
-#define PROCESS1               \
-    h32 += (*ptr++) * PRIME32_5; \
-    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+#define XXH_PROCESS1 do {                           \
+    h32 += (*ptr++) * XXH_PRIME32_5;                \
+    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;      \
+} while (0)
 
-#define PROCESS4                         \
-    h32 += XXH_get32bits(ptr) * PRIME32_3; \
-    ptr+=4;                                \
-    h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
+#define XXH_PROCESS4 do {                           \
+    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;      \
+    ptr += 4;                                   \
+    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;     \
+} while (0)
 
     /* Compact rerolled version */
     if (XXH_REROLL) {
         len &= 15;
         while (len >= 4) {
-            PROCESS4;
+            XXH_PROCESS4;
             len -= 4;
         }
         while (len > 0) {
-            PROCESS1;
+            XXH_PROCESS1;
             --len;
         }
         return XXH32_avalanche(h32);
     } else {
          switch(len&15) /* or switch(bEnd - p) */ {
-           case 12:      PROCESS4;
+           case 12:      XXH_PROCESS4;
                          /* fallthrough */
-           case 8:       PROCESS4;
+           case 8:       XXH_PROCESS4;
                          /* fallthrough */
-           case 4:       PROCESS4;
+           case 4:       XXH_PROCESS4;
                          return XXH32_avalanche(h32);
 
-           case 13:      PROCESS4;
+           case 13:      XXH_PROCESS4;
                          /* fallthrough */
-           case 9:       PROCESS4;
+           case 9:       XXH_PROCESS4;
                          /* fallthrough */
-           case 5:       PROCESS4;
-                         PROCESS1;
+           case 5:       XXH_PROCESS4;
+                         XXH_PROCESS1;
                          return XXH32_avalanche(h32);
 
-           case 14:      PROCESS4;
+           case 14:      XXH_PROCESS4;
                          /* fallthrough */
-           case 10:      PROCESS4;
+           case 10:      XXH_PROCESS4;
                          /* fallthrough */
-           case 6:       PROCESS4;
-                         PROCESS1;
-                         PROCESS1;
+           case 6:       XXH_PROCESS4;
+                         XXH_PROCESS1;
+                         XXH_PROCESS1;
                          return XXH32_avalanche(h32);
 
-           case 15:      PROCESS4;
+           case 15:      XXH_PROCESS4;
                          /* fallthrough */
-           case 11:      PROCESS4;
+           case 11:      XXH_PROCESS4;
                          /* fallthrough */
-           case 7:       PROCESS4;
+           case 7:       XXH_PROCESS4;
                          /* fallthrough */
-           case 3:       PROCESS1;
+           case 3:       XXH_PROCESS1;
                          /* fallthrough */
-           case 2:       PROCESS1;
+           case 2:       XXH_PROCESS1;
                          /* fallthrough */
-           case 1:       PROCESS1;
+           case 1:       XXH_PROCESS1;
                          /* fallthrough */
            case 0:       return XXH32_avalanche(h32);
         }
@@ -1217,6 +1315,14 @@ XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
     }
 }
 
+#ifdef XXH_OLD_NAMES
+#  define PROCESS1 XXH_PROCESS1
+#  define PROCESS4 XXH_PROCESS4
+#else
+#  undef XXH_PROCESS1
+#  undef XXH_PROCESS4
+#endif
+
 XXH_FORCE_INLINE xxh_u32
 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
 {
@@ -1232,10 +1338,10 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment
 
     if (len>=16) {
         const xxh_u8* const limit = bEnd - 15;
-        xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2;
-        xxh_u32 v2 = seed + PRIME32_2;
+        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+        xxh_u32 v2 = seed + XXH_PRIME32_2;
         xxh_u32 v3 = seed + 0;
-        xxh_u32 v4 = seed - PRIME32_1;
+        xxh_u32 v4 = seed - XXH_PRIME32_1;
 
         do {
             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
@@ -1247,7 +1353,7 @@ XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment
         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
     } else {
-        h32  = seed + PRIME32_5;
+        h32  = seed + XXH_PRIME32_5;
     }
 
     h32 += (xxh_u32)len;
@@ -1299,10 +1405,10 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t s
 {
     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
     memset(&state, 0, sizeof(state));
-    state.v1 = seed + PRIME32_1 + PRIME32_2;
-    state.v2 = seed + PRIME32_2;
+    state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+    state.v2 = seed + XXH_PRIME32_2;
     state.v3 = seed + 0;
-    state.v4 = seed - PRIME32_1;
+    state.v4 = seed - XXH_PRIME32_1;
     /* do not write into reserved, planned to be removed in a future version */
     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
     return XXH_OK;
@@ -1383,7 +1489,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
             + XXH_rotl32(state->v3, 12)
             + XXH_rotl32(state->v4, 18);
     } else {
-        h32 = state->v3 /* == seed */ + PRIME32_5;
+        h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
     }
 
     h32 += state->total_len_32;
@@ -1430,6 +1536,9 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
 
 typedef XXH64_hash_t xxh_u64;
 
+#ifdef XXH_OLD_NAMES
+#  define U64 xxh_u64
+#endif
 
 /*!
  * XXH_REROLL_XXH64:
@@ -1478,8 +1587,14 @@ static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr;
  *
  * Currently only defined for GCC and ICC.
  */
+#ifdef XXH_OLD_NAMES
 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
-static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
+    return ((const xxh_unalign64*)ptr)->u64;
+}
 
 #else
 
@@ -1568,17 +1683,25 @@ XXH_readLE64_align(const void* ptr, XXH_alignment align)
 
 /*******   xxh64   *******/
 
-static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
-static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
-static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
-static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
-static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+#  define PRIME64_1 XXH_PRIME64_1
+#  define PRIME64_2 XXH_PRIME64_2
+#  define PRIME64_3 XXH_PRIME64_3
+#  define PRIME64_4 XXH_PRIME64_4
+#  define PRIME64_5 XXH_PRIME64_5
+#endif
 
 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
 {
-    acc += input * PRIME64_2;
+    acc += input * XXH_PRIME64_2;
     acc  = XXH_rotl64(acc, 31);
-    acc *= PRIME64_1;
+    acc *= XXH_PRIME64_1;
     return acc;
 }
 
@@ -1586,16 +1709,16 @@ static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
 {
     val  = XXH64_round(0, val);
     acc ^= val;
-    acc  = acc * PRIME64_1 + PRIME64_4;
+    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
     return acc;
 }
 
 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
 {
     h64 ^= h64 >> 33;
-    h64 *= PRIME64_2;
+    h64 *= XXH_PRIME64_2;
     h64 ^= h64 >> 29;
-    h64 *= PRIME64_3;
+    h64 *= XXH_PRIME64_3;
     h64 ^= h64 >> 32;
     return h64;
 }
@@ -1606,117 +1729,119 @@ static xxh_u64 XXH64_avalanche(xxh_u64 h64)
 static xxh_u64
 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
 {
-#define PROCESS1_64            \
-    h64 ^= (*ptr++) * PRIME64_5; \
-    h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-
-#define PROCESS4_64          \
-    h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \
-    ptr+=4;                    \
-    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-
-#define PROCESS8_64 {        \
+#define XXH_PROCESS1_64 do {                                   \
+    h64 ^= (*ptr++) * XXH_PRIME64_5;                           \
+    h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;                 \
+} while (0)
+
+#define XXH_PROCESS4_64 do {                                   \
+    h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;      \
+    ptr += 4;                                              \
+    h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;     \
+} while (0)
+
+#define XXH_PROCESS8_64 do {                                   \
     xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
-    ptr+=8;                    \
-    h64 ^= k1;               \
-    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
-}
+    ptr += 8;                                              \
+    h64 ^= k1;                                             \
+    h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;     \
+} while (0)
 
     /* Rerolled version for 32-bit targets is faster and much smaller. */
     if (XXH_REROLL || XXH_REROLL_XXH64) {
         len &= 31;
         while (len >= 8) {
-            PROCESS8_64;
+            XXH_PROCESS8_64;
             len -= 8;
         }
         if (len >= 4) {
-            PROCESS4_64;
+            XXH_PROCESS4_64;
             len -= 4;
         }
         while (len > 0) {
-            PROCESS1_64;
+            XXH_PROCESS1_64;
             --len;
         }
          return  XXH64_avalanche(h64);
     } else {
         switch(len & 31) {
-           case 24: PROCESS8_64;
+           case 24: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 16: PROCESS8_64;
+           case 16: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  8: PROCESS8_64;
+           case  8: XXH_PROCESS8_64;
                     return XXH64_avalanche(h64);
 
-           case 28: PROCESS8_64;
+           case 28: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 20: PROCESS8_64;
+           case 20: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 12: PROCESS8_64;
+           case 12: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  4: PROCESS4_64;
+           case  4: XXH_PROCESS4_64;
                     return XXH64_avalanche(h64);
 
-           case 25: PROCESS8_64;
+           case 25: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 17: PROCESS8_64;
+           case 17: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  9: PROCESS8_64;
-                    PROCESS1_64;
+           case  9: XXH_PROCESS8_64;
+                    XXH_PROCESS1_64;
                     return XXH64_avalanche(h64);
 
-           case 29: PROCESS8_64;
+           case 29: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 21: PROCESS8_64;
+           case 21: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 13: PROCESS8_64;
+           case 13: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  5: PROCESS4_64;
-                    PROCESS1_64;
+           case  5: XXH_PROCESS4_64;
+                    XXH_PROCESS1_64;
                     return XXH64_avalanche(h64);
 
-           case 26: PROCESS8_64;
+           case 26: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 18: PROCESS8_64;
+           case 18: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 10: PROCESS8_64;
-                    PROCESS1_64;
-                    PROCESS1_64;
+           case 10: XXH_PROCESS8_64;
+                    XXH_PROCESS1_64;
+                    XXH_PROCESS1_64;
                     return XXH64_avalanche(h64);
 
-           case 30: PROCESS8_64;
+           case 30: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 22: PROCESS8_64;
+           case 22: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 14: PROCESS8_64;
+           case 14: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  6: PROCESS4_64;
-                    PROCESS1_64;
-                    PROCESS1_64;
+           case  6: XXH_PROCESS4_64;
+                    XXH_PROCESS1_64;
+                    XXH_PROCESS1_64;
                     return XXH64_avalanche(h64);
 
-           case 27: PROCESS8_64;
+           case 27: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 19: PROCESS8_64;
+           case 19: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 11: PROCESS8_64;
-                    PROCESS1_64;
-                    PROCESS1_64;
-                    PROCESS1_64;
+           case 11: XXH_PROCESS8_64;
+                    XXH_PROCESS1_64;
+                    XXH_PROCESS1_64;
+                    XXH_PROCESS1_64;
                     return XXH64_avalanche(h64);
 
-           case 31: PROCESS8_64;
+           case 31: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 23: PROCESS8_64;
+           case 23: XXH_PROCESS8_64;
                          /* fallthrough */
-           case 15: PROCESS8_64;
+           case 15: XXH_PROCESS8_64;
                          /* fallthrough */
-           case  7: PROCESS4_64;
+           case  7: XXH_PROCESS4_64;
                          /* fallthrough */
-           case  3: PROCESS1_64;
+           case  3: XXH_PROCESS1_64;
                          /* fallthrough */
-           case  2: PROCESS1_64;
+           case  2: XXH_PROCESS1_64;
                          /* fallthrough */
-           case  1: PROCESS1_64;
+           case  1: XXH_PROCESS1_64;
                          /* fallthrough */
            case  0: return XXH64_avalanche(h64);
         }
@@ -1726,6 +1851,16 @@ XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
     return 0;  /* unreachable, but some compilers complain without it */
 }
 
+#ifdef XXH_OLD_NAMES
+#  define PROCESS1_64 XXH_PROCESS1_64
+#  define PROCESS4_64 XXH_PROCESS4_64
+#  define PROCESS8_64 XXH_PROCESS8_64
+#else
+#  undef XXH_PROCESS1_64
+#  undef XXH_PROCESS4_64
+#  undef XXH_PROCESS8_64
+#endif
+
 XXH_FORCE_INLINE xxh_u64
 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
 {
@@ -1741,10 +1876,10 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment
 
     if (len>=32) {
         const xxh_u8* const limit = bEnd - 32;
-        xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2;
-        xxh_u64 v2 = seed + PRIME64_2;
+        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+        xxh_u64 v2 = seed + XXH_PRIME64_2;
         xxh_u64 v3 = seed + 0;
-        xxh_u64 v4 = seed - PRIME64_1;
+        xxh_u64 v4 = seed - XXH_PRIME64_1;
 
         do {
             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
@@ -1760,7 +1895,7 @@ XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment
         h64 = XXH64_mergeRound(h64, v4);
 
     } else {
-        h64  = seed + PRIME64_5;
+        h64  = seed + XXH_PRIME64_5;
     }
 
     h64 += (xxh_u64) len;
@@ -1811,10 +1946,10 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t s
 {
     XXH64_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
     memset(&state, 0, sizeof(state));
-    state.v1 = seed + PRIME64_1 + PRIME64_2;
-    state.v2 = seed + PRIME64_2;
+    state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+    state.v2 = seed + XXH_PRIME64_2;
     state.v3 = seed + 0;
-    state.v4 = seed - PRIME64_1;
+    state.v4 = seed - XXH_PRIME64_1;
      /* do not write into reserved64, might be removed in a future version */
     memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
     return XXH_OK;
@@ -1897,7 +2032,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
         h64 = XXH64_mergeRound(h64, v3);
         h64 = XXH64_mergeRound(h64, v4);
     } else {
-        h64  = state->v3 /*seed*/ + PRIME64_5;
+        h64  = state->v3 /*seed*/ + XXH_PRIME64_5;
     }
 
     h64 += (xxh_u64) state->total_len;