@@ -0,0 +1,615 @@
+/*
+* xxHash - Fast Hash algorithm
+* Copyright (C) 2012-2016, Yann Collet
+*
+* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following disclaimer
+* in the documentation and/or other materials provided with the
+* distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* You can contact the author at :
+* - xxHash homepage: http://www.xxhash.com
+* - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` attribute. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define XXH_FORCE_MEMORY_ACCESS 2
+#  elif defined(__INTEL_COMPILER) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
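+/* Illustrative note (not from the original sources) : since the switch can be
+ * set externally, a build targeting a CPU known to handle unaligned loads in
+ * hardware could select method 2 from the compiler command line, e.g. :
+ *
+ *     cc -DXXH_FORCE_MEMORY_ACCESS=2 -O3 -c xxhash.c
+ *
+ * The macro name is the real one used above; the command line is only a sketch.
+ */
+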
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment the define below :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed on big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
+#  define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set it to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
+#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/* Modify the local functions below should you wish to use some other memory routines */
+/* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void  XXH_free  (void* p)  { free(p); }
+/* for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
+#  define FORCE_INLINE static __forceinline
+#else
+#  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#ifndef MEM_MODULE
+#  if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#    include <stdint.h>
+    typedef uint8_t  BYTE;
+    typedef uint16_t U16;
+    typedef uint32_t U32;
+    typedef  int32_t S32;
+#  else
+    typedef unsigned char  BYTE;
+    typedef unsigned short U16;
+    typedef unsigned int   U32;
+    typedef   signed int   S32;
+#  endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* The __packed attribute is safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; } __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void* memPtr)
+{
+    U32 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
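+
+/* Editorial note (an assumption, based on the Stack Overflow reference above) :
+ * mainstream compilers recognize this memcpy() pattern and compile it down to
+ * a single load instruction when the target allows it, so method 0 is usually
+ * as fast as the non-portable variants while remaining standard-conforming. */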
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#if defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
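+
+/* Example (editorial) : a left-rotation feeds the bits shifted off the top
+ * back into the bottom, e.g. XXH_rotl32(0x80000001, 1) == 0x00000003.
+ * The macro form assumes 0 < r < 32 (resp. 64) : a rotation count of 0 would
+ * shift by the full width, which is undefined behavior in C. */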
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#elif GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#else
+static U32 XXH_swap32 (U32 x)
+{
+    return ((x << 24) & 0xff000000 ) |
+           ((x <<  8) & 0x00ff0000 ) |
+           ((x >>  8) & 0x0000ff00 ) |
+           ((x >> 24) & 0x000000ff );
+}
+#endif
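+
+/* Example (editorial) : byte-swapping reverses byte order, so
+ * XXH_swap32(0x12345678) == 0x78563412. This is the conversion between
+ * little-endian and big-endian representations of the same 32-bit value. */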
+
+
+/* *************************************
+* Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+    static const int g_one = 1;
+#  define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
+#endif
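+
+/* The detection trick above reads the first byte of the integer constant 1 :
+ * on a little-endian CPU that byte is 1, on a big-endian CPU it is 0. */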
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+/* *************************************
+* Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+/*====== Memory access ======*/
+
+#ifndef MEM_MODULE
+#  define MEM_MODULE
+#  if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#    include <stdint.h>
+    typedef uint64_t U64;
+#  else
+    /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+    typedef unsigned long long U64;
+#  endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* The __packed attribute is safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+
+static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U64 XXH_read64(const void* memPtr)
+{
+    U64 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap64 _byteswap_uint64
+#elif GCC_VERSION >= 403
+#  define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64 (U64 x)
+{
+    return ((x << 56) & 0xff00000000000000ULL) |
+           ((x << 40) & 0x00ff000000000000ULL) |
+           ((x << 24) & 0x0000ff0000000000ULL) |
+           ((x <<  8) & 0x000000ff00000000ULL) |
+           ((x >>  8) & 0x00000000ff000000ULL) |
+           ((x >> 24) & 0x0000000000ff0000ULL) |
+           ((x >> 40) & 0x000000000000ff00ULL) |
+           ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/*====== xxh64 ======*/
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 =  1609587929392839161ULL;
+static const U64 PRIME64_4 =  9650029242287828579ULL;
+static const U64 PRIME64_5 =  2870177450012600261ULL;
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+    acc += input * PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= PRIME64_1;
+    return acc;
+}
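+
+/* Each round folds 8 bytes of input into one accumulator : the input is
+ * spread by a prime multiply, the accumulator is rotated to mix high and low
+ * bits, then scaled by another prime. A sketch of one step on lane v1, as
+ * used in the main loops below :
+ *
+ *     v1 = XXH64_round(v1, XXH_readLE64(p, endian));  p += 8;
+ */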
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * PRIME64_1 + PRIME64_4;
+    return acc;
+}
+
+FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* bEnd = p + len;   /* not a const pointer : the null-input path below may reassign it */
+    U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)32;
+    }
+#endif
+
+    if (len>=32) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = seed + PRIME64_1 + PRIME64_2;
+        U64 v2 = seed + PRIME64_2;
+        U64 v3 = seed + 0;
+        U64 v4 = seed - PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+        } while (p<=limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64 = seed + PRIME64_5;
+    }
+
+    h64 += (U64) len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_CREATESTATE_STATIC(state);
+    XXH64_reset(state, seed);
+    XXH64_update(state, input, len);
+    return XXH64_digest(state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
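+
+/* Illustrative one-shot usage (buffer and bufferSize are hypothetical,
+ * caller-supplied values) :
+ *
+ *     unsigned long long const h = XXH64(buffer, bufferSize, 0);   // seed 0
+ */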
+
+/*====== Hash Streaming ======*/
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME64_1 + PRIME64_2;
+    state.v2 = seed + PRIME64_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME64_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len += len;
+
+    if (state->memsize + len < 32) {  /* fill in tmp buffer */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+        state->memsize += (U32)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* tmp buffer is full */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+        p += 32-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p+32 <= bEnd) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = state->v1;
+        U64 v2 = state->v2;
+        U64 v3 = state->v3;
+        U64 v4 = state->v4;
+
+        do {
+            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem64;
+    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+    U64 h64;
+
+    if (state->total_len >= 32) {
+        U64 const v1 = state->v1;
+        U64 const v2 = state->v2;
+        U64 const v3 = state->v3;
+        U64 const v4 = state->v4;
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+    } else {
+        h64 = state->v3 + PRIME64_5;
+    }
+
+    h64 += (U64) state->total_len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
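+
+/* Illustrative streaming usage (error handling omitted; chunk1/chunk2 and
+ * their sizes are hypothetical, caller-supplied values) :
+ *
+ *     XXH64_state_t* const state = XXH64_createState();
+ *     XXH64_reset(state, 0);                        // seed 0
+ *     XXH64_update(state, chunk1, size1);
+ *     XXH64_update(state, chunk2, size2);
+ *     unsigned long long const h = XXH64_digest(state);
+ *     XXH64_freeState(state);
+ */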
+
+
+/*====== Canonical representation ======*/
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+    return XXH_readBE64(src);
+}
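+
+/* Illustrative round-trip through the canonical (big-endian) form, e.g. for
+ * storing a hash on disk or sending it over a network in a fixed byte order
+ * (h is a hypothetical value obtained from XXH64() or XXH64_digest()) :
+ *
+ *     XXH64_canonical_t c;
+ *     XXH64_canonicalFromHash(&c, h);
+ *     XXH64_hash_t const h2 = XXH64_hashFromCanonical(&c);   // h2 == h
+ */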
+
+#endif  /* XXH_NO_LONG_LONG */