
Restoring authorship annotation for <igorsolovyev@yandex-team.ru>. Commit 1 of 2.

igorsolovyev, 3 years ago
commit 93dc653cf5

+ 107 - 107
contrib/libs/zstd/lib/common/bitstream.h

@@ -43,21 +43,21 @@ extern "C" {
 #  endif
 #endif
 
-#define STREAM_ACCUMULATOR_MIN_32  25
-#define STREAM_ACCUMULATOR_MIN_64  57
-#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
-
+#define STREAM_ACCUMULATOR_MIN_32  25 
+#define STREAM_ACCUMULATOR_MIN_64  57 
+#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) 
 
+ 
 /*-******************************************
 *  bitStream encoding API (write forward)
 ********************************************/
 /* bitStream can mix input from multiple sources.
- * A critical property of these streams is that they encode and decode in **reverse** direction.
- * So the first bit sequence you add will be the last to be read, like a LIFO stack.
- */
+ * A critical property of these streams is that they encode and decode in **reverse** direction. 
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack. 
+ */ 
 typedef struct {
     size_t bitContainer;
-    unsigned bitPos;
+    unsigned bitPos; 
     char*  startPtr;
     char*  ptr;
     char*  endPtr;
@@ -94,7 +94,7 @@ typedef struct {
     unsigned bitsConsumed;
     const char* ptr;
     const char* start;
-    const char* limitPtr;
+    const char* limitPtr; 
 } BIT_DStream_t;
 
 typedef enum { BIT_DStream_unfinished = 0,
@@ -137,10 +137,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
 /*-**************************************************************
 *  Internal functions
 ****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
+MEM_STATIC unsigned BIT_highbit32 (U32 val) 
 {
-    assert(val != 0);
-    {
+    assert(val != 0); 
+    { 
 #   if defined(_MSC_VER)   /* Visual */
 #       if STATIC_BMI2 == 1
             return _lzcnt_u32(val) ^ 31;
@@ -159,59 +159,59 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
 #   elif defined(__ICCARM__)    /* IAR Intrinsic */
         return 31 - __CLZ(val);
 #   else   /* Software version */
-        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
-                                                 11, 14, 16, 18, 22, 25,  3, 30,
-                                                  8, 12, 20, 28, 15, 17, 24,  7,
-                                                 19, 27, 23,  6, 26,  5,  4, 31 };
-        U32 v = val;
-        v |= v >> 1;
-        v |= v >> 2;
-        v |= v >> 4;
-        v |= v >> 8;
-        v |= v >> 16;
-        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29, 
+                                                 11, 14, 16, 18, 22, 25,  3, 30, 
+                                                  8, 12, 20, 28, 15, 17, 24,  7, 
+                                                 19, 27, 23,  6, 26,  5,  4, 31 }; 
+        U32 v = val; 
+        v |= v >> 1; 
+        v |= v >> 2; 
+        v |= v >> 4; 
+        v |= v >> 8; 
+        v |= v >> 16; 
+        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; 
 #   endif
-    }
+    } 
 }
 
 /*=====    Local Constants   =====*/
-static const unsigned BIT_mask[] = {
-    0,          1,         3,         7,         0xF,       0x1F,
-    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
-    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
-    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
-    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
-    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
-#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+static const unsigned BIT_mask[] = { 
+    0,          1,         3,         7,         0xF,       0x1F, 
+    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF, 
+    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF, 
+    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF, 
+    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, 
+    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ 
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) 
 
 /*-**************************************************************
 *  bitStream encoding
 ****************************************************************/
 /*! BIT_initCStream() :
- *  `dstCapacity` must be > sizeof(size_t)
+ *  `dstCapacity` must be > sizeof(size_t) 
  *  @return : 0 if success,
- *            otherwise an error code (can be tested using ERR_isError()) */
-MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
-                                  void* startPtr, size_t dstCapacity)
+ *            otherwise an error code (can be tested using ERR_isError()) */ 
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, 
+                                  void* startPtr, size_t dstCapacity) 
 {
     bitC->bitContainer = 0;
     bitC->bitPos = 0;
     bitC->startPtr = (char*)startPtr;
     bitC->ptr = bitC->startPtr;
-    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
-    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); 
+    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); 
     return 0;
 }
 
 /*! BIT_addBits() :
- *  can add up to 31 bits into `bitC`.
- *  Note : does not check for register overflow ! */
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
-                            size_t value, unsigned nbBits)
+ *  can add up to 31 bits into `bitC`. 
+ *  Note : does not check for register overflow ! */ 
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, 
+                            size_t value, unsigned nbBits) 
 {
     DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
-    assert(nbBits < BIT_MASK_SIZE);
-    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert(nbBits < BIT_MASK_SIZE); 
+    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); 
     bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
     bitC->bitPos += nbBits;
 }
@@ -219,74 +219,74 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
 /*! BIT_addBitsFast() :
  *  works only if `value` is _clean_,
  *  meaning all high bits above nbBits are 0 */
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
-                                size_t value, unsigned nbBits)
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, 
+                                size_t value, unsigned nbBits) 
 {
-    assert((value>>nbBits) == 0);
-    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert((value>>nbBits) == 0); 
+    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); 
     bitC->bitContainer |= value << bitC->bitPos;
     bitC->bitPos += nbBits;
 }
 
 /*! BIT_flushBitsFast() :
- *  assumption : bitContainer has not overflowed
+ *  assumption : bitContainer has not overflowed 
  *  unsafe version; does not check buffer overflow */
 MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
 {
     size_t const nbBytes = bitC->bitPos >> 3;
-    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); 
     assert(bitC->ptr <= bitC->endPtr);
     MEM_writeLEST(bitC->ptr, bitC->bitContainer);
     bitC->ptr += nbBytes;
     bitC->bitPos &= 7;
-    bitC->bitContainer >>= nbBytes*8;
+    bitC->bitContainer >>= nbBytes*8; 
 }
 
 /*! BIT_flushBits() :
- *  assumption : bitContainer has not overflowed
+ *  assumption : bitContainer has not overflowed 
  *  safe version; check for buffer overflow, and prevents it.
- *  note : does not signal buffer overflow.
- *  overflow will be revealed later on using BIT_closeCStream() */
+ *  note : does not signal buffer overflow. 
+ *  overflow will be revealed later on using BIT_closeCStream() */ 
 MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
 {
     size_t const nbBytes = bitC->bitPos >> 3;
-    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); 
     assert(bitC->ptr <= bitC->endPtr);
     MEM_writeLEST(bitC->ptr, bitC->bitContainer);
     bitC->ptr += nbBytes;
     if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
     bitC->bitPos &= 7;
-    bitC->bitContainer >>= nbBytes*8;
+    bitC->bitContainer >>= nbBytes*8; 
 }
 
 /*! BIT_closeCStream() :
  *  @return : size of CStream, in bytes,
- *            or 0 if it could not fit into dstBuffer */
+ *            or 0 if it could not fit into dstBuffer */ 
 MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
 {
     BIT_addBitsFast(bitC, 1, 1);   /* endMark */
     BIT_flushBits(bitC);
-    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ 
     return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
 }
 
 
 /*-********************************************************
-*  bitStream decoding
+*  bitStream decoding 
 **********************************************************/
 /*! BIT_initDStream() :
- *  Initialize a BIT_DStream_t.
- * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
- * `srcSize` must be the *exact* size of the bitStream, in bytes.
- * @return : size of stream (== srcSize), or an errorCode if a problem is detected
- */
+ *  Initialize a BIT_DStream_t. 
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure. 
+ * `srcSize` must be the *exact* size of the bitStream, in bytes. 
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected 
+ */ 
 MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
 {
     if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
 
-    bitD->start = (const char*)srcBuffer;
-    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
-
+    bitD->start = (const char*)srcBuffer; 
+    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); 
+ 
     if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
         bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
         bitD->bitContainer = MEM_readLEST(bitD->ptr);
@@ -298,30 +298,30 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
         bitD->bitContainer = *(const BYTE*)(bitD->start);
         switch(srcSize)
         {
-        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); 
                 ZSTD_FALLTHROUGH;
-
-        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ 
+        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); 
                 ZSTD_FALLTHROUGH;
-
-        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ 
+        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); 
                 ZSTD_FALLTHROUGH;
-
-        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ 
+        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; 
                 ZSTD_FALLTHROUGH;
-
-        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ 
+        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; 
                 ZSTD_FALLTHROUGH;
-
-        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
+ 
+        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; 
                 ZSTD_FALLTHROUGH;
-
-        default: break;
-        }
-        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
-            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
-            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
+ 
+        default: break; 
         }
+        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; 
+            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; 
+            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */ 
+        } 
         bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
     }
 
@@ -337,7 +337,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
 {
     U32 const regMask = sizeof(bitContainer)*8 - 1;
     /* if start > regMask, bitstream is corrupted, and result is undefined */
-    assert(nbBits < BIT_MASK_SIZE);
+    assert(nbBits < BIT_MASK_SIZE); 
     /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better
      * than accessing memory. When bmi2 instruction is not present, we consider
      * such cpus old (pre-Haswell, 2013) and their performance is not of that
@@ -355,7 +355,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co
 #if defined(STATIC_BMI2) && STATIC_BMI2 == 1
 	return  _bzhi_u64(bitContainer, nbBits);
 #else
-    assert(nbBits < BIT_MASK_SIZE);
+    assert(nbBits < BIT_MASK_SIZE); 
     return bitContainer & BIT_mask[nbBits];
 #endif
 }
@@ -365,7 +365,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co
  *  local register is not modified.
  *  On 32-bits, maxNbBits==24.
  *  On 64-bits, maxNbBits==56.
- * @return : value extracted */
+ * @return : value extracted */ 
 MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U32 nbBits)
 {
     /* arbitrate between double-shift and shift+mask */
@@ -375,18 +375,18 @@ MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U3
     return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
 #else
     /* this code path is slower on my os-x laptop */
-    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
-    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; 
+    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); 
 #endif
 }
 
 /*! BIT_lookBitsFast() :
- *  unsafe version; only works if nbBits >= 1 */
+ *  unsafe version; only works if nbBits >= 1 */ 
 MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
 {
-    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
-    assert(nbBits >= 1);
-    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; 
+    assert(nbBits >= 1); 
+    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); 
 }
 
 MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
@@ -397,7 +397,7 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
 /*! BIT_readBits() :
  *  Read (consume) next n bits from local register and update.
  *  Pay attention to not read more than nbBits contained into local register.
- * @return : extracted value. */
+ * @return : extracted value. */ 
 MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
 {
     size_t const value = BIT_lookBits(bitD, nbBits);
@@ -406,11 +406,11 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n
 }
 
 /*! BIT_readBitsFast() :
- *  unsafe version; only works only if nbBits >= 1 */
+ *  unsafe version; only works only if nbBits >= 1 */ 
 MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
 {
     size_t const value = BIT_lookBitsFast(bitD, nbBits);
-    assert(nbBits >= 1);
+    assert(nbBits >= 1); 
     BIT_skipBits(bitD, nbBits);
     return value;
 }
@@ -433,23 +433,23 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
 }
 
 /*! BIT_reloadDStream() :
- *  Refill `bitD` from buffer previously set in BIT_initDStream() .
- *  This function is safe, it guarantees it will not read beyond src buffer.
- * @return : status of `BIT_DStream_t` internal register.
- *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+ *  Refill `bitD` from buffer previously set in BIT_initDStream() . 
+ *  This function is safe, it guarantees it will not read beyond src buffer. 
+ * @return : status of `BIT_DStream_t` internal register. 
+ *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ 
 MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
 {
-    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
-        return BIT_DStream_overflow;
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */ 
+        return BIT_DStream_overflow; 
 
-    if (bitD->ptr >= bitD->limitPtr) {
+    if (bitD->ptr >= bitD->limitPtr) { 
         return BIT_reloadDStreamFast(bitD);
     }
     if (bitD->ptr == bitD->start) {
         if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
         return BIT_DStream_completed;
     }
-    /* start < ptr < limitPtr */
+    /* start < ptr < limitPtr */ 
     {   U32 nbBytes = bitD->bitsConsumed >> 3;
         BIT_DStream_status result = BIT_DStream_unfinished;
         if (bitD->ptr - nbBytes < bitD->start) {
@@ -458,14 +458,14 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
         }
         bitD->ptr -= nbBytes;
         bitD->bitsConsumed -= nbBytes*8;
-        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ 
         return result;
     }
 }
 
 /*! BIT_endOfDStream() :
- * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
- */
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). 
+ */ 
 MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
 {
     return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
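
For context on the BIT_* API in the hunks above: the CStream side writes bits forward while the DStream side reads them back in reverse, so the last value added is the first one read. A minimal round-trip sketch, assuming the internal bitstream.h / error_private.h headers are on the include path; the buffer size and the 5-/10-bit values are invented for illustration:

#include <assert.h>
#include "error_private.h"   /* ERR_isError */
#include "bitstream.h"       /* BIT_* API shown above */

int main(void)
{
    unsigned char buf[16];   /* dstCapacity must be > sizeof(size_t) */
    BIT_CStream_t cs;
    BIT_DStream_t ds;
    size_t streamSize;

    /* encode: write forward */
    assert(!ERR_isError(BIT_initCStream(&cs, buf, sizeof(buf))));
    BIT_addBits(&cs, 0x15, 5);           /* first value written ... */
    BIT_addBits(&cs, 0x2A3, 10);         /* ... second value written */
    BIT_flushBits(&cs);
    streamSize = BIT_closeCStream(&cs);  /* adds the endMark; 0 would mean overflow */
    assert(streamSize != 0);

    /* decode: read in reverse (LIFO), so the last value written comes out first */
    assert(!ERR_isError(BIT_initDStream(&ds, buf, streamSize)));
    assert(BIT_readBits(&ds, 10) == 0x2A3);
    assert(BIT_readBits(&ds, 5)  == 0x15);
    assert(BIT_endOfDStream(&ds));       /* all bits, and only those bits, consumed */
    return 0;
}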

+ 82 - 82
contrib/libs/zstd/lib/common/compiler.h

@@ -1,38 +1,38 @@
-/*
+/* 
  * Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMPILER_H
-#define ZSTD_COMPILER_H
-
+ * All rights reserved. 
+ * 
+ * This source code is licensed under both the BSD-style license (found in the 
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found 
+ * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. 
+ */ 
+ 
+#ifndef ZSTD_COMPILER_H 
+#define ZSTD_COMPILER_H 
+ 
 #include "portability_macros.h"
 
-/*-*******************************************************
-*  Compiler specifics
-*********************************************************/
-/* force inlining */
+/*-******************************************************* 
+*  Compiler specifics 
+*********************************************************/ 
+/* force inlining */ 
 
 #if !defined(ZSTD_NO_INLINE)
 #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
-#  define INLINE_KEYWORD inline
-#else
-#  define INLINE_KEYWORD
-#endif
-
+#  define INLINE_KEYWORD inline 
+#else 
+#  define INLINE_KEYWORD 
+#endif 
+ 
 #if defined(__GNUC__) || defined(__ICCARM__)
-#  define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-#  define FORCE_INLINE_ATTR __forceinline
-#else
-#  define FORCE_INLINE_ATTR
-#endif
-
+#  define FORCE_INLINE_ATTR __attribute__((always_inline)) 
+#elif defined(_MSC_VER) 
+#  define FORCE_INLINE_ATTR __forceinline 
+#else 
+#  define FORCE_INLINE_ATTR 
+#endif 
+ 
 #else
 
 #define INLINE_KEYWORD
@@ -40,7 +40,7 @@
 
 #endif
 
-/**
+/** 
   On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
   This explicitly marks such functions as __cdecl so that the code will still compile
   if a CC other than __cdecl has been made the default.
@@ -52,28 +52,28 @@
 #endif
 
 /**
- * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant 
  * parameters. They must be inlined for the compiler to eliminate the constant
- * branches.
- */
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-/**
- * HINT_INLINE is used to help the compiler generate better code. It is *not*
- * used for "templates", so it can be tweaked based on the compilers
- * performance.
- *
- * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
- * always_inline attribute.
- *
- * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
- * attribute.
- */
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
-#  define HINT_INLINE static INLINE_KEYWORD
-#else
-#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
-#endif
-
+ * branches. 
+ */ 
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR 
+/** 
+ * HINT_INLINE is used to help the compiler generate better code. It is *not* 
+ * used for "templates", so it can be tweaked based on the compilers 
+ * performance. 
+ * 
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the 
+ * always_inline attribute. 
+ * 
+ * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline 
+ * attribute. 
+ */ 
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 
+#  define HINT_INLINE static INLINE_KEYWORD 
+#else 
+#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR 
+#endif 
+ 
 /* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
 #if defined(__GNUC__)
 #  define UNUSED_ATTR __attribute__((unused))
@@ -81,37 +81,37 @@
 #  define UNUSED_ATTR
 #endif
 
-/* force no inlining */
-#ifdef _MSC_VER
-#  define FORCE_NOINLINE static __declspec(noinline)
-#else
+/* force no inlining */ 
+#ifdef _MSC_VER 
+#  define FORCE_NOINLINE static __declspec(noinline) 
+#else 
 #  if defined(__GNUC__) || defined(__ICCARM__)
-#    define FORCE_NOINLINE static __attribute__((__noinline__))
-#  else
-#    define FORCE_NOINLINE static
-#  endif
-#endif
-
-
-/* target attribute */
+#    define FORCE_NOINLINE static __attribute__((__noinline__)) 
+#  else 
+#    define FORCE_NOINLINE static 
+#  endif 
+#endif 
+ 
+
+/* target attribute */ 
 #if defined(__GNUC__) || defined(__ICCARM__)
-#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
-#else
-#  define TARGET_ATTRIBUTE(target)
-#endif
-
+#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) 
+#else 
+#  define TARGET_ATTRIBUTE(target) 
+#endif 
+ 
 /* Target attribute for BMI2 dynamic dispatch.
  * Enable lzcnt, bmi, and bmi2.
  * We test for bmi1 & bmi2. lzcnt is included in bmi1.
- */
+ */ 
 #define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
-
+ 
 /* prefetch
  * can be disabled, by declaring NO_PREFETCH build macro */
 #if defined(NO_PREFETCH)
 #  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
 #  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
-#else
+#else 
 #  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
 #    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
 #    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
@@ -127,7 +127,7 @@
 #    define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
 #  endif
 #endif  /* NO_PREFETCH */
-
+ 
 #define CACHELINE_SIZE 64
 
 #define PREFETCH_AREA(p, s)  {            \
@@ -165,16 +165,16 @@
 #define UNLIKELY(x) (x)
 #endif
 
-/* disable warnings */
-#ifdef _MSC_VER    /* Visual Studio */
-#  include <intrin.h>                    /* For Visual 2005 */
-#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
-#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
-#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
-#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
-#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
-#endif
-
+/* disable warnings */ 
+#ifdef _MSC_VER    /* Visual Studio */ 
+#  include <intrin.h>                    /* For Visual 2005 */ 
+#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */ 
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */ 
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */ 
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */ 
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */ 
+#endif 
+ 
 /*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
 #ifndef STATIC_BMI2
 #  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
@@ -332,4 +332,4 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size);
 void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
 #endif
 
-#endif /* ZSTD_COMPILER_H */
+#endif /* ZSTD_COMPILER_H */ 
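
For context, this header only collects portability macros. A short sketch of how FORCE_INLINE_TEMPLATE, UNUSED_ATTR and PREFETCH_L1 from this header are typically used; the summing functions and the unroll flag are invented for illustration:

#include <stddef.h>
#include "compiler.h"

/* 'unroll4' is a compile-time constant at each call site; force-inlining lets
 * the compiler drop the dead branch, which is the point of the "C template". */
FORCE_INLINE_TEMPLATE size_t sum_generic(const unsigned* a, size_t n, int unroll4)
{
    size_t i = 0, s = 0;
    PREFETCH_L1(a);                      /* expands to a no-op when NO_PREFETCH is set */
    if (unroll4) {
        for (; i + 4 <= n; i += 4)
            s += (size_t)a[i] + a[i+1] + a[i+2] + a[i+3];
    }
    for (; i < n; i++) s += a[i];
    return s;
}

static UNUSED_ATTR size_t sum_unrolled(const unsigned* a, size_t n) { return sum_generic(a, n, 1); }
static UNUSED_ATTR size_t sum_simple  (const unsigned* a, size_t n) { return sum_generic(a, n, 0); }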

+ 209 - 209
contrib/libs/zstd/lib/common/cpu.h

@@ -1,213 +1,213 @@
-/*
+/* 
  * Copyright (c) Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMMON_CPU_H
-#define ZSTD_COMMON_CPU_H
-
-/**
- * Implementation taken from folly/CpuId.h
- * https://github.com/facebook/folly/blob/master/folly/CpuId.h
- */
-
-#include "mem.h"
-
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-typedef struct {
-    U32 f1c;
-    U32 f1d;
-    U32 f7b;
-    U32 f7c;
-} ZSTD_cpuid_t;
-
-MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
-    U32 f1c = 0;
-    U32 f1d = 0;
-    U32 f7b = 0;
-    U32 f7c = 0;
+ * All rights reserved. 
+ * 
+ * This source code is licensed under both the BSD-style license (found in the 
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found 
+ * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. 
+ */ 
+ 
+#ifndef ZSTD_COMMON_CPU_H 
+#define ZSTD_COMMON_CPU_H 
+ 
+/** 
+ * Implementation taken from folly/CpuId.h 
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h 
+ */ 
+ 
+#include "mem.h" 
+ 
+#ifdef _MSC_VER 
+#include <intrin.h> 
+#endif 
+ 
+typedef struct { 
+    U32 f1c; 
+    U32 f1d; 
+    U32 f7b; 
+    U32 f7c; 
+} ZSTD_cpuid_t; 
+ 
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { 
+    U32 f1c = 0; 
+    U32 f1d = 0; 
+    U32 f7b = 0; 
+    U32 f7c = 0; 
 #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
-    int reg[4];
-    __cpuid((int*)reg, 0);
-    {
-        int const n = reg[0];
-        if (n >= 1) {
-            __cpuid((int*)reg, 1);
-            f1c = (U32)reg[2];
-            f1d = (U32)reg[3];
-        }
-        if (n >= 7) {
-            __cpuidex((int*)reg, 7, 0);
-            f7b = (U32)reg[1];
-            f7c = (U32)reg[2];
-        }
-    }
-#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
-    /* The following block like the normal cpuid branch below, but gcc
-     * reserves ebx for use of its pic register so we must specially
-     * handle the save and restore to avoid clobbering the register
-     */
-    U32 n;
-    __asm__(
-        "pushl %%ebx\n\t"
-        "cpuid\n\t"
-        "popl %%ebx\n\t"
-        : "=a"(n)
-        : "a"(0)
-        : "ecx", "edx");
-    if (n >= 1) {
-      U32 f1a;
-      __asm__(
-          "pushl %%ebx\n\t"
-          "cpuid\n\t"
-          "popl %%ebx\n\t"
-          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+    int reg[4]; 
+    __cpuid((int*)reg, 0); 
+    { 
+        int const n = reg[0]; 
+        if (n >= 1) { 
+            __cpuid((int*)reg, 1); 
+            f1c = (U32)reg[2]; 
+            f1d = (U32)reg[3]; 
+        } 
+        if (n >= 7) { 
+            __cpuidex((int*)reg, 7, 0); 
+            f7b = (U32)reg[1]; 
+            f7c = (U32)reg[2]; 
+        } 
+    } 
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) 
+    /* The following block like the normal cpuid branch below, but gcc 
+     * reserves ebx for use of its pic register so we must specially 
+     * handle the save and restore to avoid clobbering the register 
+     */ 
+    U32 n; 
+    __asm__( 
+        "pushl %%ebx\n\t" 
+        "cpuid\n\t" 
+        "popl %%ebx\n\t" 
+        : "=a"(n) 
+        : "a"(0) 
+        : "ecx", "edx"); 
+    if (n >= 1) { 
+      U32 f1a; 
+      __asm__( 
+          "pushl %%ebx\n\t" 
+          "cpuid\n\t" 
+          "popl %%ebx\n\t" 
+          : "=a"(f1a), "=c"(f1c), "=d"(f1d) 
           : "a"(1));
-    }
-    if (n >= 7) {
-      __asm__(
-          "pushl %%ebx\n\t"
-          "cpuid\n\t"
+    } 
+    if (n >= 7) { 
+      __asm__( 
+          "pushl %%ebx\n\t" 
+          "cpuid\n\t" 
           "movl %%ebx, %%eax\n\t"
-          "popl %%ebx"
-          : "=a"(f7b), "=c"(f7c)
-          : "a"(7), "c"(0)
-          : "edx");
-    }
-#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
-    U32 n;
-    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
-    if (n >= 1) {
-      U32 f1a;
-      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
-    }
-    if (n >= 7) {
-      U32 f7a;
-      __asm__("cpuid"
-              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
-              : "a"(7), "c"(0)
-              : "edx");
-    }
-#endif
-    {
-        ZSTD_cpuid_t cpuid;
-        cpuid.f1c = f1c;
-        cpuid.f1d = f1d;
-        cpuid.f7b = f7b;
-        cpuid.f7c = f7c;
-        return cpuid;
-    }
-}
-
-#define X(name, r, bit)                                                        \
-  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
-    return ((cpuid.r) & (1U << bit)) != 0;                                     \
-  }
-
-/* cpuid(1): Processor Info and Feature Bits. */
-#define C(name, bit) X(name, f1c, bit)
-  C(sse3, 0)
-  C(pclmuldq, 1)
-  C(dtes64, 2)
-  C(monitor, 3)
-  C(dscpl, 4)
-  C(vmx, 5)
-  C(smx, 6)
-  C(eist, 7)
-  C(tm2, 8)
-  C(ssse3, 9)
-  C(cnxtid, 10)
-  C(fma, 12)
-  C(cx16, 13)
-  C(xtpr, 14)
-  C(pdcm, 15)
-  C(pcid, 17)
-  C(dca, 18)
-  C(sse41, 19)
-  C(sse42, 20)
-  C(x2apic, 21)
-  C(movbe, 22)
-  C(popcnt, 23)
-  C(tscdeadline, 24)
-  C(aes, 25)
-  C(xsave, 26)
-  C(osxsave, 27)
-  C(avx, 28)
-  C(f16c, 29)
-  C(rdrand, 30)
-#undef C
-#define D(name, bit) X(name, f1d, bit)
-  D(fpu, 0)
-  D(vme, 1)
-  D(de, 2)
-  D(pse, 3)
-  D(tsc, 4)
-  D(msr, 5)
-  D(pae, 6)
-  D(mce, 7)
-  D(cx8, 8)
-  D(apic, 9)
-  D(sep, 11)
-  D(mtrr, 12)
-  D(pge, 13)
-  D(mca, 14)
-  D(cmov, 15)
-  D(pat, 16)
-  D(pse36, 17)
-  D(psn, 18)
-  D(clfsh, 19)
-  D(ds, 21)
-  D(acpi, 22)
-  D(mmx, 23)
-  D(fxsr, 24)
-  D(sse, 25)
-  D(sse2, 26)
-  D(ss, 27)
-  D(htt, 28)
-  D(tm, 29)
-  D(pbe, 31)
-#undef D
-
-/* cpuid(7): Extended Features. */
-#define B(name, bit) X(name, f7b, bit)
-  B(bmi1, 3)
-  B(hle, 4)
-  B(avx2, 5)
-  B(smep, 7)
-  B(bmi2, 8)
-  B(erms, 9)
-  B(invpcid, 10)
-  B(rtm, 11)
-  B(mpx, 14)
-  B(avx512f, 16)
-  B(avx512dq, 17)
-  B(rdseed, 18)
-  B(adx, 19)
-  B(smap, 20)
-  B(avx512ifma, 21)
-  B(pcommit, 22)
-  B(clflushopt, 23)
-  B(clwb, 24)
-  B(avx512pf, 26)
-  B(avx512er, 27)
-  B(avx512cd, 28)
-  B(sha, 29)
-  B(avx512bw, 30)
-  B(avx512vl, 31)
-#undef B
-#define C(name, bit) X(name, f7c, bit)
-  C(prefetchwt1, 0)
-  C(avx512vbmi, 1)
-#undef C
-
-#undef X
-
-#endif /* ZSTD_COMMON_CPU_H */
+          "popl %%ebx" 
+          : "=a"(f7b), "=c"(f7c) 
+          : "a"(7), "c"(0) 
+          : "edx"); 
+    } 
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) 
+    U32 n; 
+    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); 
+    if (n >= 1) { 
+      U32 f1a; 
+      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); 
+    } 
+    if (n >= 7) { 
+      U32 f7a; 
+      __asm__("cpuid" 
+              : "=a"(f7a), "=b"(f7b), "=c"(f7c) 
+              : "a"(7), "c"(0) 
+              : "edx"); 
+    } 
+#endif 
+    { 
+        ZSTD_cpuid_t cpuid; 
+        cpuid.f1c = f1c; 
+        cpuid.f1d = f1d; 
+        cpuid.f7b = f7b; 
+        cpuid.f7c = f7c; 
+        return cpuid; 
+    } 
+} 
+ 
+#define X(name, r, bit)                                                        \ 
+  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \ 
+    return ((cpuid.r) & (1U << bit)) != 0;                                     \ 
+  } 
+ 
+/* cpuid(1): Processor Info and Feature Bits. */ 
+#define C(name, bit) X(name, f1c, bit) 
+  C(sse3, 0) 
+  C(pclmuldq, 1) 
+  C(dtes64, 2) 
+  C(monitor, 3) 
+  C(dscpl, 4) 
+  C(vmx, 5) 
+  C(smx, 6) 
+  C(eist, 7) 
+  C(tm2, 8) 
+  C(ssse3, 9) 
+  C(cnxtid, 10) 
+  C(fma, 12) 
+  C(cx16, 13) 
+  C(xtpr, 14) 
+  C(pdcm, 15) 
+  C(pcid, 17) 
+  C(dca, 18) 
+  C(sse41, 19) 
+  C(sse42, 20) 
+  C(x2apic, 21) 
+  C(movbe, 22) 
+  C(popcnt, 23) 
+  C(tscdeadline, 24) 
+  C(aes, 25) 
+  C(xsave, 26) 
+  C(osxsave, 27) 
+  C(avx, 28) 
+  C(f16c, 29) 
+  C(rdrand, 30) 
+#undef C 
+#define D(name, bit) X(name, f1d, bit) 
+  D(fpu, 0) 
+  D(vme, 1) 
+  D(de, 2) 
+  D(pse, 3) 
+  D(tsc, 4) 
+  D(msr, 5) 
+  D(pae, 6) 
+  D(mce, 7) 
+  D(cx8, 8) 
+  D(apic, 9) 
+  D(sep, 11) 
+  D(mtrr, 12) 
+  D(pge, 13) 
+  D(mca, 14) 
+  D(cmov, 15) 
+  D(pat, 16) 
+  D(pse36, 17) 
+  D(psn, 18) 
+  D(clfsh, 19) 
+  D(ds, 21) 
+  D(acpi, 22) 
+  D(mmx, 23) 
+  D(fxsr, 24) 
+  D(sse, 25) 
+  D(sse2, 26) 
+  D(ss, 27) 
+  D(htt, 28) 
+  D(tm, 29) 
+  D(pbe, 31) 
+#undef D 
+ 
+/* cpuid(7): Extended Features. */ 
+#define B(name, bit) X(name, f7b, bit) 
+  B(bmi1, 3) 
+  B(hle, 4) 
+  B(avx2, 5) 
+  B(smep, 7) 
+  B(bmi2, 8) 
+  B(erms, 9) 
+  B(invpcid, 10) 
+  B(rtm, 11) 
+  B(mpx, 14) 
+  B(avx512f, 16) 
+  B(avx512dq, 17) 
+  B(rdseed, 18) 
+  B(adx, 19) 
+  B(smap, 20) 
+  B(avx512ifma, 21) 
+  B(pcommit, 22) 
+  B(clflushopt, 23) 
+  B(clwb, 24) 
+  B(avx512pf, 26) 
+  B(avx512er, 27) 
+  B(avx512cd, 28) 
+  B(sha, 29) 
+  B(avx512bw, 30) 
+  B(avx512vl, 31) 
+#undef B 
+#define C(name, bit) X(name, f7c, bit) 
+  C(prefetchwt1, 0) 
+  C(avx512vbmi, 1) 
+#undef C 
+ 
+#undef X 
+ 
+#endif /* ZSTD_COMMON_CPU_H */ 
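
For context, this header caches a single cpuid pass in a ZSTD_cpuid_t, and the X()/B()/C()/D() macros expand into one feature checker per bit (ZSTD_cpuid_bmi2(), ZSTD_cpuid_avx2(), ...). A minimal sketch of how it is queried; the printout is illustrative only:

#include <stdio.h>
#include "cpu.h"

int main(void)
{
    ZSTD_cpuid_t const cpuid = ZSTD_cpuid();          /* single cpuid pass */
    printf("bmi2 : %d\n", ZSTD_cpuid_bmi2(cpuid));    /* f7b, bit 8  */
    printf("avx2 : %d\n", ZSTD_cpuid_avx2(cpuid));    /* f7b, bit 5  */
    printf("sse42: %d\n", ZSTD_cpuid_sse42(cpuid));   /* f1c, bit 20 */
    return 0;
}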

+ 12 - 12
contrib/libs/zstd/lib/common/entropy_common.c

@@ -23,12 +23,12 @@
 #include "huf.h"
 
 
-/*===   Version   ===*/
-unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
-
-
-/*===   Error Management   ===*/
-unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+/*===   Version   ===*/ 
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } 
+ 
+ 
+/*===   Error Management   ===*/ 
+unsigned FSE_isError(size_t code) { return ERR_isError(code); } 
 const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
 
 unsigned HUF_isError(size_t code) { return ERR_isError(code); }
@@ -158,15 +158,15 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
         }
         {
             int const max = (2*threshold-1) - remaining;
-            int count;
+            int count; 
 
             if ((bitStream & (threshold-1)) < (U32)max) {
-                count = bitStream & (threshold-1);
-                bitCount += nbBits-1;
+                count = bitStream & (threshold-1); 
+                bitCount += nbBits-1; 
             } else {
-                count = bitStream & (2*threshold-1);
+                count = bitStream & (2*threshold-1); 
                 if (count >= threshold) count -= max;
-                bitCount += nbBits;
+                bitCount += nbBits; 
             }
 
             count--;   /* extra accuracy */
@@ -179,7 +179,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
                 assert(count == -1);
                 remaining += count;
             }
-            normalizedCounter[charnum++] = (short)count;
+            normalizedCounter[charnum++] = (short)count; 
             previous0 = !count;
 
             assert(threshold > 1);

+ 16 - 16
contrib/libs/zstd/lib/common/error_private.c

@@ -1,11 +1,11 @@
-/*
+/* 
  * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the 
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found 
+ * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. 
  */
 
 /* The purpose of this file is to have a single list of error strings embedded in binary */
@@ -27,26 +27,26 @@ const char* ERR_getErrorString(ERR_enum code)
     case PREFIX(version_unsupported): return "Version not supported";
     case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
     case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
-    case PREFIX(corruption_detected): return "Corrupted block detected";
-    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
-    case PREFIX(parameter_unsupported): return "Unsupported parameter";
-    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+    case PREFIX(corruption_detected): return "Corrupted block detected"; 
+    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; 
+    case PREFIX(parameter_unsupported): return "Unsupported parameter"; 
+    case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; 
     case PREFIX(init_missing): return "Context should be init first";
     case PREFIX(memory_allocation): return "Allocation error : not enough memory";
-    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; 
     case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
     case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
     case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
     case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
     case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
     case PREFIX(dictionary_wrong): return "Dictionary mismatch";
-    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
-    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
-    case PREFIX(srcSize_wrong): return "Src size is incorrect";
+    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; 
+    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; 
+    case PREFIX(srcSize_wrong): return "Src size is incorrect"; 
     case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
-        /* following error codes are not stable and may be removed or changed in a future version */
-    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
-    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+        /* following error codes are not stable and may be removed or changed in a future version */ 
+    case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; 
+    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; 
     case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
     case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
     case PREFIX(maxCode):

+ 7 - 7
contrib/libs/zstd/lib/common/error_private.h

@@ -1,11 +1,11 @@
-/*
+/* 
  * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the 
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found 
+ * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. 
  */
 
 /* Note : this module is expected to remain private, do not expose it */
@@ -52,8 +52,8 @@ typedef ZSTD_ErrorCode ERR_enum;
 *  Error codes handling
 ******************************************/
 #undef ERROR   /* already defined on Visual Studio */
-#define ERROR(name) ZSTD_ERROR(name)
-#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+#define ERROR(name) ZSTD_ERROR(name) 
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) 
 
 ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
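
For context on the ERROR()/ERR_isError() pair above: error codes are returned as very large size_t values (the negated enum), so the same return channel carries either a valid size or an error. A tiny sketch, assuming the usual zstd error enum behind PREFIX() and the ERR_getErrorCode() helper declared elsewhere in this header in upstream zstd; demo() and the sample code dstSize_tooSmall are illustrative:

#include <assert.h>
#include "error_private.h"

static size_t demo(int fail)
{
    if (fail) return ERROR(dstSize_tooSmall);  /* == (size_t)-PREFIX(dstSize_tooSmall) */
    return 42;                                 /* an ordinary size */
}

int main(void)
{
    assert(!ERR_isError(demo(0)));             /* small values are plain sizes */
    assert( ERR_isError(demo(1)));             /* error codes live above ERROR(maxCode) */
    assert(ERR_getErrorCode(demo(1)) == PREFIX(dstSize_tooSmall));
    return 0;
}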
 

+ 60 - 60
contrib/libs/zstd/lib/common/fse.h

@@ -16,42 +16,42 @@
 extern "C" {
 #endif
 
-#ifndef FSE_H
-#define FSE_H
-
+#ifndef FSE_H 
+#define FSE_H 
 
+ 
 /*-*****************************************
 *  Dependencies
 ******************************************/
 #include "zstd_deps.h"    /* size_t, ptrdiff_t */
 
 
-/*-*****************************************
-*  FSE_PUBLIC_API : control library symbols visibility
-******************************************/
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
-#  define FSE_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-#  define FSE_PUBLIC_API
-#endif
-
-/*------   Version   ------*/
-#define FSE_VERSION_MAJOR    0
-#define FSE_VERSION_MINOR    9
-#define FSE_VERSION_RELEASE  0
-
-#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
-#define FSE_QUOTE(str) #str
-#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
-#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
-
-#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
-FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
-
+/*-***************************************** 
+*  FSE_PUBLIC_API : control library symbols visibility 
+******************************************/ 
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) 
+#  define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) 
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */ 
+#  define FSE_PUBLIC_API __declspec(dllexport) 
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) 
+#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ 
+#else 
+#  define FSE_PUBLIC_API 
+#endif 
+ 
+/*------   Version   ------*/ 
+#define FSE_VERSION_MAJOR    0 
+#define FSE_VERSION_MINOR    9 
+#define FSE_VERSION_RELEASE  0 
+ 
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE 
+#define FSE_QUOTE(str) #str 
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) 
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) 
+ 
+#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) 
+FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */ 
+ 
 
 /*-****************************************
 *  FSE simple functions
@@ -64,8 +64,8 @@ FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number;
                      if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
                      if FSE_isError(return), compression failed (more details using FSE_getErrorName())
 */
-FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, 
+                             const void* src, size_t srcSize); 
 
 /*! FSE_decompress():
     Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
@@ -77,18 +77,18 @@ FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
     Why ? : making this distinction requires a header.
     Header management is intentionally delegated to the user layer, which can better manage special cases.
 */
-FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
-                               const void* cSrc, size_t cSrcSize);
+FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity, 
+                               const void* cSrc, size_t cSrcSize); 
 
 
 /*-*****************************************
 *  Tool functions
 ******************************************/
-FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */ 
 
 /* Error Management */
-FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
-FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */ 
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */ 
 
 
 /*-*****************************************
@@ -102,7 +102,7 @@ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error co
                      if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
                      if FSE_isError(return), it's an error code.
 */
-FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); 
 
 
 /*-*****************************************
@@ -132,7 +132,7 @@ or to save and provide normalized distribution using external method.
     dynamically downsize 'tableLog' when conditions are met.
     It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
     @return : recommended tableLog (necessarily <= 'maxTableLog') */
-FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); 
 
 /*! FSE_normalizeCount():
     normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
@@ -151,7 +151,7 @@ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tabl
 /*! FSE_NCountWriteBound():
     Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
     Typically useful for allocation purpose. */
-FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); 
 
 /*! FSE_writeNCount():
     Compactly save 'normalizedCounter' into 'buffer'.
@@ -164,20 +164,20 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
 /*! Constructor and Destructor of FSE_CTable.
     Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
 typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
-FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); 
+FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct); 
 
 /*! FSE_buildCTable():
     Builds `ct`, which must be already allocated, using FSE_createCTable().
     @return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); 
 
 /*! FSE_compress_usingCTable():
     Compress `src` using `ct` into `dst` which must be already allocated.
     @return : size of compressed data (<= `dstCapacity`),
               or 0 if compressed data could not fit into `dst`,
               or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); 
 
 /*!
 Tutorial :
@@ -244,20 +244,20 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
 /*! Constructor and Destructor of FSE_DTable.
     Note that its size depends on 'tableLog' */
 typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
-FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
-FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); 
+FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt); 
 
 /*! FSE_buildDTable():
     Builds 'dt', which must be already allocated, using FSE_createDTable().
     return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); 
 
 /*! FSE_decompress_usingDTable():
     Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
     into `dst` which must be already allocated.
     @return : size of regenerated data (necessarily <= `dstCapacity`),
               or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); 
 
 /*!
 Tutorial :
@@ -287,10 +287,10 @@ FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<
 If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
 */
 
-#endif  /* FSE_H */
+#endif  /* FSE_H */ 
 
-#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
-#define FSE_H_FSE_STATIC_LINKING_ONLY
+#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) 
+#define FSE_H_FSE_STATIC_LINKING_ONLY 
 
 /* *** Dependency *** */
 #include "bitstream.h"
@@ -308,11 +308,11 @@ If there is an error, the function will return an error code, which can be teste
 #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
 #define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))
 
-/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
-#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
-#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
-
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */ 
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) 
+#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) 
 
+ 
 /* *****************************************
  *  FSE advanced API
  ***************************************** */
@@ -361,11 +361,11 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
 size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
 /**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
 
-typedef enum {
-   FSE_repeat_none,  /**< Cannot use the previous table */
-   FSE_repeat_check, /**< Can use the previous table but it must be checked */
+typedef enum { 
+   FSE_repeat_none,  /**< Cannot use the previous table */ 
+   FSE_repeat_check, /**< Can use the previous table but it must be checked */ 
    FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
- } FSE_repeat;
+ } FSE_repeat; 
 
 /* *****************************************
 *  FSE symbol compression API
@@ -539,9 +539,9 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3
 
 MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
 {
-    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; 
     const U16* const stateTable = (const U16*)(statePtr->stateTable);
-    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); 
     BIT_addBits(bitC, statePtr->value, nbBitsOut);
     statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
 }
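
For context, a round-trip sketch of the simple entry points declared at the top of this header (FSE_compress / FSE_decompress). The buffer sizes and the two-symbol input are invented, and per the header a return of 0 (not compressible) or 1 (RLE) must be handled by the caller:

#include <assert.h>
#include <string.h>
#include "fse.h"

int main(void)
{
    char src[1024], dst[2048], back[1024];
    size_t cSize, dSize;

    memset(src, 'A', 700);                 /* skewed symbol distribution */
    memset(src + 700, 'B', 324);

    cSize = FSE_compress(dst, sizeof(dst), src, sizeof(src));
    assert(!FSE_isError(cSize));
    if (cSize <= 1) return 0;              /* 0: not compressible, 1: use RLE instead */

    dSize = FSE_decompress(back, sizeof(back), dst, cSize);
    assert(!FSE_isError(dSize));
    assert(dSize == sizeof(src) && !memcmp(src, back, dSize));
    return 0;
}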

+ 5 - 5
contrib/libs/zstd/lib/common/fse_decompress.c

@@ -18,10 +18,10 @@
 ****************************************************************/
 #include "debug.h"      /* assert */
 #include "bitstream.h"
-#include "compiler.h"
+#include "compiler.h" 
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
-#include "error_private.h"
+#include "error_private.h" 
 #define ZSTD_DEPS_NEED_MALLOC
 #include "zstd_deps.h"
 
@@ -165,8 +165,8 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
     {   U32 u;
         for (u=0; u<tableSize; u++) {
             FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
-            U32 const nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+            U32 const nextState = symbolNext[symbol]++; 
+            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) ); 
             tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
     }   }
 
@@ -228,7 +228,7 @@ size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
     return 0;
 }
 
-FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( 
           void* dst, size_t maxDstSize,
     const void* cSrc, size_t cSrcSize,
     const FSE_DTable* dt, const unsigned fast)

+ 153 - 153
contrib/libs/zstd/lib/common/huf.h

@@ -16,98 +16,98 @@
 extern "C" {
 #endif
 
-#ifndef HUF_H_298734234
-#define HUF_H_298734234
+#ifndef HUF_H_298734234 
+#define HUF_H_298734234 
 
 /* *** Dependencies *** */
 #include "zstd_deps.h"    /* size_t */
 
 
-/* *** library symbols visibility *** */
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- *        HUF symbols remain "private" (internal symbols for library only).
- *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
-#  define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-#  define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* ***  simple functions  *** */
-/* ========================== */
-
-/** HUF_compress() :
- *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- * 'dst' buffer must be already allocated.
- *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
-                             const void* src, size_t srcSize);
-
-/** HUF_decompress() :
- *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- *  into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- *  Note : in contrast with FSE, HUF_decompress can regenerate
- *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- *         because it knows size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- *           or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
-                               const void* cSrc, size_t cSrcSize);
-
-
+/* *** library symbols visibility *** */ 
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, 
+ *        HUF symbols remain "private" (internal symbols for library only). 
+ *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ 
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) 
+#  define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) 
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */ 
+#  define HUF_PUBLIC_API __declspec(dllexport) 
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) 
+#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ 
+#else 
+#  define HUF_PUBLIC_API 
+#endif 
+
+
+/* ========================== */ 
+/* ***  simple functions  *** */ 
+/* ========================== */ 
+
+/** HUF_compress() : 
+ *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. 
+ * 'dst' buffer must be already allocated. 
+ *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). 
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. 
+ * @return : size of compressed data (<= `dstCapacity`). 
+ *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! 
+ *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName()) 
+ */ 
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, 
+                             const void* src, size_t srcSize); 
+ 
+/** HUF_decompress() : 
+ *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', 
+ *  into already allocated buffer 'dst', of minimum size 'dstSize'. 
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. 
+ *  Note : in contrast with FSE, HUF_decompress can regenerate 
+ *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, 
+ *         because it knows size to regenerate (originalSize). 
+ * @return : size of regenerated data (== originalSize), 
+ *           or an error code, which can be tested using HUF_isError() 
+ */ 
+HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize, 
+                               const void* cSrc, size_t cSrcSize); 
+ 
+ 
 /* ***   Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
-HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
+#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */ 
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */ 
 
 /* Error Management */
-HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
+HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */ 
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */ 
 
 
 /* ***   Advanced function   *** */
 
 /** HUF_compress2() :
- *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                               unsigned maxSymbolValue, unsigned tableLog);
+ *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. 
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . 
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ 
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, 
+                               const void* src, size_t srcSize, 
+                               unsigned maxSymbolValue, unsigned tableLog); 
 
 /** HUF_compress4X_wksp() :
- *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ *  Same as HUF_compress2(), but uses externally allocated `workSpace`. 
  * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
 #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
 #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     unsigned maxSymbolValue, unsigned tableLog,
-                                     void* workSpace, size_t wkspSize);
-
-#endif   /* HUF_H_298734234 */
-
-/* ******************************************************************
- *  WARNING !!
- *  The following section contains advanced and experimental definitions
- *  which shall never be used in the context of a dynamic library,
- *  because they are not guaranteed to remain stable in the future.
- *  Only consider them in association with static linking.
- * *****************************************************************/
-#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, 
+                                     const void* src, size_t srcSize, 
+                                     unsigned maxSymbolValue, unsigned tableLog, 
+                                     void* workSpace, size_t wkspSize); 
+
+#endif   /* HUF_H_298734234 */ 
+
+/* ****************************************************************** 
+ *  WARNING !! 
+ *  The following section contains advanced and experimental definitions 
+ *  which shall never be used in the context of a dynamic library, 
+ *  because they are not guaranteed to remain stable in the future. 
+ *  Only consider them in association with static linking. 
+ * *****************************************************************/ 
+#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) 
+#define HUF_H_HUF_STATIC_LINKING_ONLY 
 
 /* *** Dependencies *** */
 #include "mem.h"   /* U32 */
@@ -117,9 +117,9 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
 
 /* *** Constants *** */
 #define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
-#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
-#define HUF_SYMBOLVALUE_MAX  255
-
+#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */ 
+#define HUF_SYMBOLVALUE_MAX  255 
+ 
 #define HUF_TABLELOG_ABSOLUTEMAX  12  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
 #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
 #  error "HUF_TABLELOG_MAX is too large !"
@@ -131,7 +131,7 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
 ******************************************/
 /* HUF buffer bounds */
 #define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */ 
 #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
 
 /* static allocation of HUF's Compression Table */
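
As a small illustration of the bounds above (with HUF_STATIC_LINKING_ONLY defined so the macros are visible), a statically sized output buffer large enough for any single compressed block:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    /* worst-case output size for a full 128 KB input block */
    static unsigned char hufOutBuffer[HUF_COMPRESSBOUND(HUF_BLOCKSIZE_MAX)];
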
@@ -161,7 +161,7 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 
 size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
 size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ 
 size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
 size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
 #ifndef HUF_FORCE_DECOMPRESS_X1
@@ -171,22 +171,22 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
 
 
 /* ****************************************
- *  HUF detailed API
- * ****************************************/
-
-/*! HUF_compress() does the following:
- *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
- *  2. (optional) refine tableLog using HUF_optimalTableLog()
- *  3. build Huffman table from count using HUF_buildCTable()
- *  4. save Huffman table to memory buffer using HUF_writeCTable()
- *  5. encode the data stream using HUF_compress4X_usingCTable()
- *
- *  The following API allows targeting specific sub-functions for advanced tasks.
- *  For example, it's possible to compress several blocks using the same 'CTable',
- *  or to save and regenerate 'CTable' using external methods.
- */
+ *  HUF detailed API 
+ * ****************************************/ 
+
+/*! HUF_compress() does the following: 
+ *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") 
+ *  2. (optional) refine tableLog using HUF_optimalTableLog() 
+ *  3. build Huffman table from count using HUF_buildCTable() 
+ *  4. save Huffman table to memory buffer using HUF_writeCTable() 
+ *  5. encode the data stream using HUF_compress4X_usingCTable() 
+ * 
+ *  The following API allows targeting specific sub-functions for advanced tasks. 
+ *  For example, it's possible to compress several blocks using the same 'CTable', 
+ *  or to save and regenerate 'CTable' using external methods. 
+ */ 
 unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ 
 size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
 size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
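
A sketch of the step-by-step path listed above. The histogram is gathered here with a plain loop rather than FSE_count(), and HUF_CREATE_STATIC_CTABLE is assumed to be the static-allocation helper referenced earlier in this header (not shown in this hunk); error checks on each call are omitted for brevity:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    static size_t compress_block_detailed(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
    {
        unsigned count[HUF_SYMBOLVALUE_MAX + 1] = { 0 };
        unsigned const maxSymbolValue = HUF_SYMBOLVALUE_MAX;
        unsigned tableLog = HUF_TABLELOG_DEFAULT;
        HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);   /* assumed helper */
        const unsigned char* const ip = (const unsigned char*)src;
        unsigned char* const op = (unsigned char*)dst;
        size_t i, hSize;

        for (i = 0; i < srcSize; i++) count[ip[i]]++;                                   /* 1. count symbols */
        tableLog = HUF_optimalTableLog(tableLog, srcSize, maxSymbolValue);              /* 2. refine tableLog */
        tableLog = (unsigned)HUF_buildCTable(ctable, count, maxSymbolValue, tableLog);  /* 3. build CTable (returns maxNbBits) */
        hSize = HUF_writeCTable(op, dstCapacity, ctable, maxSymbolValue, tableLog);     /* 4. save CTable header */
        /* 5. encode the payload right after the serialized table */
        return hSize + HUF_compress4X_usingCTable(op + hSize, dstCapacity - hSize, src, srcSize, ctable);
    }
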
@@ -194,40 +194,40 @@ size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* sr
 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 
-typedef enum {
-   HUF_repeat_none,  /**< Cannot use the previous table */
-   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+typedef enum { 
+   HUF_repeat_none,  /**< Cannot use the previous table */ 
+   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ 
    HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
- } HUF_repeat;
-/** HUF_compress4X_repeat() :
- *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- *  If it uses hufTable it does not modify hufTable or repeat.
- *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ } HUF_repeat; 
+/** HUF_compress4X_repeat() : 
+ *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. 
+ *  If it uses hufTable it does not modify hufTable or repeat. 
+ *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. 
  *  If preferRepeat then the old table will always be used if valid.
  *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
-size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
-                       const void* src, size_t srcSize,
-                       unsigned maxSymbolValue, unsigned tableLog,
-                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize, 
+                       const void* src, size_t srcSize, 
+                       unsigned maxSymbolValue, unsigned tableLog, 
+                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ 
                        HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
 
 /** HUF_buildCTable_wksp() :
  *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. 
  */
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
-#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) 
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) 
 size_t HUF_buildCTable_wksp (HUF_CElt* tree,
                        const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
                              void* workSpace, size_t wkspSize);
 
 /*! HUF_readStats() :
- *  Read compact Huffman tree, saved by HUF_writeCTable().
- * `huffWeight` is destination buffer.
- * @return : size read from `src` , or an error Code .
- *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
-size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
-                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ *  Read compact Huffman tree, saved by HUF_writeCTable(). 
+ * `huffWeight` is destination buffer. 
+ * @return : size read from `src` , or an error Code . 
+ *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ 
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, 
+                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, 
                      const void* src, size_t srcSize);
 
 /*! HUF_readStats_wksp() :
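
A sketch of reusing one Huffman table across consecutive blocks via the HUF_repeat state documented above. HUF_CREATE_STATIC_CTABLE is assumed from the static-allocation section of this header; destination advancement and the error checks a real caller needs are elided:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    static void compress_blocks_with_reuse(unsigned char* dst, size_t dstCapacity,
                                           const unsigned char* const* blocks,
                                           const size_t* blockSizes, size_t nbBlocks)
    {
        U64 workSpace[HUF_WORKSPACE_SIZE_U64];          /* 8-byte aligned, so the 4-byte requirement holds */
        HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);   /* assumed helper */
        HUF_repeat repeat = HUF_repeat_none;            /* no previous table before the first block */
        size_t b;

        for (b = 0; b < nbBlocks; b++) {
            size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity,
                                    blocks[b], blockSizes[b],
                                    HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                    workSpace, sizeof(workSpace),
                                    hufTable, &repeat,
                                    /* preferRepeat */ 0, /* bmi2 */ 0,
                                    /* suspectUncompressible */ 0);
            (void)cSize;   /* check HUF_isError(cSize) and advance dst in real code */
        }
    }
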
@@ -244,7 +244,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
                           int bmi2);
 
 /** HUF_readCTable() :
- *  Loading a CTable saved with HUF_writeCTable() */
+ *  Loading a CTable saved with HUF_writeCTable() */ 
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
 
 /** HUF_getNbBitsFromCTable() :
@@ -253,39 +253,39 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
 U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
 
 /*
- * HUF_decompress() does the following:
+ * HUF_decompress() does the following: 
  * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
- * 2. build Huffman table from save, using HUF_readDTableX?()
- * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
- */
+ * 2. build Huffman table from save, using HUF_readDTableX?() 
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() 
+ */ 
 
 /** HUF_selectDecoder() :
- *  Tells which decoder is likely to decode faster,
- *  based on a set of pre-computed metrics.
+ *  Tells which decoder is likely to decode faster, 
+ *  based on a set of pre-computed metrics. 
  * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
- *  Assumption : 0 < dstSize <= 128 KB */
+ *  Assumption : 0 < dstSize <= 128 KB */ 
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
 
-/**
- *  The minimum workspace size for the `workSpace` used in
+/** 
+ *  The minimum workspace size for the `workSpace` used in 
  *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
- *
- *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
- *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
- *  Buffer overflow errors may potentially occur if code modifications result in
- *  a required workspace size greater than that specified in the following
- *  macro.
- */
+ * 
+ *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when 
+ *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. 
+ *  Buffer overflow errors may potentially occur if code modifications result in 
+ *  a required workspace size greater than that specified in the following 
+ *  macro. 
+ */ 
 #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
-#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) 
+ 
 #ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
 size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
 #endif
 #ifndef HUF_FORCE_DECOMPRESS_X1
 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); 
 #endif
 
 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
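
A sketch of the manual decode path: pick a decoder with HUF_selectDecoder(), load the DTable that sits at the front of the compressed data, then decode the payload that follows it. HUF_CREATE_STATIC_DTABLEX2 is assumed from huf.h's static-allocation macros (not visible in this hunk), neither HUF_FORCE_DECOMPRESS_* macro is assumed to be defined, and error checks on hSize and the final call are elided:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    static size_t decompress_block_detailed(void* dst, size_t dstSize,
                                            const void* cSrc, size_t cSrcSize)
    {
        U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
        HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);     /* assumed helper */
        const unsigned char* const ip = (const unsigned char*)cSrc;
        U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);    /* 0 -> X1, 1 -> X2 */
        size_t const hSize = (algo == 0)
            ? HUF_readDTableX1_wksp(dtable, ip, cSrcSize, workSpace, sizeof(workSpace))
            : HUF_readDTableX2_wksp(dtable, ip, cSrcSize, workSpace, sizeof(workSpace));
        /* the serialized table sits at the front; the entropy-coded payload follows */
        return HUF_decompress4X_usingDTable(dst, dstSize, ip + hSize, cSrcSize - hSize, dtable);
    }
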
@@ -297,24 +297,24 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c
 #endif
 
 
-/* ====================== */
+/* ====================== */ 
 /* single stream variants */
-/* ====================== */
+/* ====================== */ 
 
 size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
 size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
 size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
 size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
-/** HUF_compress1X_repeat() :
- *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- *  If it uses hufTable it does not modify hufTable or repeat.
- *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+/** HUF_compress1X_repeat() : 
+ *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. 
+ *  If it uses hufTable it does not modify hufTable or repeat. 
+ *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. 
  *  If preferRepeat then the old table will always be used if valid.
  *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
-size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
-                       const void* src, size_t srcSize,
-                       unsigned maxSymbolValue, unsigned tableLog,
-                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize, 
+                       const void* src, size_t srcSize, 
+                       unsigned maxSymbolValue, unsigned tableLog, 
+                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ 
                        HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
 
 size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
@@ -323,7 +323,7 @@ size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
 #endif
 
 size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); 
 #ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
 size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
@@ -341,22 +341,22 @@ size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* c
 size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
 #endif
 
-/* BMI2 variants.
- * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
- */
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+/* BMI2 variants. 
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
+ */ 
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); 
 #ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
 #endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); 
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); 
 #ifndef HUF_FORCE_DECOMPRESS_X2
 size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
 #endif
 #ifndef HUF_FORCE_DECOMPRESS_X1
 size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
 #endif
-
+ 
 #endif /* HUF_STATIC_LINKING_ONLY */
 
 #if defined (__cplusplus)
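
The BMI2 note above leaves detection to the caller (the library also has its own dynamic detection internally). A minimal compile-time way to derive the flag; MY_BMI2 is a hypothetical caller-side macro, not part of the API:

    /* pass this as the `bmi2` argument of the *_bmi2 variants declared above */
    #if defined(__BMI2__)
    #  define MY_BMI2 1
    #else
    #  define MY_BMI2 0
    #endif
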

+ 31 - 31
contrib/libs/zstd/lib/common/mem.h

@@ -1,11 +1,11 @@
-/*
+/* 
  * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the 
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found 
+ * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. 
  */
 
 #ifndef MEM_H_MODULE
@@ -50,15 +50,15 @@ extern "C" {
 #  else
 #    include <stdint.h> /* intptr_t */
 #  endif
-  typedef   uint8_t BYTE;
+  typedef   uint8_t BYTE; 
   typedef   uint8_t U8;
   typedef    int8_t S8;
-  typedef  uint16_t U16;
-  typedef   int16_t S16;
-  typedef  uint32_t U32;
-  typedef   int32_t S32;
-  typedef  uint64_t U64;
-  typedef   int64_t S64;
+  typedef  uint16_t U16; 
+  typedef   int16_t S16; 
+  typedef  uint32_t U32; 
+  typedef   int32_t S32; 
+  typedef  uint64_t U64; 
+  typedef   int64_t S64; 
 #else
 # include <limits.h>
 #if CHAR_BIT != 8
@@ -138,11 +138,11 @@ MEM_STATIC size_t MEM_swapST(size_t in);
  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
  * The switch below allows selecting a different access method for improved performance.
  * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
+ * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). 
  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
  * Method 2 : direct access. This method is portable but violates the C standard.
  *            It can generate buggy code on targets depending on alignment.
- *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
+ *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) 
  * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
  * Prefer these methods in priority order (0 > 1 > 2)
  */
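
A tiny sketch of "Method 0" above, the portable default: read an unaligned 32-bit value through memcpy(), which modern compilers typically fold into a single load:

    #include <string.h>   /* memcpy */

    /* safe, portable unaligned read (the spirit of the default MEM_read32 path) */
    static unsigned read32_portable(const void* memPtr)
    {
        unsigned val;
        memcpy(&val, memPtr, sizeof(val));
        return val;
    }
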
@@ -182,7 +182,7 @@ Only use if no other choice to achieve best performance on target platform */
 MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
 MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
 MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } 
 
 MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
 MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
@@ -193,27 +193,27 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
 /* currently only defined for gcc and icc */
 #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
-    __pragma( pack(push, 1) )
-    typedef struct { U16 v; } unalign16;
-    typedef struct { U32 v; } unalign32;
-    typedef struct { U64 v; } unalign64;
-    typedef struct { size_t v; } unalignArch;
+    __pragma( pack(push, 1) ) 
+    typedef struct { U16 v; } unalign16; 
+    typedef struct { U32 v; } unalign32; 
+    typedef struct { U64 v; } unalign64; 
+    typedef struct { size_t v; } unalignArch; 
     __pragma( pack(pop) )
 #else
-    typedef struct { U16 v; } __attribute__((packed)) unalign16;
-    typedef struct { U32 v; } __attribute__((packed)) unalign32;
-    typedef struct { U64 v; } __attribute__((packed)) unalign64;
-    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
+    typedef struct { U16 v; } __attribute__((packed)) unalign16; 
+    typedef struct { U32 v; } __attribute__((packed)) unalign32; 
+    typedef struct { U64 v; } __attribute__((packed)) unalign64; 
+    typedef struct { size_t v; } __attribute__((packed)) unalignArch; 
 #endif
 
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
-MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } 
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } 
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } 
+MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } 
 
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } 
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } 
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } 
 
 #else
 

Some files were not shown because too many files changed in this diff