/* zstd_compress_literals.c */
  1. /*
  2. * Copyright (c) Meta Platforms, Inc. and affiliates.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. /*-*************************************
  11. * Dependencies
  12. ***************************************/
  13. #include "zstd_compress_literals.h"
  14. /* **************************************************************
  15. * Debug Traces
  16. ****************************************************************/
  17. #if DEBUGLEVEL >= 2
  18. static size_t showHexa(const void* src, size_t srcSize)
  19. {
  20. const BYTE* const ip = (const BYTE*)src;
  21. size_t u;
  22. for (u=0; u<srcSize; u++) {
  23. RAWLOG(5, " %02X", ip[u]); (void)ip;
  24. }
  25. RAWLOG(5, " \n");
  26. return srcSize;
  27. }
  28. #endif
  29. /* **************************************************************
  30. * Literals compression - special cases
  31. ****************************************************************/
  32. size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  33. {
  34. BYTE* const ostart = (BYTE*)dst;
  35. U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
  36. DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
  37. RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
  38. switch(flSize)
  39. {
  40. case 1: /* 2 - 1 - 5 */
  41. ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
  42. break;
  43. case 2: /* 2 - 2 - 12 */
  44. MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
  45. break;
  46. case 3: /* 2 - 2 - 20 */
  47. MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
  48. break;
  49. default: /* not necessary : flSize is {1,2,3} */
  50. assert(0);
  51. }
  52. ZSTD_memcpy(ostart + flSize, src, srcSize);
  53. DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
  54. return srcSize + flSize;
  55. }
  56. static int allBytesIdentical(const void* src, size_t srcSize)
  57. {
  58. assert(srcSize >= 1);
  59. assert(src != NULL);
  60. { const BYTE b = ((const BYTE*)src)[0];
  61. size_t p;
  62. for (p=1; p<srcSize; p++) {
  63. if (((const BYTE*)src)[p] != b) return 0;
  64. }
  65. return 1;
  66. }
  67. }
  68. size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  69. {
  70. BYTE* const ostart = (BYTE*)dst;
  71. U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
  72. assert(dstCapacity >= 4); (void)dstCapacity;
  73. assert(allBytesIdentical(src, srcSize));
  74. switch(flSize)
  75. {
  76. case 1: /* 2 - 1 - 5 */
  77. ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
  78. break;
  79. case 2: /* 2 - 2 - 12 */
  80. MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
  81. break;
  82. case 3: /* 2 - 2 - 20 */
  83. MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
  84. break;
  85. default: /* not necessary : flSize is {1,2,3} */
  86. assert(0);
  87. }
  88. ostart[flSize] = *(const BYTE*)src;
  89. DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
  90. return flSize+1;
  91. }
  92. /* ZSTD_minLiteralsToCompress() :
  93. * returns minimal amount of literals
  94. * for literal compression to even be attempted.
  95. * Minimum is made tighter as compression strategy increases.
  96. */
  97. static size_t
  98. ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
  99. {
  100. assert((int)strategy >= 0);
  101. assert((int)strategy <= 9);
  102. /* btultra2 : min 8 bytes;
  103. * then 2x larger for each successive compression strategy
  104. * max threshold 64 bytes */
  105. { int const shift = MIN(9-(int)strategy, 3);
  106. size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
  107. DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
  108. return mintc;
  109. }
  110. }
/*! ZSTD_compressLiterals() :
 *  Compresses the literals section of a block with Huffman entropy coding.
 *  Falls back to raw (uncompressed) literals when compression is disabled,
 *  the input is too small, or compression doesn't gain enough; falls back to
 *  an RLE block when all bytes are identical.
 *  @entropyWorkspace, @entropyWorkspaceSize : scratch space handed to the HUF compressor.
 *  @prevHuf : entropy tables from the previous block (read-only).
 *  @nextHuf : receives the tables used for this block (copied from prevHuf,
 *             then updated by the HUF compressor; restored on fallback paths).
 *  @disableLiteralCompression : if non-zero, always store literals raw.
 *  @suspectUncompressible : hint forwarded to HUF (speed opt on incompressible data).
 *  @bmi2 : enables the BMI2 code path in the HUF compressor.
 * @return : size of the literals section written at @dst, or an error code. */
size_t ZSTD_compressLiterals (
                  void* dst, size_t dstCapacity,
            const void* src, size_t srcSize,
                  void* entropyWorkspace, size_t entropyWorkspaceSize,
            const ZSTD_hufCTables_t* prevHuf,
                  ZSTD_hufCTables_t* nextHuf,
                  ZSTD_strategy strategy,
                  int disableLiteralCompression,
                  int suspectUncompressible,
                  int bmi2)
{
    /* literals header : 3 bytes below 1 KB, 4 below 16 KB, 5 otherwise */
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE* const ostart = (BYTE*)dst;
    /* small inputs (<256 bytes) use a single Huffman stream; larger use 4 streams */
    U32 singleStream = srcSize < 256;
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
                disableLiteralCompression, (U32)srcSize, dstCapacity);
    DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));

    /* Prepare nextEntropy assuming reusing the existing table */
    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* if too small, don't even attempt compression (speed opt) */
    if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
    {   HUF_repeat repeat = prevHuf->repeatMode;
        /* assemble HUF flags from caller hints and strategy */
        int const flags = 0
            | (bmi2 ? HUF_flags_bmi2 : 0)
            | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
            | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
            | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);

        typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
        huf_compress_f huf_compress;
        /* a valid repeated table with a 3-byte header implies single-stream mode */
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
        /* @repeat is updated in place : HUF_repeat_none means a new table was built */
        cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
                                src, srcSize,
                                HUF_SYMBOLVALUE_MAX, LitHufLog,
                                entropyWorkspace, entropyWorkspaceSize,
                                (HUF_CElt*)nextHuf->CTable,
                                &repeat, flags);
        DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            DEBUGLOG(5, "reusing statistics from previous huffman block");
            hType = set_repeat;
        }
    }

    /* fallback to raw when not compressible enough (or on HUF error) ;
     * restore nextHuf since the failed attempt may have clobbered it */
    {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
        if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }   }
    if (cLitSize==1) {
        /* A return value of 1 signals that the alphabet consists of a single symbol.
         * However, in some rare circumstances, it could be the compressed size (a single byte).
         * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
         * (it's also necessary to not generate statistics).
         * Therefore, in such a case, actively check that all bytes are identical. */
        if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
            return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }   }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header : bit layout per lhSize is
     * (block type - size format - regenerated size - compressed size) */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            /* 18-bit cLitSize doesn't fit in 32 bits with the other fields :
             * top bits go into a 5th byte */
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
    return lhSize+cLitSize;
}
  207. }