/* zstd_compress_superblock.c */
  1. /*
  2. * Copyright (c) Meta Platforms, Inc. and affiliates.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. /*-*************************************
  11. * Dependencies
  12. ***************************************/
  13. #include "zstd_compress_superblock.h"
  14. #include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
  15. #include "hist.h" /* HIST_countFast_wksp */
  16. #include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
  17. #include "zstd_compress_sequences.h"
  18. #include "zstd_compress_literals.h"
/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
 *      If it is set_repeat, first sub-block's literals section will be Treeless_Literals_Block
 *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if unable to compress.
 *            Or error code */
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
                              const BYTE* literals, size_t litSize,
                              void* dst, size_t dstSize,
                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    /* Reserve room for the Huffman description when it must be emitted;
     * 200 bytes is an upper bound on the serialized table size. */
    size_t const header = writeEntropy ? 200 : 0;
    /* Literals section header is 3, 4 or 5 bytes depending on sizes encoded
     * (2-bit type + 2-bit format + regenerated/compressed size fields). */
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    /* 3-byte header implies the single-stream Huffman layout */
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        /* Emit the pre-serialized Huffman table description first */
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
        const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
                                          : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header : bit layout follows RFC 8878 Literals_Section_Header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            /* 18-bit compressed size does not fit in 32 bits with the rest:
             * top bits spill into a 5th header byte */
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return (size_t)(op-ostart);
}
  118. static size_t
  119. ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
  120. const seqDef* sequences, size_t nbSeqs,
  121. size_t litSize, int lastSubBlock)
  122. {
  123. size_t matchLengthSum = 0;
  124. size_t litLengthSum = 0;
  125. size_t n;
  126. for (n=0; n<nbSeqs; n++) {
  127. const ZSTD_sequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
  128. litLengthSum += seqLen.litLength;
  129. matchLengthSum += seqLen.matchLength;
  130. }
  131. DEBUGLOG(5, "ZSTD_seqDecompressedSize: %u sequences from %p: %u literals + %u matchlength",
  132. (unsigned)nbSeqs, (const void*)sequences,
  133. (unsigned)litLengthSum, (unsigned)matchLengthSum);
  134. if (!lastSubBlock)
  135. assert(litLengthSum == litSize);
  136. else
  137. assert(litLengthSum <= litSize);
  138. (void)litLengthSum;
  139. return matchLengthSum + litSize;
  140. }
/** ZSTD_compressSubBlock_sequences() :
 *  Compresses sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                const seqDef* sequences, size_t nbSeq,
                                const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                const int bmi2, int writeEntropy, int* entropyWritten)
{
    /* offsets wider than the bit accumulator need the long-offsets encoding path */
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header : nbSeq is encoded in 1, 2 or 3 bytes */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 128)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return (size_t)(op - ostart);
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        /* First sub-block : emit the FSE table descriptions chosen for the super-block */
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        /* Following sub-blocks reuse the tables written in the first one */
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, (size_t)(oend - op),
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }

    /* zstd versions <= 1.4.0 mistakenly report error when
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode where sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return (size_t)(op - ostart);
}
  239. /** ZSTD_compressSubBlock() :
  240. * Compresses a single sub-block.
  241. * @return : compressed size of the sub-block
  242. * Or 0 if it failed to compress. */
  243. static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
  244. const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
  245. const seqDef* sequences, size_t nbSeq,
  246. const BYTE* literals, size_t litSize,
  247. const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
  248. const ZSTD_CCtx_params* cctxParams,
  249. void* dst, size_t dstCapacity,
  250. const int bmi2,
  251. int writeLitEntropy, int writeSeqEntropy,
  252. int* litEntropyWritten, int* seqEntropyWritten,
  253. U32 lastBlock)
  254. {
  255. BYTE* const ostart = (BYTE*)dst;
  256. BYTE* const oend = ostart + dstCapacity;
  257. BYTE* op = ostart + ZSTD_blockHeaderSize;
  258. DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
  259. litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
  260. { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
  261. &entropyMetadata->hufMetadata, literals, litSize,
  262. op, (size_t)(oend-op),
  263. bmi2, writeLitEntropy, litEntropyWritten);
  264. FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
  265. if (cLitSize == 0) return 0;
  266. op += cLitSize;
  267. }
  268. { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
  269. &entropyMetadata->fseMetadata,
  270. sequences, nbSeq,
  271. llCode, mlCode, ofCode,
  272. cctxParams,
  273. op, (size_t)(oend-op),
  274. bmi2, writeSeqEntropy, seqEntropyWritten);
  275. FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
  276. if (cSeqSize == 0) return 0;
  277. op += cSeqSize;
  278. }
  279. /* Write block header */
  280. { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize;
  281. U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
  282. MEM_writeLE24(ostart, cBlockHeader24);
  283. }
  284. return (size_t)(op-ostart);
  285. }
  286. static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
  287. const ZSTD_hufCTables_t* huf,
  288. const ZSTD_hufCTablesMetadata_t* hufMetadata,
  289. void* workspace, size_t wkspSize,
  290. int writeEntropy)
  291. {
  292. unsigned* const countWksp = (unsigned*)workspace;
  293. unsigned maxSymbolValue = 255;
  294. size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
  295. if (hufMetadata->hType == set_basic) return litSize;
  296. else if (hufMetadata->hType == set_rle) return 1;
  297. else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
  298. size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
  299. if (ZSTD_isError(largest)) return litSize;
  300. { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
  301. if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
  302. return cLitSizeEstimate + literalSectionHeaderSize;
  303. } }
  304. assert(0); /* impossible */
  305. return 0;
  306. }
  307. static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
  308. const BYTE* codeTable, unsigned maxCode,
  309. size_t nbSeq, const FSE_CTable* fseCTable,
  310. const U8* additionalBits,
  311. short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
  312. void* workspace, size_t wkspSize)
  313. {
  314. unsigned* const countWksp = (unsigned*)workspace;
  315. const BYTE* ctp = codeTable;
  316. const BYTE* const ctStart = ctp;
  317. const BYTE* const ctEnd = ctStart + nbSeq;
  318. size_t cSymbolTypeSizeEstimateInBits = 0;
  319. unsigned max = maxCode;
  320. HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
  321. if (type == set_basic) {
  322. /* We selected this encoding type, so it must be valid. */
  323. assert(max <= defaultMax);
  324. cSymbolTypeSizeEstimateInBits = max <= defaultMax
  325. ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
  326. : ERROR(GENERIC);
  327. } else if (type == set_rle) {
  328. cSymbolTypeSizeEstimateInBits = 0;
  329. } else if (type == set_compressed || type == set_repeat) {
  330. cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
  331. }
  332. if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
  333. while (ctp < ctEnd) {
  334. if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
  335. else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
  336. ctp++;
  337. }
  338. return cSymbolTypeSizeEstimateInBits / 8;
  339. }
  340. static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
  341. const BYTE* llCodeTable,
  342. const BYTE* mlCodeTable,
  343. size_t nbSeq,
  344. const ZSTD_fseCTables_t* fseTables,
  345. const ZSTD_fseCTablesMetadata_t* fseMetadata,
  346. void* workspace, size_t wkspSize,
  347. int writeEntropy)
  348. {
  349. size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
  350. size_t cSeqSizeEstimate = 0;
  351. if (nbSeq == 0) return sequencesSectionHeaderSize;
  352. cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
  353. nbSeq, fseTables->offcodeCTable, NULL,
  354. OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
  355. workspace, wkspSize);
  356. cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
  357. nbSeq, fseTables->litlengthCTable, LL_bits,
  358. LL_defaultNorm, LL_defaultNormLog, MaxLL,
  359. workspace, wkspSize);
  360. cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
  361. nbSeq, fseTables->matchlengthCTable, ML_bits,
  362. ML_defaultNorm, ML_defaultNormLog, MaxML,
  363. workspace, wkspSize);
  364. if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
  365. return cSeqSizeEstimate + sequencesSectionHeaderSize;
  366. }
typedef struct {
    size_t estLitSize;    /* estimated size of the literals section alone */
    size_t estBlockSize;  /* estimated size of the whole sub-block (literals + sequences + header) */
} EstimatedBlockSize;
  371. static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
  372. const BYTE* ofCodeTable,
  373. const BYTE* llCodeTable,
  374. const BYTE* mlCodeTable,
  375. size_t nbSeq,
  376. const ZSTD_entropyCTables_t* entropy,
  377. const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
  378. void* workspace, size_t wkspSize,
  379. int writeLitEntropy, int writeSeqEntropy)
  380. {
  381. EstimatedBlockSize ebs;
  382. ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
  383. &entropy->huf, &entropyMetadata->hufMetadata,
  384. workspace, wkspSize, writeLitEntropy);
  385. ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
  386. nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
  387. workspace, wkspSize, writeSeqEntropy);
  388. ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
  389. return ebs;
  390. }
  391. static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
  392. {
  393. if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
  394. return 1;
  395. if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
  396. return 1;
  397. if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
  398. return 1;
  399. return 0;
  400. }
  401. static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t seqCount)
  402. {
  403. size_t n, total = 0;
  404. assert(sp != NULL);
  405. for (n=0; n<seqCount; n++) {
  406. total += ZSTD_getSequenceLength(seqStore, sp+n).litLength;
  407. }
  408. DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total);
  409. return total;
  410. }
  411. #define BYTESCALE 256
  412. static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs,
  413. size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
  414. int firstSubBlock)
  415. {
  416. size_t n, budget = 0, inSize=0;
  417. /* entropy headers */
  418. size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
  419. assert(firstSubBlock==0 || firstSubBlock==1);
  420. budget += headerSize;
  421. /* first sequence => at least one sequence*/
  422. budget += sp[0].litLength * avgLitCost + avgSeqCost;
  423. if (budget > targetBudget) return 1;
  424. inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH);
  425. /* loop over sequences */
  426. for (n=1; n<nbSeqs; n++) {
  427. size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
  428. budget += currentCost;
  429. inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH);
  430. /* stop when sub-block budget is reached */
  431. if ( (budget > targetBudget)
  432. /* though continue to expand until the sub-block is deemed compressible */
  433. && (budget < inSize * BYTESCALE) )
  434. break;
  435. }
  436. return n;
  437. }
/** ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written into the first block.
 *  The following blocks use repeat_mode to compress.
 *  Sub-blocks are all compressed, except the last one when beneficial.
 *  @return : compressed size of the super block (which features multiple ZSTD blocks)
 *            or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                            const ZSTD_compressedBlockState_t* prevCBlock,
                            ZSTD_compressedBlockState_t* nextCBlock,
                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                            const ZSTD_CCtx_params* cctxParams,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const int bmi2, U32 lastBlock,
                            void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
    size_t const nbSeqs = (size_t)(send - sstart);
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;                      /* tracks progress within literals */
    size_t const nbLiterals = (size_t)(lend - lstart);
    BYTE const* ip = (BYTE const*)src;            /* tracks regenerated (source) progress */
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;                            /* tracks output progress */
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
    size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
    int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
    int writeSeqEntropy = 1;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
               (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));

    /* let's start by a general estimation for the full block */
    if (nbSeqs > 0) {
        EstimatedBlockSize const ebs =
                ZSTD_estimateSubBlockSize(lp, nbLiterals,
                                        ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
                                        &nextCBlock->entropy, entropyMetadata,
                                        workspace, wkspSize,
                                        writeLitEntropy, writeSeqEntropy);
        /* quick estimation : average per-literal and per-sequence costs,
         * scaled by BYTESCALE for integer precision */
        size_t const avgLitCost = nbLiterals ? (ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE;
        size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs;
        const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1);
        size_t n, avgBlockBudget, blockBudgetSupp=0;
        avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks;
        DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes",
                    (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE,
                    (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE);
        /* simplification: if estimates states that the full superblock doesn't compress, just bail out immediately
         * this will result in the production of a single uncompressed block covering @srcSize.*/
        if (ebs.estBlockSize > srcSize) return 0;

        /* compress and write sub-blocks */
        assert(nbSubBlocks>0);
        for (n=0; n < nbSubBlocks-1; n++) {
            /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
            size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp),
                                avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0);
            /* if reached last sequence : break to last sub-block (simplification) */
            assert(seqCount <= (size_t)(send-sp));
            if (sp + seqCount == send) break;
            assert(seqCount > 0);
            /* compress sub-block */
            {   int litEntropyWritten = 0;
                int seqEntropyWritten = 0;
                size_t litSize = countLiterals(seqStorePtr, sp, seqCount);
                const size_t decompressedSize =
                        ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
                size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                sp, seqCount,
                                                lp, litSize,
                                                llCodePtr, mlCodePtr, ofCodePtr,
                                                cctxParams,
                                                op, (size_t)(oend-op),
                                                bmi2, writeLitEntropy, writeSeqEntropy,
                                                &litEntropyWritten, &seqEntropyWritten,
                                                0);
                FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");

                /* check compressibility, update state components */
                if (cSize > 0 && cSize < decompressedSize) {
                    DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes",
                                (unsigned)decompressedSize, (unsigned)cSize);
                    assert(ip + decompressedSize <= iend);
                    ip += decompressedSize;
                    lp += litSize;
                    op += cSize;
                    llCodePtr += seqCount;
                    mlCodePtr += seqCount;
                    ofCodePtr += seqCount;
                    /* Entropy only needs to be written once */
                    if (litEntropyWritten) {
                        writeLitEntropy = 0;
                    }
                    if (seqEntropyWritten) {
                        writeSeqEntropy = 0;
                    }
                    sp += seqCount;
                    blockBudgetSupp = 0;
            }   }
            /* otherwise : do not compress yet, coalesce current sub-block with following one */
        }
    } /* if (nbSeqs > 0) */

    /* write last block */
    DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp));
    {   int litEntropyWritten = 0;
        int seqEntropyWritten = 0;
        size_t litSize = (size_t)(lend - lp);
        size_t seqCount = (size_t)(send - sp);
        const size_t decompressedSize =
                ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
        size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                            sp, seqCount,
                                            lp, litSize,
                                            llCodePtr, mlCodePtr, ofCodePtr,
                                            cctxParams,
                                            op, (size_t)(oend-op),
                                            bmi2, writeLitEntropy, writeSeqEntropy,
                                            &litEntropyWritten, &seqEntropyWritten,
                                            lastBlock);
        FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");

        /* update pointers, the nb of literals borrowed from next sequence must be preserved */
        if (cSize > 0 && cSize < decompressedSize) {
            DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes",
                        (unsigned)decompressedSize, (unsigned)cSize);
            assert(ip + decompressedSize <= iend);
            ip += decompressedSize;
            lp += litSize;
            op += cSize;
            llCodePtr += seqCount;
            mlCodePtr += seqCount;
            ofCodePtr += seqCount;
            /* Entropy only needs to be written once */
            if (litEntropyWritten) {
                writeLitEntropy = 0;
            }
            if (seqEntropyWritten) {
                writeSeqEntropy = 0;
            }
            sp += seqCount;
        }
    }

    if (writeLitEntropy) {
        /* literals entropy tables were never committed :
         * restore previous tables so the next block's repeat mode stays coherent */
        DEBUGLOG(5, "Literal entropy tables were never written");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block");
        return 0;
    }

    if (ip < iend) {
        /* some data left : last part of the block sent uncompressed */
        size_t const rSize = (size_t)((iend - ip));
        size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock);
        DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            const seqDef* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            /* replay the committed sequences to bring repcodes up to date */
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u",
            (unsigned)(op-ostart));
    return (size_t)(op-ostart);
}
  620. size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
  621. void* dst, size_t dstCapacity,
  622. const void* src, size_t srcSize,
  623. unsigned lastBlock)
  624. {
  625. ZSTD_entropyCTablesMetadata_t entropyMetadata;
  626. FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
  627. &zc->blockState.prevCBlock->entropy,
  628. &zc->blockState.nextCBlock->entropy,
  629. &zc->appliedParams,
  630. &entropyMetadata,
  631. zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
  632. return ZSTD_compressSubBlock_multi(&zc->seqStore,
  633. zc->blockState.prevCBlock,
  634. zc->blockState.nextCBlock,
  635. &entropyMetadata,
  636. &zc->appliedParams,
  637. dst, dstCapacity,
  638. src, srcSize,
  639. zc->bmi2, lastBlock,
  640. zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
  641. }