huf_compress.c

  1. /* ******************************************************************
  2. * Huffman encoder, part of New Generation Entropy library
  3. * Copyright (c) Meta Platforms, Inc. and affiliates.
  4. *
  5. * You can contact the author at :
  6. * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
  7. * - Public forum : https://groups.google.com/forum/#!forum/lz4c
  8. *
  9. * This source code is licensed under both the BSD-style license (found in the
  10. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  11. * in the COPYING file in the root directory of this source tree).
  12. * You may select, at your option, one of the above-listed licenses.
  13. ****************************************************************** */
  14. /* **************************************************************
  15. * Compiler specifics
  16. ****************************************************************/
  17. #ifdef _MSC_VER /* Visual Studio */
  18. # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  19. #endif
  20. /* **************************************************************
  21. * Includes
  22. ****************************************************************/
  23. #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
  24. #include "../common/compiler.h"
  25. #include "../common/bitstream.h"
  26. #include "hist.h"
  27. #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
  28. #include "../common/fse.h" /* header compression */
  29. #include "../common/huf.h"
  30. #include "../common/error_private.h"
  31. #include "../common/bits.h" /* ZSTD_highbit32 */
  32. /* **************************************************************
  33. * Error Management
  34. ****************************************************************/
  35. #define HUF_isError ERR_isError
  36. #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
  37. /* **************************************************************
  38. * Required declarations
  39. ****************************************************************/
  40. typedef struct nodeElt_s {
  41. U32 count;
  42. U16 parent;
  43. BYTE byte;
  44. BYTE nbBits;
  45. } nodeElt;
  46. /* **************************************************************
  47. * Debug Traces
  48. ****************************************************************/
  49. #if DEBUGLEVEL >= 2
  50. static size_t showU32(const U32* arr, size_t size)
  51. {
  52. size_t u;
  53. for (u=0; u<size; u++) {
  54. RAWLOG(6, " %u", arr[u]); (void)arr;
  55. }
  56. RAWLOG(6, " \n");
  57. return size;
  58. }
  59. static size_t HUF_getNbBits(HUF_CElt elt);
  60. static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
  61. {
  62. size_t u;
  63. for (u=0; u<size; u++) {
  64. RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
  65. }
  66. RAWLOG(6, " \n");
  67. return size;
  68. }
  69. static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
  70. {
  71. size_t u;
  72. for (u=0; u<size; u++) {
  73. RAWLOG(6, " %u", hnode[u].byte); (void)hnode;
  74. }
  75. RAWLOG(6, " \n");
  76. return size;
  77. }
  78. static size_t showHNodeBits(const nodeElt* hnode, size_t size)
  79. {
  80. size_t u;
  81. for (u=0; u<size; u++) {
  82. RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode;
  83. }
  84. RAWLOG(6, " \n");
  85. return size;
  86. }
  87. #endif
  88. /* *******************************************************
  89. * HUF : Huffman block compression
  90. *********************************************************/
  91. #define HUF_WORKSPACE_MAX_ALIGNMENT 8
  92. static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
  93. {
  94. size_t const mask = align - 1;
  95. size_t const rem = (size_t)workspace & mask;
  96. size_t const add = (align - rem) & mask;
  97. BYTE* const aligned = (BYTE*)workspace + add;
  98. assert((align & (align - 1)) == 0); /* pow 2 */
  99. assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
  100. if (*workspaceSizePtr >= add) {
  101. assert(add < align);
  102. assert(((size_t)aligned & mask) == 0);
  103. *workspaceSizePtr -= add;
  104. return aligned;
  105. } else {
  106. *workspaceSizePtr = 0;
  107. return NULL;
  108. }
  109. }
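/* Illustrative example (not part of the original source): with align == 8,
 * a workspace pointer whose address ends in 0x...6 gives rem = 6 and
 * add = (8 - 6) & 7 = 2, so the returned pointer is advanced by 2 bytes and
 * *workspaceSizePtr shrinks by 2. An already 8-byte-aligned pointer gives
 * rem = 0, hence add = (8 - 0) & 7 = 0 and no adjustment.
 */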
  110. /* HUF_compressWeights() :
  111. * Same as FSE_compress(), but dedicated to huff0's weights compression.
  112. * The use case needs much less stack memory.
  113. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
  114. */
  115. #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
  116. typedef struct {
  117. FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
  118. U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
  119. unsigned count[HUF_TABLELOG_MAX+1];
  120. S16 norm[HUF_TABLELOG_MAX+1];
  121. } HUF_CompressWeightsWksp;
  122. static size_t
  123. HUF_compressWeights(void* dst, size_t dstSize,
  124. const void* weightTable, size_t wtSize,
  125. void* workspace, size_t workspaceSize)
  126. {
  127. BYTE* const ostart = (BYTE*) dst;
  128. BYTE* op = ostart;
  129. BYTE* const oend = ostart + dstSize;
  130. unsigned maxSymbolValue = HUF_TABLELOG_MAX;
  131. U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
  132. HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
  133. if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
  134. /* init conditions */
  135. if (wtSize <= 1) return 0; /* Not compressible */
  136. /* Scan input and build symbol stats */
  137. { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
  138. if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
  139. if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
  140. }
  141. tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
  142. CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
  143. /* Write table description header */
  144. { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
  145. op += hSize;
  146. }
  147. /* Compress */
  148. CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
  149. { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
  150. if (cSize == 0) return 0; /* not enough space for compressed data */
  151. op += cSize;
  152. }
  153. return (size_t)(op-ostart);
  154. }
  155. static size_t HUF_getNbBits(HUF_CElt elt)
  156. {
  157. return elt & 0xFF;
  158. }
  159. static size_t HUF_getNbBitsFast(HUF_CElt elt)
  160. {
  161. return elt;
  162. }
  163. static size_t HUF_getValue(HUF_CElt elt)
  164. {
  165. return elt & ~(size_t)0xFF;
  166. }
  167. static size_t HUF_getValueFast(HUF_CElt elt)
  168. {
  169. return elt;
  170. }
  171. static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
  172. {
  173. assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
  174. *elt = nbBits;
  175. }
  176. static void HUF_setValue(HUF_CElt* elt, size_t value)
  177. {
  178. size_t const nbBits = HUF_getNbBits(*elt);
  179. if (nbBits > 0) {
  180. assert((value >> nbBits) == 0);
  181. *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
  182. }
  183. }
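/* Illustrative example (not part of the original source), assuming a 64-bit
 * size_t: for a symbol coded with nbBits = 3 and value = 0b101,
 * HUF_setNbBits() stores 3 in the low byte and HUF_setValue() ORs in
 * 0b101 << (64 - 3), so the element becomes (0b101 << 61) | 3.
 * HUF_getNbBits() recovers 3 via (elt & 0xFF) and HUF_getValue() recovers
 * the top-aligned value via (elt & ~0xFF).
 */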
  184. HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
  185. {
  186. HUF_CTableHeader header;
  187. ZSTD_memcpy(&header, ctable, sizeof(header));
  188. return header;
  189. }
  190. static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
  191. {
  192. HUF_CTableHeader header;
  193. HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
  194. ZSTD_memset(&header, 0, sizeof(header));
  195. assert(tableLog < 256);
  196. header.tableLog = (BYTE)tableLog;
  197. assert(maxSymbolValue < 256);
  198. header.maxSymbolValue = (BYTE)maxSymbolValue;
  199. ZSTD_memcpy(ctable, &header, sizeof(header));
  200. }
  201. typedef struct {
  202. HUF_CompressWeightsWksp wksp;
  203. BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
  204. BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
  205. } HUF_WriteCTableWksp;
  206. size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
  207. const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
  208. void* workspace, size_t workspaceSize)
  209. {
  210. HUF_CElt const* const ct = CTable + 1;
  211. BYTE* op = (BYTE*)dst;
  212. U32 n;
  213. HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
  214. HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));
  215. assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
  216. assert(HUF_readCTableHeader(CTable).tableLog == huffLog);
  217. /* check conditions */
  218. if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
  219. if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
  220. /* convert to weight */
  221. wksp->bitsToWeight[0] = 0;
  222. for (n=1; n<huffLog+1; n++)
  223. wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
  224. for (n=0; n<maxSymbolValue; n++)
  225. wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
  226. /* attempt weights compression by FSE */
  227. if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
  228. { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
  229. if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
  230. op[0] = (BYTE)hSize;
  231. return hSize+1;
  232. } }
  233. /* write raw values as 4-bits (max : 15) */
  234. if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
  235. if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
  236. op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
  237. wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
  238. for (n=0; n<maxSymbolValue; n+=2)
  239. op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
  240. return ((maxSymbolValue+1)/2) + 1;
  241. }
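/* Illustrative example (not part of the original source): when the FSE path
 * is not profitable, the header byte is 128 + (maxSymbolValue - 1) and the
 * weights of symbols [0, maxSymbolValue) are packed two per byte, high
 * nibble first. With maxSymbolValue = 4 and weights {3, 2, 2, 1} this emits
 * op[0] = 131, op[1] = 0x32, op[2] = 0x21, i.e. ((4+1)/2)+1 = 3 bytes total.
 */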
  242. size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
  243. {
  244. BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
  245. U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
  246. U32 tableLog = 0;
  247. U32 nbSymbols = 0;
  248. HUF_CElt* const ct = CTable + 1;
  249. /* get symbol weights */
  250. CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
  251. *hasZeroWeights = (rankVal[0] > 0);
  252. /* check result */
  253. if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
  254. if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
  255. *maxSymbolValuePtr = nbSymbols - 1;
  256. HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);
  257. /* Prepare base value per rank */
  258. { U32 n, nextRankStart = 0;
  259. for (n=1; n<=tableLog; n++) {
  260. U32 curr = nextRankStart;
  261. nextRankStart += (rankVal[n] << (n-1));
  262. rankVal[n] = curr;
  263. } }
  264. /* fill nbBits */
  265. { U32 n; for (n=0; n<nbSymbols; n++) {
  266. const U32 w = huffWeight[n];
  267. HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
  268. } }
  269. /* fill val */
  270. { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
  271. U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
  272. { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
273. /* determine starting value per rank */
  274. valPerRank[tableLog+1] = 0; /* for w==0 */
  275. { U16 min = 0;
  276. U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
  277. valPerRank[n] = min; /* get starting value within each rank */
  278. min += nbPerRank[n];
  279. min >>= 1;
  280. } }
  281. /* assign value within rank, symbol order */
  282. { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
  283. }
  284. return readSize;
  285. }
  286. U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
  287. {
  288. const HUF_CElt* const ct = CTable + 1;
  289. assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
  290. if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
  291. return 0;
  292. return (U32)HUF_getNbBits(ct[symbolValue]);
  293. }
  294. /**
  295. * HUF_setMaxHeight():
  296. * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
  297. *
  298. * It attempts to convert all nodes with nbBits > @targetNbBits
  299. * to employ @targetNbBits instead. Then it adjusts the tree
  300. * so that it remains a valid canonical Huffman tree.
  301. *
  302. * @pre The sum of the ranks of each symbol == 2^largestBits,
  303. * where largestBits == huffNode[lastNonNull].nbBits.
  304. * @post The sum of the ranks of each symbol == 2^largestBits,
  305. * where largestBits is the return value (expected <= targetNbBits).
  306. *
  307. * @param huffNode The Huffman tree modified in place to enforce targetNbBits.
  308. * It's presumed sorted, from most frequent to rarest symbol.
  309. * @param lastNonNull The symbol with the lowest count in the Huffman tree.
  310. * @param targetNbBits The allowed number of bits, which the Huffman tree
  311. * may not respect. After this function the Huffman tree will
  312. * respect targetNbBits.
  313. * @return The maximum number of bits of the Huffman tree after adjustment.
  314. */
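/* Illustrative example (not part of the original source): with
 * largestBits = 13 and targetNbBits = 11, baseCost = 1 << (13 - 11) = 4.
 * Demoting a 12-bit symbol to 11 bits adds 4 - (1 << (13 - 12)) = 2 to
 * totalCost, and demoting a 13-bit symbol adds 4 - 1 = 3. The accumulated
 * totalCost is then renormalized by >> (13 - 11) (divided by 4), and each
 * later promotion of a symbol by one extra bit repays 1 << (nBitsToDecrease - 1).
 */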
  315. static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
  316. {
  317. const U32 largestBits = huffNode[lastNonNull].nbBits;
  318. /* early exit : no elt > targetNbBits, so the tree is already valid. */
  319. if (largestBits <= targetNbBits) return largestBits;
  320. DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
  321. /* there are several too large elements (at least >= 2) */
  322. { int totalCost = 0;
  323. const U32 baseCost = 1 << (largestBits - targetNbBits);
  324. int n = (int)lastNonNull;
  325. /* Adjust any ranks > targetNbBits to targetNbBits.
326. * Compute totalCost, which is how far over 2^largestBits the sum of
327. * the ranks is after adjusting the offending ranks.
  328. */
  329. while (huffNode[n].nbBits > targetNbBits) {
  330. totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
  331. huffNode[n].nbBits = (BYTE)targetNbBits;
  332. n--;
  333. }
  334. /* n stops at huffNode[n].nbBits <= targetNbBits */
  335. assert(huffNode[n].nbBits <= targetNbBits);
336. /* n ends at the index of the smallest symbol using < targetNbBits */
  337. while (huffNode[n].nbBits == targetNbBits) --n;
  338. /* renorm totalCost from 2^largestBits to 2^targetNbBits
  339. * note : totalCost is necessarily a multiple of baseCost */
  340. assert(((U32)totalCost & (baseCost - 1)) == 0);
  341. totalCost >>= (largestBits - targetNbBits);
  342. assert(totalCost > 0);
  343. /* repay normalized cost */
  344. { U32 const noSymbol = 0xF0F0F0F0;
  345. U32 rankLast[HUF_TABLELOG_MAX+2];
  346. /* Get pos of last (smallest = lowest cum. count) symbol per rank */
  347. ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
  348. { U32 currentNbBits = targetNbBits;
  349. int pos;
  350. for (pos=n ; pos >= 0; pos--) {
  351. if (huffNode[pos].nbBits >= currentNbBits) continue;
  352. currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */
  353. rankLast[targetNbBits-currentNbBits] = (U32)pos;
  354. } }
  355. while (totalCost > 0) {
  356. /* Try to reduce the next power of 2 above totalCost because we
  357. * gain back half the rank.
  358. */
  359. U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1;
  360. for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
  361. U32 const highPos = rankLast[nBitsToDecrease];
  362. U32 const lowPos = rankLast[nBitsToDecrease-1];
  363. if (highPos == noSymbol) continue;
  364. /* Decrease highPos if no symbols of lowPos or if it is
  365. * not cheaper to remove 2 lowPos than highPos.
  366. */
  367. if (lowPos == noSymbol) break;
  368. { U32 const highTotal = huffNode[highPos].count;
  369. U32 const lowTotal = 2 * huffNode[lowPos].count;
  370. if (highTotal <= lowTotal) break;
  371. } }
  372. /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
  373. assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
  374. /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
  375. while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
  376. nBitsToDecrease++;
  377. assert(rankLast[nBitsToDecrease] != noSymbol);
  378. /* Increase the number of bits to gain back half the rank cost. */
  379. totalCost -= 1 << (nBitsToDecrease-1);
  380. huffNode[rankLast[nBitsToDecrease]].nbBits++;
  381. /* Fix up the new rank.
  382. * If the new rank was empty, this symbol is now its smallest.
  383. * Otherwise, this symbol will be the largest in the new rank so no adjustment.
  384. */
  385. if (rankLast[nBitsToDecrease-1] == noSymbol)
  386. rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
  387. /* Fix up the old rank.
  388. * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
  389. * it must be the only symbol in its rank, so the old rank now has no symbols.
  390. * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
  391. * the smallest node in the rank. If the previous position belongs to a different rank,
  392. * then the rank is now empty.
  393. */
  394. if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
  395. rankLast[nBitsToDecrease] = noSymbol;
  396. else {
  397. rankLast[nBitsToDecrease]--;
  398. if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease)
  399. rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
  400. }
  401. } /* while (totalCost > 0) */
  402. /* If we've removed too much weight, then we have to add it back.
  403. * To avoid overshooting again, we only adjust the smallest rank.
  404. * We take the largest nodes from the lowest rank 0 and move them
  405. * to rank 1. There's guaranteed to be enough rank 0 symbols because
  406. * TODO.
  407. */
  408. while (totalCost < 0) { /* Sometimes, cost correction overshoot */
  409. /* special case : no rank 1 symbol (using targetNbBits-1);
  410. * let's create one from largest rank 0 (using targetNbBits).
  411. */
  412. if (rankLast[1] == noSymbol) {
  413. while (huffNode[n].nbBits == targetNbBits) n--;
  414. huffNode[n+1].nbBits--;
  415. assert(n >= 0);
  416. rankLast[1] = (U32)(n+1);
  417. totalCost++;
  418. continue;
  419. }
  420. huffNode[ rankLast[1] + 1 ].nbBits--;
  421. rankLast[1]++;
  422. totalCost ++;
  423. }
  424. } /* repay normalized cost */
  425. } /* there are several too large elements (at least >= 2) */
  426. return targetNbBits;
  427. }
  428. typedef struct {
  429. U16 base;
  430. U16 curr;
  431. } rankPos;
  432. typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)];
  433. /* Number of buckets available for HUF_sort() */
  434. #define RANK_POSITION_TABLE_SIZE 192
  435. typedef struct {
  436. huffNodeTable huffNodeTbl;
  437. rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
  438. } HUF_buildCTable_wksp_tables;
  439. /* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
  440. * Strategy is to use as many buckets as possible for representing distinct
  441. * counts while using the remainder to represent all "large" counts.
  442. *
  443. * To satisfy this requirement for 192 buckets, we can do the following:
  444. * Let buckets 0-166 represent distinct counts of [0, 166]
  445. * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
  446. */
  447. #define RANK_POSITION_MAX_COUNT_LOG 32
  448. #define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
  449. #define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)
  450. /* Return the appropriate bucket index for a given count. See definition of
  451. * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
  452. */
  453. static U32 HUF_getIndex(U32 const count) {
  454. return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
  455. ? count
  456. : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
  457. }
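/* Illustrative example (not part of the original source): a count below the
 * cutoff indexes its own bucket (count 100 -> bucket 100), while a larger
 * count is log2-bucketed: count 1000000 has ZSTD_highbit32() == 19, so it
 * lands in bucket 19 + RANK_POSITION_LOG_BUCKETS_BEGIN (158) == 177.
 */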
  458. /* Helper swap function for HUF_quickSortPartition() */
  459. static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
  460. nodeElt tmp = *a;
  461. *a = *b;
  462. *b = tmp;
  463. }
  464. /* Returns 0 if the huffNode array is not sorted by descending count */
  465. MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
  466. U32 i;
  467. for (i = 1; i < maxSymbolValue1; ++i) {
  468. if (huffNode[i].count > huffNode[i-1].count) {
  469. return 0;
  470. }
  471. }
  472. return 1;
  473. }
  474. /* Insertion sort by descending order */
  475. HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
  476. int i;
  477. int const size = high-low+1;
  478. huffNode += low;
  479. for (i = 1; i < size; ++i) {
  480. nodeElt const key = huffNode[i];
  481. int j = i - 1;
  482. while (j >= 0 && huffNode[j].count < key.count) {
  483. huffNode[j + 1] = huffNode[j];
  484. j--;
  485. }
  486. huffNode[j + 1] = key;
  487. }
  488. }
  489. /* Pivot helper function for quicksort. */
  490. static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
  491. /* Simply select rightmost element as pivot. "Better" selectors like
  492. * median-of-three don't experimentally appear to have any benefit.
  493. */
  494. U32 const pivot = arr[high].count;
  495. int i = low - 1;
  496. int j = low;
  497. for ( ; j < high; j++) {
  498. if (arr[j].count > pivot) {
  499. i++;
  500. HUF_swapNodes(&arr[i], &arr[j]);
  501. }
  502. }
  503. HUF_swapNodes(&arr[i + 1], &arr[high]);
  504. return i + 1;
  505. }
  506. /* Classic quicksort by descending with partially iterative calls
  507. * to reduce worst case callstack size.
  508. */
  509. static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
  510. int const kInsertionSortThreshold = 8;
  511. if (high - low < kInsertionSortThreshold) {
  512. HUF_insertionSort(arr, low, high);
  513. return;
  514. }
  515. while (low < high) {
  516. int const idx = HUF_quickSortPartition(arr, low, high);
  517. if (idx - low < high - idx) {
  518. HUF_simpleQuickSort(arr, low, idx - 1);
  519. low = idx + 1;
  520. } else {
  521. HUF_simpleQuickSort(arr, idx + 1, high);
  522. high = idx - 1;
  523. }
  524. }
  525. }
  526. /**
  527. * HUF_sort():
  528. * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
  529. * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
  530. *
  531. * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
  532. * Must have (maxSymbolValue + 1) entries.
  533. * @param[in] count Histogram of the symbols.
  534. * @param[in] maxSymbolValue Maximum symbol value.
  535. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
  536. */
  537. static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
  538. U32 n;
  539. U32 const maxSymbolValue1 = maxSymbolValue+1;
  540. /* Compute base and set curr to base.
541. * For each symbol n, let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
  542. * See HUF_getIndex to see bucketing strategy.
  543. * We attribute each symbol to lowerRank's base value, because we want to know where
  544. * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
  545. */
  546. ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
  547. for (n = 0; n < maxSymbolValue1; ++n) {
  548. U32 lowerRank = HUF_getIndex(count[n]);
  549. assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
  550. rankPosition[lowerRank].base++;
  551. }
  552. assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
  553. /* Set up the rankPosition table */
  554. for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
  555. rankPosition[n-1].base += rankPosition[n].base;
  556. rankPosition[n-1].curr = rankPosition[n-1].base;
  557. }
  558. /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
  559. for (n = 0; n < maxSymbolValue1; ++n) {
  560. U32 const c = count[n];
  561. U32 const r = HUF_getIndex(c) + 1;
  562. U32 const pos = rankPosition[r].curr++;
  563. assert(pos < maxSymbolValue1);
  564. huffNode[pos].count = c;
  565. huffNode[pos].byte = (BYTE)n;
  566. }
  567. /* Sort each bucket. */
  568. for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
  569. int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
  570. U32 const bucketStartIdx = rankPosition[n].base;
  571. if (bucketSize > 1) {
  572. assert(bucketStartIdx < maxSymbolValue1);
  573. HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
  574. }
  575. }
  576. assert(HUF_isSorted(huffNode, maxSymbolValue1));
  577. }
  578. /** HUF_buildCTable_wksp() :
  579. * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
  580. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
  581. */
  582. #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
  583. /* HUF_buildTree():
  584. * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
  585. *
  586. * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
  587. * @param maxSymbolValue The maximum symbol value.
  588. * @return The smallest node in the Huffman tree (by count).
  589. */
  590. static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
  591. {
  592. nodeElt* const huffNode0 = huffNode - 1;
  593. int nonNullRank;
  594. int lowS, lowN;
  595. int nodeNb = STARTNODE;
  596. int n, nodeRoot;
  597. DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
  598. /* init for parents */
  599. nonNullRank = (int)maxSymbolValue;
  600. while(huffNode[nonNullRank].count == 0) nonNullRank--;
  601. lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
  602. huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
  603. huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
  604. nodeNb++; lowS-=2;
  605. for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
  606. huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
  607. /* create parents */
  608. while (nodeNb <= nodeRoot) {
  609. int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
  610. int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
  611. huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
  612. huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
  613. nodeNb++;
  614. }
  615. /* distribute weights (unlimited tree height) */
  616. huffNode[nodeRoot].nbBits = 0;
  617. for (n=nodeRoot-1; n>=STARTNODE; n--)
  618. huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
  619. for (n=0; n<=nonNullRank; n++)
  620. huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
  621. DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));
  622. return nonNullRank;
  623. }
  624. /**
  625. * HUF_buildCTableFromTree():
  626. * Build the CTable given the Huffman tree in huffNode.
  627. *
  628. * @param[out] CTable The output Huffman CTable.
  629. * @param huffNode The Huffman tree.
  630. * @param nonNullRank The last and smallest node in the Huffman tree.
  631. * @param maxSymbolValue The maximum symbol value.
  632. * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
  633. */
  634. static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
  635. {
  636. HUF_CElt* const ct = CTable + 1;
  637. /* fill result into ctable (val, nbBits) */
  638. int n;
  639. U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
  640. U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
  641. int const alphabetSize = (int)(maxSymbolValue + 1);
  642. for (n=0; n<=nonNullRank; n++)
  643. nbPerRank[huffNode[n].nbBits]++;
  644. /* determine starting value per rank */
  645. { U16 min = 0;
  646. for (n=(int)maxNbBits; n>0; n--) {
  647. valPerRank[n] = min; /* get starting value within each rank */
  648. min += nbPerRank[n];
  649. min >>= 1;
  650. } }
  651. for (n=0; n<alphabetSize; n++)
  652. HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
  653. for (n=0; n<alphabetSize; n++)
  654. HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
  655. HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
  656. }
  657. size_t
  658. HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
  659. void* workSpace, size_t wkspSize)
  660. {
  661. HUF_buildCTable_wksp_tables* const wksp_tables =
  662. (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
  663. nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
  664. nodeElt* const huffNode = huffNode0+1;
  665. int nonNullRank;
  666. HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables));
  667. DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);
  668. /* safety checks */
  669. if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
  670. return ERROR(workSpace_tooSmall);
  671. if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
  672. if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
  673. return ERROR(maxSymbolValue_tooLarge);
  674. ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
  675. /* sort, decreasing order */
  676. HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
  677. DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));
  678. /* build tree */
  679. nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
  680. /* determine and enforce maxTableLog */
  681. maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
  682. if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
  683. HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
  684. return maxNbBits;
  685. }
  686. size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
  687. {
  688. HUF_CElt const* ct = CTable + 1;
  689. size_t nbBits = 0;
  690. int s;
  691. for (s = 0; s <= (int)maxSymbolValue; ++s) {
  692. nbBits += HUF_getNbBits(ct[s]) * count[s];
  693. }
  694. return nbBits >> 3;
  695. }
  696. int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
  697. HUF_CTableHeader header = HUF_readCTableHeader(CTable);
  698. HUF_CElt const* ct = CTable + 1;
  699. int bad = 0;
  700. int s;
  701. assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);
  702. if (header.maxSymbolValue < maxSymbolValue)
  703. return 0;
  704. for (s = 0; s <= (int)maxSymbolValue; ++s) {
  705. bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
  706. }
  707. return !bad;
  708. }
  709. size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
  710. /** HUF_CStream_t:
  711. * Huffman uses its own BIT_CStream_t implementation.
  712. * There are three major differences from BIT_CStream_t:
  713. * 1. HUF_addBits() takes a HUF_CElt (size_t) which is
  714. * the pair (nbBits, value) in the format:
  716. * - Bits [0, 4) = nbBits
  717. * - Bits [4, 64 - nbBits) = 0
  718. * - Bits [64 - nbBits, 64) = value
  719. * 2. The bitContainer is built from the upper bits and
  720. * right shifted. E.g. to add a new value of N bits
  721. * you right shift the bitContainer by N, then or in
  722. * the new value into the N upper bits.
  723. * 3. The bitstream has two bit containers. You can add
  724. * bits to the second container and merge them into
  725. * the first container.
  726. */
  727. #define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
  728. typedef struct {
  729. size_t bitContainer[2];
  730. size_t bitPos[2];
  731. BYTE* startPtr;
  732. BYTE* ptr;
  733. BYTE* endPtr;
  734. } HUF_CStream_t;
  735. /**! HUF_initCStream():
  736. * Initializes the bitstream.
  737. * @returns 0 or an error code.
  738. */
  739. static size_t HUF_initCStream(HUF_CStream_t* bitC,
  740. void* startPtr, size_t dstCapacity)
  741. {
  742. ZSTD_memset(bitC, 0, sizeof(*bitC));
  743. bitC->startPtr = (BYTE*)startPtr;
  744. bitC->ptr = bitC->startPtr;
  745. bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
  746. if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
  747. return 0;
  748. }
  749. /*! HUF_addBits():
  750. * Adds the symbol stored in HUF_CElt elt to the bitstream.
  751. *
  752. * @param elt The element we're adding. This is a (nbBits, value) pair.
  753. * See the HUF_CStream_t docs for the format.
  754. * @param idx Insert into the bitstream at this idx.
  755. * @param kFast This is a template parameter. If the bitstream is guaranteed
  756. * to have at least 4 unused bits after this call it may be 1,
  757. * otherwise it must be 0. HUF_addBits() is faster when fast is set.
  758. */
  759. FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
  760. {
  761. assert(idx <= 1);
  762. assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
  763. /* This is efficient on x86-64 with BMI2 because shrx
  764. * only reads the low 6 bits of the register. The compiler
  765. * knows this and elides the mask. When fast is set,
  766. * every operation can use the same value loaded from elt.
  767. */
  768. bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
  769. bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
  770. /* We only read the low 8 bits of bitC->bitPos[idx] so it
  771. * doesn't matter that the high bits have noise from the value.
  772. */
  773. bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
  774. assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
  775. /* The last 4-bits of elt are dirty if fast is set,
  776. * so we must not be overwriting bits that have already been
  777. * inserted into the bit container.
  778. */
  779. #if DEBUGLEVEL >= 1
  780. {
  781. size_t const nbBits = HUF_getNbBits(elt);
  782. size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
  783. (void)dirtyBits;
  784. /* Middle bits are 0. */
  785. assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
  786. /* We didn't overwrite any bits in the bit container. */
  787. assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
  788. (void)dirtyBits;
  789. }
  790. #endif
  791. }
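/* Illustrative example (not part of the original source), assuming a 64-bit
 * container: adding a first element with nbBits = 5 shifts the container
 * right by 5 and ORs the top-aligned value into bits [59, 64). Adding a
 * second element with nbBits = 3 shifts the container right by 3, moving the
 * first value to bits [56, 61), and ORs the new value into bits [61, 64);
 * bitPos[idx] now holds 5 + 3 = 8 in its low byte.
 */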
  792. FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
  793. {
  794. bitC->bitContainer[1] = 0;
  795. bitC->bitPos[1] = 0;
  796. }
  797. /*! HUF_mergeIndex1() :
  798. * Merges the bit container @ index 1 into the bit container @ index 0
  799. * and zeros the bit container @ index 1.
  800. */
  801. FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
  802. {
  803. assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
  804. bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
  805. bitC->bitContainer[0] |= bitC->bitContainer[1];
  806. bitC->bitPos[0] += bitC->bitPos[1];
  807. assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
  808. }
  809. /*! HUF_flushBits() :
  810. * Flushes the bits in the bit container @ index 0.
  811. *
  812. * @post bitPos will be < 8.
  813. * @param kFast If kFast is set then we must know a-priori that
  814. * the bit container will not overflow.
  815. */
  816. FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
  817. {
  818. /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
  819. size_t const nbBits = bitC->bitPos[0] & 0xFF;
  820. size_t const nbBytes = nbBits >> 3;
  821. /* The top nbBits bits of bitContainer are the ones we need. */
  822. size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
  823. /* Mask bitPos to account for the bytes we consumed. */
  824. bitC->bitPos[0] &= 7;
  825. assert(nbBits > 0);
  826. assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
  827. assert(bitC->ptr <= bitC->endPtr);
  828. MEM_writeLEST(bitC->ptr, bitContainer);
  829. bitC->ptr += nbBytes;
  830. assert(!kFast || bitC->ptr <= bitC->endPtr);
  831. if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
  832. /* bitContainer doesn't need to be modified because the leftover
  833. * bits are already the top bitPos bits. And we don't care about
  834. * noise in the lower values.
  835. */
  836. }
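/* Illustrative example (not part of the original source), on a 64-bit build:
 * with bitPos[0] = 21, nbBytes = 21 >> 3 = 2, so the top 21 bits of the
 * container are shifted down and written little-endian at bitC->ptr (a full
 * size_t is stored, but ptr only advances by 2 bytes), and bitPos[0]
 * becomes 21 & 7 = 5, the number of bits still pending in the container.
 */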
  837. /*! HUF_endMark()
  838. * @returns The Huffman stream end mark: A 1-bit value = 1.
  839. */
  840. static HUF_CElt HUF_endMark(void)
  841. {
  842. HUF_CElt endMark;
  843. HUF_setNbBits(&endMark, 1);
  844. HUF_setValue(&endMark, 1);
  845. return endMark;
  846. }
  847. /*! HUF_closeCStream() :
  848. * @return Size of CStream, in bytes,
  849. * or 0 if it could not fit into dstBuffer */
  850. static size_t HUF_closeCStream(HUF_CStream_t* bitC)
  851. {
  852. HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
  853. HUF_flushBits(bitC, /* kFast */ 0);
  854. {
  855. size_t const nbBits = bitC->bitPos[0] & 0xFF;
  856. if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
  857. return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
  858. }
  859. }
  860. FORCE_INLINE_TEMPLATE void
  861. HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
  862. {
  863. HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
  864. }
  865. FORCE_INLINE_TEMPLATE void
  866. HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
  867. const BYTE* ip, size_t srcSize,
  868. const HUF_CElt* ct,
  869. int kUnroll, int kFastFlush, int kLastFast)
  870. {
  871. /* Join to kUnroll */
  872. int n = (int)srcSize;
  873. int rem = n % kUnroll;
  874. if (rem > 0) {
  875. for (; rem > 0; --rem) {
  876. HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
  877. }
  878. HUF_flushBits(bitC, kFastFlush);
  879. }
  880. assert(n % kUnroll == 0);
  881. /* Join to 2 * kUnroll */
  882. if (n % (2 * kUnroll)) {
  883. int u;
  884. for (u = 1; u < kUnroll; ++u) {
  885. HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
  886. }
  887. HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
  888. HUF_flushBits(bitC, kFastFlush);
  889. n -= kUnroll;
  890. }
  891. assert(n % (2 * kUnroll) == 0);
  892. for (; n>0; n-= 2 * kUnroll) {
  893. /* Encode kUnroll symbols into the bitstream @ index 0. */
  894. int u;
  895. for (u = 1; u < kUnroll; ++u) {
  896. HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
  897. }
  898. HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
  899. HUF_flushBits(bitC, kFastFlush);
  900. /* Encode kUnroll symbols into the bitstream @ index 1.
  901. * This allows us to start filling the bit container
  902. * without any data dependencies.
  903. */
  904. HUF_zeroIndex1(bitC);
  905. for (u = 1; u < kUnroll; ++u) {
  906. HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
  907. }
  908. HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
  909. /* Merge bitstream @ index 1 into the bitstream @ index 0 */
  910. HUF_mergeIndex1(bitC);
  911. HUF_flushBits(bitC, kFastFlush);
  912. }
  913. assert(n == 0);
  914. }
  915. /**
916. * Returns a tight upper bound on the output space needed by Huffman,
917. * plus an 8-byte buffer to handle over-writes. If the output is at least
  918. * this large we don't need to do bounds checks during Huffman encoding.
  919. */
  920. static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
  921. {
  922. return ((srcSize * tableLog) >> 3) + 8;
  923. }
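/* Illustrative example (not part of the original source): for srcSize = 1000
 * bytes and tableLog = 11, the bound is ((1000 * 11) >> 3) + 8 = 1375 + 8
 * = 1383 bytes, i.e. the worst case where every symbol takes tableLog bits,
 * plus 8 bytes of slack for the over-writing flush.
 */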
  924. FORCE_INLINE_TEMPLATE size_t
  925. HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
  926. const void* src, size_t srcSize,
  927. const HUF_CElt* CTable)
  928. {
  929. U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
  930. HUF_CElt const* ct = CTable + 1;
  931. const BYTE* ip = (const BYTE*) src;
  932. BYTE* const ostart = (BYTE*)dst;
  933. BYTE* const oend = ostart + dstSize;
  934. HUF_CStream_t bitC;
  935. /* init */
  936. if (dstSize < 8) return 0; /* not enough space to compress */
  937. { BYTE* op = ostart;
  938. size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
  939. if (HUF_isError(initErr)) return 0; }
  940. if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
  941. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
  942. else {
  943. if (MEM_32bits()) {
  944. switch (tableLog) {
  945. case 11:
  946. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
  947. break;
  948. case 10: ZSTD_FALLTHROUGH;
  949. case 9: ZSTD_FALLTHROUGH;
  950. case 8:
  951. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
  952. break;
  953. case 7: ZSTD_FALLTHROUGH;
  954. default:
  955. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
  956. break;
  957. }
  958. } else {
  959. switch (tableLog) {
  960. case 11:
  961. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
  962. break;
  963. case 10:
  964. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
  965. break;
  966. case 9:
  967. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
  968. break;
  969. case 8:
  970. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
  971. break;
  972. case 7:
  973. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
  974. break;
  975. case 6: ZSTD_FALLTHROUGH;
  976. default:
  977. HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
  978. break;
  979. }
  980. }
  981. }
  982. assert(bitC.ptr <= bitC.endPtr);
  983. return HUF_closeCStream(&bitC);
  984. }
  985. #if DYNAMIC_BMI2
  986. static BMI2_TARGET_ATTRIBUTE size_t
  987. HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
  988. const void* src, size_t srcSize,
  989. const HUF_CElt* CTable)
  990. {
  991. return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
  992. }
  993. static size_t
  994. HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
  995. const void* src, size_t srcSize,
  996. const HUF_CElt* CTable)
  997. {
  998. return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
  999. }
  1000. static size_t
  1001. HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
  1002. const void* src, size_t srcSize,
  1003. const HUF_CElt* CTable, const int flags)
  1004. {
  1005. if (flags & HUF_flags_bmi2) {
  1006. return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
  1007. }
  1008. return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
  1009. }
  1010. #else
  1011. static size_t
  1012. HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
  1013. const void* src, size_t srcSize,
  1014. const HUF_CElt* CTable, const int flags)
  1015. {
  1016. (void)flags;
  1017. return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
  1018. }
  1019. #endif
  1020. size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
  1021. {
  1022. return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
  1023. }
  1024. static size_t
  1025. HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
  1026. const void* src, size_t srcSize,
  1027. const HUF_CElt* CTable, int flags)
  1028. {
  1029. size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
  1030. const BYTE* ip = (const BYTE*) src;
  1031. const BYTE* const iend = ip + srcSize;
  1032. BYTE* const ostart = (BYTE*) dst;
  1033. BYTE* const oend = ostart + dstSize;
  1034. BYTE* op = ostart;
  1035. if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
  1036. if (srcSize < 12) return 0; /* no saving possible : too small input */
  1037. op += 6; /* jumpTable */
  1038. assert(op <= oend);
  1039. { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
  1040. if (cSize == 0 || cSize > 65535) return 0;
  1041. MEM_writeLE16(ostart, (U16)cSize);
  1042. op += cSize;
  1043. }
  1044. ip += segmentSize;
  1045. assert(op <= oend);
  1046. { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
  1047. if (cSize == 0 || cSize > 65535) return 0;
  1048. MEM_writeLE16(ostart+2, (U16)cSize);
  1049. op += cSize;
  1050. }
  1051. ip += segmentSize;
  1052. assert(op <= oend);
  1053. { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
  1054. if (cSize == 0 || cSize > 65535) return 0;
  1055. MEM_writeLE16(ostart+4, (U16)cSize);
  1056. op += cSize;
  1057. }
  1058. ip += segmentSize;
  1059. assert(op <= oend);
  1060. assert(ip <= iend);
  1061. { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
  1062. if (cSize == 0 || cSize > 65535) return 0;
  1063. op += cSize;
  1064. }
  1065. return (size_t)(op-ostart);
  1066. }
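/* Illustrative layout note (not part of the original source): the 6-byte
 * jump table written above holds three little-endian U16 values at offsets
 * 0, 2 and 4, giving the compressed sizes of the first three streams; the
 * fourth stream's size is implied by the total. Each stream compresses
 * segmentSize = (srcSize+3)/4 input bytes, except the last one, which takes
 * the remainder.
 */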
  1067. size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
  1068. {
  1069. return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
  1070. }
  1071. typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
  1072. static size_t HUF_compressCTable_internal(
  1073. BYTE* const ostart, BYTE* op, BYTE* const oend,
  1074. const void* src, size_t srcSize,
  1075. HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
  1076. {
  1077. size_t const cSize = (nbStreams==HUF_singleStream) ?
  1078. HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
  1079. HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
  1080. if (HUF_isError(cSize)) { return cSize; }
  1081. if (cSize==0) { return 0; } /* uncompressible */
  1082. op += cSize;
  1083. /* check compressibility */
  1084. assert(op >= ostart);
  1085. if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
  1086. return (size_t)(op-ostart);
  1087. }
  1088. typedef struct {
  1089. unsigned count[HUF_SYMBOLVALUE_MAX + 1];
  1090. HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
  1091. union {
  1092. HUF_buildCTable_wksp_tables buildCTable_wksp;
  1093. HUF_WriteCTableWksp writeCTable_wksp;
  1094. U32 hist_wksp[HIST_WKSP_SIZE_U32];
  1095. } wksps;
  1096. } HUF_compress_tables_t;
  1097. #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
  1098. #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
  1099. unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
  1100. {
  1101. unsigned cardinality = 0;
  1102. unsigned i;
  1103. for (i = 0; i < maxSymbolValue + 1; i++) {
  1104. if (count[i] != 0) cardinality += 1;
  1105. }
  1106. return cardinality;
  1107. }
  1108. unsigned HUF_minTableLog(unsigned symbolCardinality)
  1109. {
  1110. U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
  1111. return minBitsSymbols;
  1112. }
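/* Illustrative example (not part of the original source): a histogram with
 * 5 distinct symbols gives ZSTD_highbit32(5) = 2, so HUF_minTableLog()
 * returns 3: a Huffman tree needs at least ceil(log2(cardinality)) bits,
 * and 2 bits could only distinguish 4 symbols.
 */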
  1113. unsigned HUF_optimalTableLog(
  1114. unsigned maxTableLog,
  1115. size_t srcSize,
  1116. unsigned maxSymbolValue,
  1117. void* workSpace, size_t wkspSize,
  1118. HUF_CElt* table,
  1119. const unsigned* count,
  1120. int flags)
  1121. {
  1122. assert(srcSize > 1); /* Not supported, RLE should be used instead */
  1123. assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));
  1124. if (!(flags & HUF_flags_optimalDepth)) {
  1125. /* cheap evaluation, based on FSE */
  1126. return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
  1127. }
  1128. { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
  1129. size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
  1130. size_t hSize, newSize;
  1131. const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
  1132. const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
  1133. size_t optSize = ((size_t) ~0) - 1;
  1134. unsigned optLog = maxTableLog, optLogGuess;
  1135. DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);
  1136. /* Search until size increases */
  1137. for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
  1138. DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
  1139. { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
  1140. if (ERR_isError(maxBits)) continue;
  1141. if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
  1142. hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
  1143. }
  1144. if (ERR_isError(hSize)) continue;
  1145. newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
  1146. if (newSize > optSize + 1) {
  1147. break;
  1148. }
  1149. if (newSize < optSize) {
  1150. optSize = newSize;
  1151. optLog = optLogGuess;
  1152. }
  1153. }
  1154. assert(optLog <= HUF_TABLELOG_MAX);
  1155. return optLog;
  1156. }
  1157. }
  1158. /* HUF_compress_internal() :
  1159. * `workSpace_align4` must be aligned on 4-bytes boundaries,
  1160. * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
  1161. static size_t
  1162. HUF_compress_internal (void* dst, size_t dstSize,
  1163. const void* src, size_t srcSize,
  1164. unsigned maxSymbolValue, unsigned huffLog,
  1165. HUF_nbStreams_e nbStreams,
  1166. void* workSpace, size_t wkspSize,
  1167. HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
  1168. {
  1169. HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
  1170. BYTE* const ostart = (BYTE*)dst;
  1171. BYTE* const oend = ostart + dstSize;
  1172. BYTE* op = ostart;
  1173. DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
  1174. HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
  1175. /* checks & inits */
  1176. if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
  1177. if (!srcSize) return 0; /* Uncompressed */
  1178. if (!dstSize) return 0; /* cannot fit anything within dst budget */
  1179. if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
  1180. if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
  1181. if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
  1182. if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
  1183. if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
  1184. /* Heuristic : If old table is valid, use it for small inputs */
  1185. if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
  1186. return HUF_compressCTable_internal(ostart, op, oend,
  1187. src, srcSize,
  1188. nbStreams, oldHufTable, flags);
  1189. }
  1190. /* If uncompressible data is suspected, do a smaller sampling first */
  1191. DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
  1192. if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
  1193. size_t largestTotal = 0;
  1194. DEBUGLOG(5, "input suspected incompressible : sampling to check");
  1195. { unsigned maxSymbolValueBegin = maxSymbolValue;
  1196. CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
  1197. largestTotal += largestBegin;
  1198. }
  1199. { unsigned maxSymbolValueEnd = maxSymbolValue;
  1200. CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
  1201. largestTotal += largestEnd;
  1202. }
  1203. if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
  1204. }
  1205. /* Scan input and build symbol stats */
  1206. { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
  1207. if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
  1208. if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
  1209. }
  1210. DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));
  1211. /* Check validity of previous table */
  1212. if ( repeat
  1213. && *repeat == HUF_repeat_check
  1214. && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
  1215. *repeat = HUF_repeat_none;
  1216. }
  1217. /* Heuristic : use existing table for small inputs */
  1218. if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
  1219. return HUF_compressCTable_internal(ostart, op, oend,
  1220. src, srcSize,
  1221. nbStreams, oldHufTable, flags);
  1222. }
  1223. /* Build Huffman Tree */
  1224. huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
  1225. { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
  1226. maxSymbolValue, huffLog,
  1227. &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
  1228. CHECK_F(maxBits);
  1229. huffLog = (U32)maxBits;
  1230. DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
  1231. }
  1232. /* Write table description header */
  1233. { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
  1234. &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
  1235. /* Check if using previous huffman table is beneficial */
  1236. if (repeat && *repeat != HUF_repeat_none) {
  1237. size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
  1238. size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
  1239. if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
  1240. return HUF_compressCTable_internal(ostart, op, oend,
  1241. src, srcSize,
  1242. nbStreams, oldHufTable, flags);
  1243. } }
  1244. /* Use the new huffman table */
  1245. if (hSize + 12ul >= srcSize) { return 0; }
  1246. op += hSize;
  1247. if (repeat) { *repeat = HUF_repeat_none; }
  1248. if (oldHufTable)
  1249. ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
  1250. }
  1251. return HUF_compressCTable_internal(ostart, op, oend,
  1252. src, srcSize,
  1253. nbStreams, table->CTable, flags);
  1254. }
  1255. size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
  1256. const void* src, size_t srcSize,
  1257. unsigned maxSymbolValue, unsigned huffLog,
  1258. void* workSpace, size_t wkspSize,
  1259. HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
  1260. {
  1261. DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
  1262. return HUF_compress_internal(dst, dstSize, src, srcSize,
  1263. maxSymbolValue, huffLog, HUF_singleStream,
  1264. workSpace, wkspSize, hufTable,
  1265. repeat, flags);
  1266. }
  1267. /* HUF_compress4X_repeat():
  1268. * compress input using 4 streams.
  1269. * consider skipping quickly
  1270. * reuse an existing huffman compression table */
  1271. size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
  1272. const void* src, size_t srcSize,
  1273. unsigned maxSymbolValue, unsigned huffLog,
  1274. void* workSpace, size_t wkspSize,
  1275. HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
  1276. {
  1277. DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
  1278. return HUF_compress_internal(dst, dstSize, src, srcSize,
  1279. maxSymbolValue, huffLog, HUF_fourStreams,
  1280. workSpace, wkspSize,
  1281. hufTable, repeat, flags);
  1282. }