zstd_opt.c

  1. /*
  2. * Copyright (c) Meta Platforms, Inc. and affiliates.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. #include "zstd_compress_internal.h"
  11. #include "hist.h"
  12. #include "zstd_opt.h"
  13. #if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
  14. || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
  15. || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
  16. #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
  17. #define ZSTD_MAX_PRICE (1<<30)
  18. #define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
  19. /*-*************************************
  20. * Price functions for optimal parser
  21. ***************************************/
  22. #if 0 /* approximation at bit level (for tests) */
  23. # define BITCOST_ACCURACY 0
  24. # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
  25. # define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat))
  26. #elif 0 /* fractional bit accuracy (for tests) */
  27. # define BITCOST_ACCURACY 8
  28. # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
  29. # define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat))
  30. #else /* opt==approx, ultra==accurate */
  31. # define BITCOST_ACCURACY 8
  32. # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
  33. # define WEIGHT(stat,opt) ((opt) ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
  34. #endif
  35. /* ZSTD_bitWeight() :
  36. * provide estimated "cost" of a stat in full bits only */
  37. MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
  38. {
  39. return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
  40. }
  41. /* ZSTD_fracWeight() :
  42. * provide fractional-bit "cost" of a stat,
  43. * using linear interpolation approximation */
  44. MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
  45. {
  46. U32 const stat = rawStat + 1;
  47. U32 const hb = ZSTD_highbit32(stat);
  48. U32 const BWeight = hb * BITCOST_MULTIPLIER;
  49. /* Fweight was meant for "Fractional weight"
  50. * but it's effectively a value between 1 and 2
  51. * using fixed point arithmetic */
  52. U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
  53. U32 const weight = BWeight + FWeight;
  54. assert(hb + BITCOST_ACCURACY < 31);
  55. return weight;
  56. }
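/* Illustrative example (assuming the default BITCOST_ACCURACY == 8, so BITCOST_MULTIPLIER == 256) :
 * rawStat = 5 => stat = 6, hb = 2, BWeight = 512, FWeight = (6<<8)>>2 = 384, weight = 896 = 3.5*256.
 * This approximates (1 + log2(stat)) in 1/256-bit units (log2(6) ~= 2.58).
 * In the dynamic-statistics paths below, weights appear as differences such as
 * WEIGHT(sum) - WEIGHT(freq), so the constant offset cancels and the difference
 * approximates log2(sum/freq), i.e. the Shannon cost of a symbol of that frequency. */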
  57. #if (DEBUGLEVEL>=2)
  58. /* debugging function,
  59. * @return price in bytes as fractional value
  60. * for debug messages only */
  61. MEM_STATIC double ZSTD_fCost(int price)
  62. {
  63. return (double)price / (BITCOST_MULTIPLIER*8);
  64. }
  65. #endif
  66. static int ZSTD_compressedLiterals(optState_t const* const optPtr)
  67. {
  68. return optPtr->literalCompressionMode != ZSTD_ps_disable;
  69. }
  70. static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
  71. {
  72. if (ZSTD_compressedLiterals(optPtr))
  73. optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
  74. optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
  75. optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
  76. optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
  77. }
  78. static U32 sum_u32(const unsigned table[], size_t nbElts)
  79. {
  80. size_t n;
  81. U32 total = 0;
  82. for (n=0; n<nbElts; n++) {
  83. total += table[n];
  84. }
  85. return total;
  86. }
  87. typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e;
  88. static U32
  89. ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1)
  90. {
  91. U32 s, sum=0;
  92. DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)",
  93. (unsigned)lastEltIndex+1, (unsigned)shift );
  94. assert(shift < 30);
  95. for (s=0; s<lastEltIndex+1; s++) {
  96. unsigned const base = base1 ? 1 : (table[s]>0);
  97. unsigned const newStat = base + (table[s] >> shift);
  98. sum += newStat;
  99. table[s] = newStat;
  100. }
  101. return sum;
  102. }
  103. /* ZSTD_scaleStats() :
  104. * reduce all elt frequencies in table if sum too large
  105. * return the resulting sum of elements */
  106. static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
  107. {
  108. U32 const prevsum = sum_u32(table, lastEltIndex+1);
  109. U32 const factor = prevsum >> logTarget;
  110. DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
  111. assert(logTarget < 30);
  112. if (factor <= 1) return prevsum;
  113. return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed);
  114. }
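/* Illustrative example : with prevsum = 300000 and logTarget = 11,
 * factor = 300000 >> 11 = 146, so the table is downscaled by shift = ZSTD_highbit32(146) = 7 :
 * every cell becomes 1 + (cell >> 7), and the new sum lands near 300000 >> 7 ~= 2343
 * (plus one per table entry), i.e. back in the vicinity of 2^logTarget. */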
  115. /* ZSTD_rescaleFreqs() :
  116. * if first block (detected by optPtr->litLengthSum == 0) : init statistics
  117. * take hints from dictionary if there is one
  118. * and init from zero if there is none,
  119. * using src for literals stats, and baseline stats for sequence symbols
  120. * otherwise downscale existing stats, to be used as seed for next block.
  121. */
  122. static void
  123. ZSTD_rescaleFreqs(optState_t* const optPtr,
  124. const BYTE* const src, size_t const srcSize,
  125. int const optLevel)
  126. {
  127. int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
  128. DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
  129. optPtr->priceType = zop_dynamic;
  130. if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */
  131. /* heuristic: use pre-defined stats for too small inputs */
  132. if (srcSize <= ZSTD_PREDEF_THRESHOLD) {
  133. DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD);
  134. optPtr->priceType = zop_predef;
  135. }
  136. assert(optPtr->symbolCosts != NULL);
  137. if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
  138. /* huffman stats covering the full value set : table presumed generated by dictionary */
  139. optPtr->priceType = zop_dynamic;
  140. if (compressedLiterals) {
  141. /* generate literals statistics from huffman table */
  142. unsigned lit;
  143. assert(optPtr->litFreq != NULL);
  144. optPtr->litSum = 0;
  145. for (lit=0; lit<=MaxLit; lit++) {
  146. U32 const scaleLog = 11; /* scale to 2K */
  147. U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
  148. assert(bitCost <= scaleLog);
  149. optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
  150. optPtr->litSum += optPtr->litFreq[lit];
  151. } }
  152. { unsigned ll;
  153. FSE_CState_t llstate;
  154. FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
  155. optPtr->litLengthSum = 0;
  156. for (ll=0; ll<=MaxLL; ll++) {
  157. U32 const scaleLog = 10; /* scale to 1K */
  158. U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
  159. assert(bitCost < scaleLog);
  160. optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
  161. optPtr->litLengthSum += optPtr->litLengthFreq[ll];
  162. } }
  163. { unsigned ml;
  164. FSE_CState_t mlstate;
  165. FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
  166. optPtr->matchLengthSum = 0;
  167. for (ml=0; ml<=MaxML; ml++) {
  168. U32 const scaleLog = 10;
  169. U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
  170. assert(bitCost < scaleLog);
  171. optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
  172. optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
  173. } }
  174. { unsigned of;
  175. FSE_CState_t ofstate;
  176. FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
  177. optPtr->offCodeSum = 0;
  178. for (of=0; of<=MaxOff; of++) {
  179. U32 const scaleLog = 10;
  180. U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
  181. assert(bitCost < scaleLog);
  182. optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
  183. optPtr->offCodeSum += optPtr->offCodeFreq[of];
  184. } }
  185. } else { /* first block, no dictionary */
  186. assert(optPtr->litFreq != NULL);
  187. if (compressedLiterals) {
  188. /* base initial cost of literals on direct frequency within src */
  189. unsigned lit = MaxLit;
  190. HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
  191. optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible);
  192. }
  193. { unsigned const baseLLfreqs[MaxLL+1] = {
  194. 4, 2, 1, 1, 1, 1, 1, 1,
  195. 1, 1, 1, 1, 1, 1, 1, 1,
  196. 1, 1, 1, 1, 1, 1, 1, 1,
  197. 1, 1, 1, 1, 1, 1, 1, 1,
  198. 1, 1, 1, 1
  199. };
  200. ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
  201. optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
  202. }
  203. { unsigned ml;
  204. for (ml=0; ml<=MaxML; ml++)
  205. optPtr->matchLengthFreq[ml] = 1;
  206. }
  207. optPtr->matchLengthSum = MaxML+1;
  208. { unsigned const baseOFCfreqs[MaxOff+1] = {
  209. 6, 2, 1, 1, 2, 3, 4, 4,
  210. 4, 3, 2, 1, 1, 1, 1, 1,
  211. 1, 1, 1, 1, 1, 1, 1, 1,
  212. 1, 1, 1, 1, 1, 1, 1, 1
  213. };
  214. ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
  215. optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
  216. }
  217. }
  218. } else { /* new block : scale down accumulated statistics */
  219. if (compressedLiterals)
  220. optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
  221. optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
  222. optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
  223. optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
  224. }
  225. ZSTD_setBasePrices(optPtr, optLevel);
  226. }
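/* Illustrative example of the dictionary path above : a literal whose Huffman code is 3 bits long
 * gets litFreq = 1 << (11 - 3) = 256 within a table scaled to ~2K,
 * i.e. an implied probability of about 2^-3, consistent with its 3-bit cost.
 * Symbols absent from the dictionary table keep a floor frequency of 1 so they remain priceable. */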
  227. /* ZSTD_rawLiteralsCost() :
  228. * price of literals (only) in specified segment (whose length can be 0).
  229. * does not include price of literalLength symbol */
  230. static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
  231. const optState_t* const optPtr,
  232. int optLevel)
  233. {
  234. DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength);
  235. if (litLength == 0) return 0;
  236. if (!ZSTD_compressedLiterals(optPtr))
  237. return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bits per literal. */
  238. if (optPtr->priceType == zop_predef)
  239. return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bits per literal - no statistics used */
  240. /* dynamic statistics */
  241. { U32 price = optPtr->litSumBasePrice * litLength;
  242. U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER;
  243. U32 u;
  244. assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER);
  245. for (u=0; u < litLength; u++) {
  246. U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel);
  247. if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax;
  248. price -= litPrice;
  249. }
  250. return price;
  251. }
  252. }
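/* Note on the dynamic branch above : the per-literal charge is
 * litSumBasePrice - WEIGHT(litFreq[literals[u]]) ~= log2(litSum / litFreq[c]) (in BITCOST_MULTIPLIER units).
 * Illustrative numbers : litSum = 2048 and litFreq[c] = 256 price the literal at ~3 bits.
 * The litPriceMax clamp guarantees every literal is charged at least 1 bit,
 * even when its frequency dominates the table. */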
  253. /* ZSTD_litLengthPrice() :
  254. * cost of literalLength symbol */
  255. static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
  256. {
  257. assert(litLength <= ZSTD_BLOCKSIZE_MAX);
  258. if (optPtr->priceType == zop_predef)
  259. return WEIGHT(litLength, optLevel);
  260. /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
  261. * because it isn't representable in the zstd format.
  262. * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1.
  263. * In such a case, the block would be all literals.
  264. */
  265. if (litLength == ZSTD_BLOCKSIZE_MAX)
  266. return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
  267. /* dynamic statistics */
  268. { U32 const llCode = ZSTD_LLcode(litLength);
  269. return (LL_bits[llCode] * BITCOST_MULTIPLIER)
  270. + optPtr->litLengthSumBasePrice
  271. - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
  272. }
  273. }
  274. /* ZSTD_getMatchPrice() :
  275. * Provides the cost of the match part (offset + matchLength) of a sequence.
  276. * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
  277. * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq()
  278. * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
  279. */
  280. FORCE_INLINE_TEMPLATE U32
  281. ZSTD_getMatchPrice(U32 const offBase,
  282. U32 const matchLength,
  283. const optState_t* const optPtr,
  284. int const optLevel)
  285. {
  286. U32 price;
  287. U32 const offCode = ZSTD_highbit32(offBase);
  288. U32 const mlBase = matchLength - MINMATCH;
  289. assert(matchLength >= MINMATCH);
  290. if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */
  291. return WEIGHT(mlBase, optLevel)
  292. + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */
  293. /* dynamic statistics */
  294. price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
  295. if ((optLevel<2) /*static*/ && offCode >= 20)
  296. price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
  297. /* match Length */
  298. { U32 const mlCode = ZSTD_MLcode(mlBase);
  299. price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
  300. }
  301. price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
  302. DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
  303. return price;
  304. }
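/* Illustrative example of the long-distance handicap above : at optLevel < 2,
 * an offset with offCode 24 (offsets roughly 16..32 MB back) pays an extra
 * (24-19)*2 = 10 bits, steering the parser toward closer matches that decompress faster.
 * Every match also pays a flat BITCOST_MULTIPLIER/5 (~0.2 bit) to favor fewer, longer sequences. */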
  305. /* ZSTD_updateStats() :
  306. * assumption : literals + litLength <= iend */
  307. static void ZSTD_updateStats(optState_t* const optPtr,
  308. U32 litLength, const BYTE* literals,
  309. U32 offBase, U32 matchLength)
  310. {
  311. /* literals */
  312. if (ZSTD_compressedLiterals(optPtr)) {
  313. U32 u;
  314. for (u=0; u < litLength; u++)
  315. optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
  316. optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
  317. }
  318. /* literal Length */
  319. { U32 const llCode = ZSTD_LLcode(litLength);
  320. optPtr->litLengthFreq[llCode]++;
  321. optPtr->litLengthSum++;
  322. }
  323. /* offset code : follows storeSeq() numeric representation */
  324. { U32 const offCode = ZSTD_highbit32(offBase);
  325. assert(offCode <= MaxOff);
  326. optPtr->offCodeFreq[offCode]++;
  327. optPtr->offCodeSum++;
  328. }
  329. /* match Length */
  330. { U32 const mlBase = matchLength - MINMATCH;
  331. U32 const mlCode = ZSTD_MLcode(mlBase);
  332. optPtr->matchLengthFreq[mlCode]++;
  333. optPtr->matchLengthSum++;
  334. }
  335. }
  336. /* ZSTD_readMINMATCH() :
  337. * function safe only for comparisons
  338. * assumption : memPtr must be at least 4 bytes before end of buffer */
  339. MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
  340. {
  341. switch (length)
  342. {
  343. default :
  344. case 4 : return MEM_read32(memPtr);
  345. case 3 : if (MEM_isLittleEndian())
  346. return MEM_read32(memPtr)<<8;
  347. else
  348. return MEM_read32(memPtr)>>8;
  349. }
  350. }
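/* Illustrative note : for length==3 the function still reads 4 bytes (hence the buffer requirement),
 * but the shift discards the byte outside the 3-byte window :
 * on little-endian, MEM_read32() puts byte0 in the low bits, so <<8 drops byte3 and keeps bytes 0..2;
 * on big-endian, >>8 does the same. Two results compare equal iff the first 3 bytes match,
 * which is all the repcode and hash3 checks below require. */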
  351. /* Update hashTable3 up to ip (excluded)
  352. Assumption : always within prefix (i.e. not within extDict) */
  353. static
  354. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  355. U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
  356. U32* nextToUpdate3,
  357. const BYTE* const ip)
  358. {
  359. U32* const hashTable3 = ms->hashTable3;
  360. U32 const hashLog3 = ms->hashLog3;
  361. const BYTE* const base = ms->window.base;
  362. U32 idx = *nextToUpdate3;
  363. U32 const target = (U32)(ip - base);
  364. size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
  365. assert(hashLog3 > 0);
  366. while(idx < target) {
  367. hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
  368. idx++;
  369. }
  370. *nextToUpdate3 = target;
  371. return hashTable3[hash3];
  372. }
  373. /*-*************************************
  374. * Binary Tree search
  375. ***************************************/
  376. /** ZSTD_insertBt1() : add one or multiple positions to tree.
  377. * @param ip assumed <= iend-8 .
  378. * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
  379. * @return : nb of positions added */
  380. static
  381. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  382. U32 ZSTD_insertBt1(
  383. const ZSTD_matchState_t* ms,
  384. const BYTE* const ip, const BYTE* const iend,
  385. U32 const target,
  386. U32 const mls, const int extDict)
  387. {
  388. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  389. U32* const hashTable = ms->hashTable;
  390. U32 const hashLog = cParams->hashLog;
  391. size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
  392. U32* const bt = ms->chainTable;
  393. U32 const btLog = cParams->chainLog - 1;
  394. U32 const btMask = (1 << btLog) - 1;
  395. U32 matchIndex = hashTable[h];
  396. size_t commonLengthSmaller=0, commonLengthLarger=0;
  397. const BYTE* const base = ms->window.base;
  398. const BYTE* const dictBase = ms->window.dictBase;
  399. const U32 dictLimit = ms->window.dictLimit;
  400. const BYTE* const dictEnd = dictBase + dictLimit;
  401. const BYTE* const prefixStart = base + dictLimit;
  402. const BYTE* match;
  403. const U32 curr = (U32)(ip-base);
  404. const U32 btLow = btMask >= curr ? 0 : curr - btMask;
  405. U32* smallerPtr = bt + 2*(curr&btMask);
  406. U32* largerPtr = smallerPtr + 1;
  407. U32 dummy32; /* to be nullified at the end */
  408. /* windowLow is based on target because
  409. * we only need positions that will be in the window at the end of the tree update.
  410. */
  411. U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
  412. U32 matchEndIdx = curr+8+1;
  413. size_t bestLength = 8;
  414. U32 nbCompares = 1U << cParams->searchLog;
  415. #ifdef ZSTD_C_PREDICT
  416. U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
  417. U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
  418. predictedSmall += (predictedSmall>0);
  419. predictedLarge += (predictedLarge>0);
  420. #endif /* ZSTD_C_PREDICT */
  421. DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
  422. assert(curr <= target);
  423. assert(ip <= iend-8); /* required for h calculation */
  424. hashTable[h] = curr; /* Update Hash Table */
  425. assert(windowLow > 0);
  426. for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
  427. U32* const nextPtr = bt + 2*(matchIndex & btMask);
  428. size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  429. assert(matchIndex < curr);
  430. #ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
  431. const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
  432. if (matchIndex == predictedSmall) {
  433. /* no need to check length, result known */
  434. *smallerPtr = matchIndex;
  435. if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
  436. smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
  437. matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
  438. predictedSmall = predictPtr[1] + (predictPtr[1]>0);
  439. continue;
  440. }
  441. if (matchIndex == predictedLarge) {
  442. *largerPtr = matchIndex;
  443. if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
  444. largerPtr = nextPtr;
  445. matchIndex = nextPtr[0];
  446. predictedLarge = predictPtr[0] + (predictPtr[0]>0);
  447. continue;
  448. }
  449. #endif
  450. if (!extDict || (matchIndex+matchLength >= dictLimit)) {
  451. assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
  452. match = base + matchIndex;
  453. matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
  454. } else {
  455. match = dictBase + matchIndex;
  456. matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
  457. if (matchIndex+matchLength >= dictLimit)
  458. match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
  459. }
  460. if (matchLength > bestLength) {
  461. bestLength = matchLength;
  462. if (matchLength > matchEndIdx - matchIndex)
  463. matchEndIdx = matchIndex + (U32)matchLength;
  464. }
  465. if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
  466. break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
  467. }
  468. if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
  469. /* match is smaller than current */
  470. *smallerPtr = matchIndex; /* update smaller idx */
  471. commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
  472. if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
  473. smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
  474. matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
  475. } else {
  476. /* match is larger than current */
  477. *largerPtr = matchIndex;
  478. commonLengthLarger = matchLength;
  479. if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
  480. largerPtr = nextPtr;
  481. matchIndex = nextPtr[0];
  482. } }
  483. *smallerPtr = *largerPtr = 0;
  484. { U32 positions = 0;
  485. if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
  486. assert(matchEndIdx > curr + 8);
  487. return MAX(positions, matchEndIdx - (curr + 8));
  488. }
  489. }
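/* Layout note (illustrative, applies to the bt searches below as well) :
 * the chainTable doubles as a binary tree stored in a rolling buffer.
 * Position p keeps two links : bt[2*(p&btMask)] heads its "smaller suffix" subtree,
 * bt[2*(p&btMask)+1] heads its "larger suffix" subtree, ordered lexicographically.
 * commonLengthSmaller/commonLengthLarger record bytes already known equal on each side,
 * so each comparison can resume at ip+matchLength instead of restarting from 0.
 * The value returned above lets ZSTD_updateTree_internal() skip ahead over
 * highly repetitive regions (long matches), avoiding quadratic behavior. */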
  490. FORCE_INLINE_TEMPLATE
  491. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  492. void ZSTD_updateTree_internal(
  493. ZSTD_matchState_t* ms,
  494. const BYTE* const ip, const BYTE* const iend,
  495. const U32 mls, const ZSTD_dictMode_e dictMode)
  496. {
  497. const BYTE* const base = ms->window.base;
  498. U32 const target = (U32)(ip - base);
  499. U32 idx = ms->nextToUpdate;
  500. DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
  501. idx, target, dictMode);
  502. while(idx < target) {
  503. U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
  504. assert(idx < (U32)(idx + forward));
  505. idx += forward;
  506. }
  507. assert((size_t)(ip - base) <= (size_t)(U32)(-1));
  508. assert((size_t)(iend - base) <= (size_t)(U32)(-1));
  509. ms->nextToUpdate = target;
  510. }
  511. void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
  512. ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
  513. }
  514. FORCE_INLINE_TEMPLATE
  515. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  516. U32
  517. ZSTD_insertBtAndGetAllMatches (
  518. ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
  519. ZSTD_matchState_t* ms,
  520. U32* nextToUpdate3,
  521. const BYTE* const ip, const BYTE* const iLimit,
  522. const ZSTD_dictMode_e dictMode,
  523. const U32 rep[ZSTD_REP_NUM],
  524. const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
  525. const U32 lengthToBeat,
  526. const U32 mls /* template */)
  527. {
  528. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  529. U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
  530. const BYTE* const base = ms->window.base;
  531. U32 const curr = (U32)(ip-base);
  532. U32 const hashLog = cParams->hashLog;
  533. U32 const minMatch = (mls==3) ? 3 : 4;
  534. U32* const hashTable = ms->hashTable;
  535. size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
  536. U32 matchIndex = hashTable[h];
  537. U32* const bt = ms->chainTable;
  538. U32 const btLog = cParams->chainLog - 1;
  539. U32 const btMask= (1U << btLog) - 1;
  540. size_t commonLengthSmaller=0, commonLengthLarger=0;
  541. const BYTE* const dictBase = ms->window.dictBase;
  542. U32 const dictLimit = ms->window.dictLimit;
  543. const BYTE* const dictEnd = dictBase + dictLimit;
  544. const BYTE* const prefixStart = base + dictLimit;
  545. U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
  546. U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
  547. U32 const matchLow = windowLow ? windowLow : 1;
  548. U32* smallerPtr = bt + 2*(curr&btMask);
  549. U32* largerPtr = bt + 2*(curr&btMask) + 1;
  550. U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
  551. U32 dummy32; /* to be nullified at the end */
  552. U32 mnum = 0;
  553. U32 nbCompares = 1U << cParams->searchLog;
  554. const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
  555. const ZSTD_compressionParameters* const dmsCParams =
  556. dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
  557. const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
  558. const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
  559. U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
  560. U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
  561. U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
  562. U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
  563. U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
  564. U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
  565. U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
  566. size_t bestLength = lengthToBeat-1;
  567. DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
  568. /* check repCode */
  569. assert(ll0 <= 1); /* necessarily 1 or 0 */
  570. { U32 const lastR = ZSTD_REP_NUM + ll0;
  571. U32 repCode;
  572. for (repCode = ll0; repCode < lastR; repCode++) {
  573. U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
  574. U32 const repIndex = curr - repOffset;
  575. U32 repLen = 0;
  576. assert(curr >= dictLimit);
  577. if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
  578. /* We must validate the repcode offset because when we're using a dictionary the
  579. * valid offset range shrinks when the dictionary goes out of bounds.
  580. */
  581. if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
  582. repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
  583. }
  584. } else { /* repIndex < dictLimit || repIndex >= curr */
  585. const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
  586. dmsBase + repIndex - dmsIndexDelta :
  587. dictBase + repIndex;
  588. assert(curr >= windowLow);
  589. if ( dictMode == ZSTD_extDict
  590. && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
  591. & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
  592. && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
  593. repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
  594. }
  595. if (dictMode == ZSTD_dictMatchState
  596. && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
  597. & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
  598. && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
  599. repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
  600. } }
  601. /* save longer solution */
  602. if (repLen > bestLength) {
  603. DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
  604. repCode, ll0, repOffset, repLen);
  605. bestLength = repLen;
  606. matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */
  607. matches[mnum].len = (U32)repLen;
  608. mnum++;
  609. if ( (repLen > sufficient_len)
  610. | (ip+repLen == iLimit) ) { /* best possible */
  611. return mnum;
  612. } } } }
  613. /* HC3 match finder */
  614. if ((mls == 3) /*static*/ && (bestLength < mls)) {
  615. U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
  616. if ((matchIndex3 >= matchLow)
  617. & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
  618. size_t mlen;
  619. if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
  620. const BYTE* const match = base + matchIndex3;
  621. mlen = ZSTD_count(ip, match, iLimit);
  622. } else {
  623. const BYTE* const match = dictBase + matchIndex3;
  624. mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
  625. }
  626. /* save best solution */
  627. if (mlen >= mls /* == 3 > bestLength */) {
  628. DEBUGLOG(8, "found small match with hlog3, of length %u",
  629. (U32)mlen);
  630. bestLength = mlen;
  631. assert(curr > matchIndex3);
  632. assert(mnum==0); /* no prior solution */
  633. matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3);
  634. matches[0].len = (U32)mlen;
  635. mnum = 1;
  636. if ( (mlen > sufficient_len) |
  637. (ip+mlen == iLimit) ) { /* best possible length */
  638. ms->nextToUpdate = curr+1; /* skip insertion */
  639. return 1;
  640. } } }
  641. /* no dictMatchState lookup: dicts don't have a populated HC3 table */
  642. } /* if (mls == 3) */
  643. hashTable[h] = curr; /* Update Hash Table */
  644. for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
  645. U32* const nextPtr = bt + 2*(matchIndex & btMask);
  646. const BYTE* match;
  647. size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  648. assert(curr > matchIndex);
  649. if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
  650. assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
  651. match = base + matchIndex;
  652. if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
  653. matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
  654. } else {
  655. match = dictBase + matchIndex;
  656. assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
  657. matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
  658. if (matchIndex+matchLength >= dictLimit)
  659. match = base + matchIndex; /* prepare for match[matchLength] read */
  660. }
  661. if (matchLength > bestLength) {
  662. DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)",
  663. (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
  664. assert(matchEndIdx > matchIndex);
  665. if (matchLength > matchEndIdx - matchIndex)
  666. matchEndIdx = matchIndex + (U32)matchLength;
  667. bestLength = matchLength;
  668. matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
  669. matches[mnum].len = (U32)matchLength;
  670. mnum++;
  671. if ( (matchLength > ZSTD_OPT_NUM)
  672. | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
  673. if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
  674. break; /* drop, to preserve bt consistency (miss a little bit of compression) */
  675. } }
  676. if (match[matchLength] < ip[matchLength]) {
  677. /* match smaller than current */
  678. *smallerPtr = matchIndex; /* update smaller idx */
  679. commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
  680. if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
  681. smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
  682. matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
  683. } else {
  684. *largerPtr = matchIndex;
  685. commonLengthLarger = matchLength;
  686. if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
  687. largerPtr = nextPtr;
  688. matchIndex = nextPtr[0];
  689. } }
  690. *smallerPtr = *largerPtr = 0;
  691. assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
  692. if (dictMode == ZSTD_dictMatchState && nbCompares) {
  693. size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
  694. U32 dictMatchIndex = dms->hashTable[dmsH];
  695. const U32* const dmsBt = dms->chainTable;
  696. commonLengthSmaller = commonLengthLarger = 0;
  697. for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
  698. const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
  699. size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  700. const BYTE* match = dmsBase + dictMatchIndex;
  701. matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
  702. if (dictMatchIndex+matchLength >= dmsHighLimit)
  703. match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
  704. if (matchLength > bestLength) {
  705. matchIndex = dictMatchIndex + dmsIndexDelta;
  706. DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)",
  707. (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
  708. if (matchLength > matchEndIdx - matchIndex)
  709. matchEndIdx = matchIndex + (U32)matchLength;
  710. bestLength = matchLength;
  711. matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex);
  712. matches[mnum].len = (U32)matchLength;
  713. mnum++;
  714. if ( (matchLength > ZSTD_OPT_NUM)
  715. | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
  716. break; /* drop, to guarantee consistency (miss a little bit of compression) */
  717. } }
  718. if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
  719. if (match[matchLength] < ip[matchLength]) {
  720. commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
  721. dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
  722. } else {
  723. /* match is larger than current */
  724. commonLengthLarger = matchLength;
  725. dictMatchIndex = nextPtr[0];
  726. } } } /* if (dictMode == ZSTD_dictMatchState) */
  727. assert(matchEndIdx > curr+8);
  728. ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
  729. return mnum;
  730. }
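/* Output contract (derived from the code above) : candidates are appended only when they strictly
 * beat the best length found so far (repcodes first, then the optional 3-byte match, then tree
 * matches, then dictMatchState matches), so matches[0..mnum-1].len is strictly increasing and
 * matches[mnum-1] is always the longest candidate. The optimal parser below relies on this
 * when it reads matches[nbMatches-1]. */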
  731. typedef U32 (*ZSTD_getAllMatchesFn)(
  732. ZSTD_match_t*,
  733. ZSTD_matchState_t*,
  734. U32*,
  735. const BYTE*,
  736. const BYTE*,
  737. const U32 rep[ZSTD_REP_NUM],
  738. U32 const ll0,
  739. U32 const lengthToBeat);
  740. FORCE_INLINE_TEMPLATE
  741. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  742. U32 ZSTD_btGetAllMatches_internal(
  743. ZSTD_match_t* matches,
  744. ZSTD_matchState_t* ms,
  745. U32* nextToUpdate3,
  746. const BYTE* ip,
  747. const BYTE* const iHighLimit,
  748. const U32 rep[ZSTD_REP_NUM],
  749. U32 const ll0,
  750. U32 const lengthToBeat,
  751. const ZSTD_dictMode_e dictMode,
  752. const U32 mls)
  753. {
  754. assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
  755. DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
  756. if (ip < ms->window.base + ms->nextToUpdate)
  757. return 0; /* skipped area */
  758. ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
  759. return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
  760. }
  761. #define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
  762. #define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
  763. static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
  764. ZSTD_match_t* matches, \
  765. ZSTD_matchState_t* ms, \
  766. U32* nextToUpdate3, \
  767. const BYTE* ip, \
  768. const BYTE* const iHighLimit, \
  769. const U32 rep[ZSTD_REP_NUM], \
  770. U32 const ll0, \
  771. U32 const lengthToBeat) \
  772. { \
  773. return ZSTD_btGetAllMatches_internal( \
  774. matches, ms, nextToUpdate3, ip, iHighLimit, \
  775. rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
  776. }
  777. #define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
  778. GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
  779. GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
  780. GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
  781. GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
  782. GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
  783. GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
  784. GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
  785. #define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
  786. { \
  787. ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
  788. ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
  789. ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
  790. ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
  791. }
  792. static ZSTD_getAllMatchesFn
  793. ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
  794. {
  795. ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
  796. ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
  797. ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
  798. ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
  799. };
  800. U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
  801. assert((U32)dictMode < 3);
  802. assert(mls - 3 < 4);
  803. return getAllMatchesFns[(int)dictMode][mls - 3];
  804. }
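/* Illustrative example : with cParams.minMatch == 5 and dictMode == ZSTD_extDict,
 * mls = BOUNDED(3,5,6) = 5, so this returns ZSTD_btGetAllMatches_extDict_5
 * (row 1, column 5-3 == 2 of the table above). */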
  805. /*************************
  806. * LDM helper functions *
  807. *************************/
  808. /* Struct containing info needed to make decision about ldm inclusion */
  809. typedef struct {
  810. rawSeqStore_t seqStore; /* External match candidates store for this block */
  811. U32 startPosInBlock; /* Start position of the current match candidate */
  812. U32 endPosInBlock; /* End position of the current match candidate */
  813. U32 offset; /* Offset of the match candidate */
  814. } ZSTD_optLdm_t;
  815. /* ZSTD_optLdm_skipRawSeqStoreBytes():
  816. * Moves forward in @rawSeqStore by @nbBytes,
  817. * which will update the fields 'pos' and 'posInSequence'.
  818. */
  819. static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
  820. {
  821. U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
  822. while (currPos && rawSeqStore->pos < rawSeqStore->size) {
  823. rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
  824. if (currPos >= currSeq.litLength + currSeq.matchLength) {
  825. currPos -= currSeq.litLength + currSeq.matchLength;
  826. rawSeqStore->pos++;
  827. } else {
  828. rawSeqStore->posInSequence = currPos;
  829. break;
  830. }
  831. }
  832. if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
  833. rawSeqStore->posInSequence = 0;
  834. }
  835. }
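/* Illustrative walk-through : with a single stored sequence {litLength==10, matchLength==20}
 * and posInSequence==5, skipping 12 bytes gives currPos==17 (< 30), so only posInSequence
 * advances to 17. Skipping 30 bytes gives currPos==35 (>= 30) : pos moves to the next sequence
 * and the remaining 5 bytes apply to it, or posInSequence resets to 0 if the store is exhausted. */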
  836. /* ZSTD_opt_getNextMatchAndUpdateSeqStore():
  837. * Calculates the beginning and end of the next match in the current block.
  838. * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
  839. */
  840. static void
  841. ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
  842. U32 blockBytesRemaining)
  843. {
  844. rawSeq currSeq;
  845. U32 currBlockEndPos;
  846. U32 literalsBytesRemaining;
  847. U32 matchBytesRemaining;
  848. /* Setting match end position to MAX to ensure we never use an LDM during this block */
  849. if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
  850. optLdm->startPosInBlock = UINT_MAX;
  851. optLdm->endPosInBlock = UINT_MAX;
  852. return;
  853. }
  854. /* Calculate appropriate bytes left in matchLength and litLength
  855. * after adjusting based on ldmSeqStore->posInSequence */
  856. currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
  857. assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
  858. currBlockEndPos = currPosInBlock + blockBytesRemaining;
  859. literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
  860. currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
  861. 0;
  862. matchBytesRemaining = (literalsBytesRemaining == 0) ?
  863. currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
  864. currSeq.matchLength;
  865. /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
  866. if (literalsBytesRemaining >= blockBytesRemaining) {
  867. optLdm->startPosInBlock = UINT_MAX;
  868. optLdm->endPosInBlock = UINT_MAX;
  869. ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
  870. return;
  871. }
  872. /* Matches may be < MINMATCH by this process. In that case, we will reject them
  873. when we are deciding whether or not to add the ldm */
  874. optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
  875. optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
  876. optLdm->offset = currSeq.offset;
  877. if (optLdm->endPosInBlock > currBlockEndPos) {
  878. /* Match ends after the block ends, we can't use the whole match */
  879. optLdm->endPosInBlock = currBlockEndPos;
  880. ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
  881. } else {
  882. /* Consume nb of bytes equal to size of sequence left */
  883. ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
  884. }
  885. }
  886. /* ZSTD_optLdm_maybeAddMatch():
  887. * Adds a match if it's long enough,
  888. * based on its 'matchStartPosInBlock' and 'matchEndPosInBlock',
  889. * into 'matches'. Maintains the correct ordering of 'matches'.
  890. */
  891. static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
  892. const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
  893. {
  894. U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
  895. /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
  896. U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
  897. /* Ensure that current block position is not outside of the match */
  898. if (currPosInBlock < optLdm->startPosInBlock
  899. || currPosInBlock >= optLdm->endPosInBlock
  900. || candidateMatchLength < MINMATCH) {
  901. return;
  902. }
  903. if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
  904. U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset);
  905. DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u",
  906. candidateOffBase, candidateMatchLength, currPosInBlock);
  907. matches[*nbMatches].len = candidateMatchLength;
  908. matches[*nbMatches].off = candidateOffBase;
  909. (*nbMatches)++;
  910. }
  911. }
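/* Note : the (candidateMatchLength > matches[*nbMatches-1].len) guard preserves the
 * strictly-increasing-length layout produced by ZSTD_insertBtAndGetAllMatches(),
 * so an ldm candidate is only ever appended at the end, as the new longest match. */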
  912. /* ZSTD_optLdm_processMatchCandidate():
  913. * Wrapper function to update ldm seq store and call ldm functions as necessary.
  914. */
  915. static void
  916. ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
  917. ZSTD_match_t* matches, U32* nbMatches,
  918. U32 currPosInBlock, U32 remainingBytes)
  919. {
  920. if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
  921. return;
  922. }
  923. if (currPosInBlock >= optLdm->endPosInBlock) {
  924. if (currPosInBlock > optLdm->endPosInBlock) {
  925. /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
  926. * at the end of a match from the ldm seq store, and will often be some bytes
  927. * beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
  928. */
  929. U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
  930. ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
  931. }
  932. ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
  933. }
  934. ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
  935. }
  936. /*-*******************************
  937. * Optimal parser
  938. *********************************/
  939. #if 0 /* debug */
  940. static void
  941. listStats(const U32* table, int lastEltID)
  942. {
  943. int const nbElts = lastEltID + 1;
  944. int enb;
  945. for (enb=0; enb < nbElts; enb++) {
  946. (void)table;
  947. /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
  948. RAWLOG(2, "%4i,", table[enb]);
  949. }
  950. RAWLOG(2, " \n");
  951. }
  952. #endif
  953. #define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel)
  954. #define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel)
  955. #define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1))
  956. FORCE_INLINE_TEMPLATE
  957. ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
  958. size_t
  959. ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
  960. seqStore_t* seqStore,
  961. U32 rep[ZSTD_REP_NUM],
  962. const void* src, size_t srcSize,
  963. const int optLevel,
  964. const ZSTD_dictMode_e dictMode)
  965. {
  966. optState_t* const optStatePtr = &ms->opt;
  967. const BYTE* const istart = (const BYTE*)src;
  968. const BYTE* ip = istart;
  969. const BYTE* anchor = istart;
  970. const BYTE* const iend = istart + srcSize;
  971. const BYTE* const ilimit = iend - 8;
  972. const BYTE* const base = ms->window.base;
  973. const BYTE* const prefixStart = base + ms->window.dictLimit;
  974. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  975. ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
  976. U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
  977. U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
  978. U32 nextToUpdate3 = ms->nextToUpdate;
  979. ZSTD_optimal_t* const opt = optStatePtr->priceTable;
  980. ZSTD_match_t* const matches = optStatePtr->matchTable;
  981. ZSTD_optimal_t lastStretch;
  982. ZSTD_optLdm_t optLdm;
  983. ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t));
  984. optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
  985. optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
  986. ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
  987. /* init */
  988. DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
  989. (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
  990. assert(optLevel <= 2);
  991. ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
  992. ip += (ip==prefixStart);
  993. /* Match Loop */
  994. while (ip < ilimit) {
  995. U32 cur, last_pos = 0;
  996. /* find first match */
  997. { U32 const litlen = (U32)(ip - anchor);
  998. U32 const ll0 = !litlen;
  999. U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
  1000. ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
  1001. (U32)(ip-istart), (U32)(iend-ip));
  1002. if (!nbMatches) {
  1003. DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart));
  1004. ip++;
  1005. continue;
  1006. }
  1007. /* Match found: let's store this solution, and possibly find more candidates.
  1008. * During this forward pass, @opt is used to store stretches,
  1009. * defined as "a match followed by N literals".
  1010. * Note how this is different from a Sequence, which is "N literals followed by a match".
  1011. * Storing stretches allows us to store different match predecessors
  1012. * for each literal position part of a literals run. */
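/* Illustrative example : if a match ends at rPos 10 and positions 11..13 are reached by literals,
 * opt[11..13] keep that match's mlen/off (and its repcode history) while recording litlen 1..3,
 * so a later match starting from any of those literal positions still knows its predecessor. */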
  1013. /* initialize opt[0] */
  1014. opt[0].mlen = 0; /* there are only literals so far */
  1015. opt[0].litlen = litlen;
  1016. /* No need to include the actual price of the literals before the first match
  1017. * because it is static for the duration of the forward pass, and is included
  1018. * in every subsequent price. But, we include the literal length because
  1019. * the cost variation of litlen depends on the value of litlen.
  1020. */
  1021. opt[0].price = LL_PRICE(litlen);
  1022. ZSTD_STATIC_ASSERT(sizeof(opt[0].rep[0]) == sizeof(rep[0]));
  1023. ZSTD_memcpy(&opt[0].rep, rep, sizeof(opt[0].rep));
  1024. /* large match -> immediate encoding */
  1025. { U32 const maxML = matches[nbMatches-1].len;
  1026. U32 const maxOffBase = matches[nbMatches-1].off;
  1027. DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series",
  1028. nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));
  1029. if (maxML > sufficient_len) {
  1030. lastStretch.litlen = 0;
  1031. lastStretch.mlen = maxML;
  1032. lastStretch.off = maxOffBase;
  1033. DEBUGLOG(6, "large match (%u>%u) => immediate encoding",
  1034. maxML, sufficient_len);
  1035. cur = 0;
  1036. last_pos = maxML;
  1037. goto _shortestPath;
  1038. } }
  1039. /* set prices for first matches starting position == 0 */
  1040. assert(opt[0].price >= 0);
  1041. { U32 pos;
  1042. U32 matchNb;
  1043. for (pos = 1; pos < minMatch; pos++) {
  1044. opt[pos].price = ZSTD_MAX_PRICE;
  1045. opt[pos].mlen = 0;
  1046. opt[pos].litlen = litlen + pos;
  1047. }
  1048. for (matchNb = 0; matchNb < nbMatches; matchNb++) {
  1049. U32 const offBase = matches[matchNb].off;
  1050. U32 const end = matches[matchNb].len;
  1051. for ( ; pos <= end ; pos++ ) {
  1052. int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
  1053. int const sequencePrice = opt[0].price + matchPrice;
  1054. DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
  1055. pos, ZSTD_fCost(sequencePrice));
  1056. opt[pos].mlen = pos;
  1057. opt[pos].off = offBase;
  1058. opt[pos].litlen = 0; /* end of match */
  1059. opt[pos].price = sequencePrice + LL_PRICE(0);
  1060. }
  1061. }
  1062. last_pos = pos-1;
  1063. opt[pos].price = ZSTD_MAX_PRICE;
  1064. }
  1065. }
  1066. /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur <= ZSTD_OPT_NUM);
            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur);

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = opt[cur-1].litlen + 1;
                int const price = opt[cur-1].price
                                + LIT_PRICE(ip+cur-1)
                                + LL_INCPRICE(litlen);
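                /* Note: LL_INCPRICE(litlen) is (roughly) the marginal cost of growing
                 * the pending literal run from litlen-1 to litlen literals,
                 * keeping @price consistent with a full re-evaluation of the stretch. */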
                assert(price < 1000000000); /* overflow check */
                if (price <= opt[cur].price) {
                    ZSTD_optimal_t const prevMatch = opt[cur];
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
                    opt[cur] = opt[cur-1];
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                    if ( (optLevel >= 1) /* additional check only for higher modes */
                      && (prevMatch.litlen == 0) /* replace a match */
                      && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */
                      && LIKELY(ip + cur < iend)
                    ) {
                        /* check next position, in case it would be cheaper */
                        int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1);
                        int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1);
                        DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f",
                                cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals));
                        if ( (with1literal < withMoreLiterals)
                          && (with1literal < opt[cur+1].price) ) {
                            /* update offset history - before it disappears */
                            U32 const prev = cur - prevMatch.mlen;
                            repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
                            assert(cur >= prevMatch.mlen);
                            DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !",
                                    ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals),
                                    newReps.rep[0], newReps.rep[1], newReps.rep[2] );
                            opt[cur+1] = prevMatch;  /* mlen & offbase */
                            ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(repcodes_t));
                            opt[cur+1].litlen = 1;
                            opt[cur+1].price = with1literal;
                            if (last_pos < cur+1) last_pos = cur+1;
                        }
                    }
                } else {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
                }
            }

            /* Offset history is not updated during match comparison.
             * Do it here, now that the match is selected and confirmed.
             */
            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
            assert(cur >= opt[cur].mlen);
            if (opt[cur].litlen == 0) {
                /* just finished a match => alter offset history */
                U32 const prev = cur - opt[cur].mlen;
                repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
            }

            /* last match must start at a minimum distance of 8 from oend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static_test*/
              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
                DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1);
                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
            }

            assert(opt[cur].price >= 0);
            {   U32 const ll0 = (opt[cur].litlen == 0);
                int const previousPrice = opt[cur].price;
                int const basePrice = previousPrice + LL_PRICE(0);
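                /* @basePrice adds LL_PRICE(0): the cost of the (so far empty) literal run
                 * that will follow any match priced from this position,
                 * mirroring the initialization of rPos 0 above. */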
                U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
                U32 matchNb;

                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                                  (U32)(inr-istart), (U32)(iend-inr));

                if (!nbMatches) {
                    DEBUGLOG(7, "rPos:%u : no match found", cur);
                    continue;
                }

                {   U32 const longestML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of longest ML=%u",
                                inr-istart, cur, nbMatches, longestML);

                    if ( (longestML > sufficient_len)
                      || (cur + longestML >= ZSTD_OPT_NUM)
                      || (ip + cur + longestML >= iend) ) {
                        lastStretch.mlen = longestML;
                        lastStretch.off = matches[nbMatches-1].off;
                        lastStretch.litlen = 0;
                        last_pos = cur + longestML;
                        goto _shortestPath;
                }   }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u",
                                matchNb, matches[matchNb].off, lastML, opt[cur].litlen);
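                    /* Lengths up to matches[matchNb-1].len are presumed already covered
                     * by that earlier (no more expensive) candidate, so only the range
                     * [startML..lastML] is priced here. The scan goes downward so that
                     * optLevel==0 can abort early once prices stop improving. */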
                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                        U32 const pos = cur + mlen;
                        int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            while (last_pos < pos) {
                                /* fill empty positions, for future comparisons */
                                last_pos++;
                                opt[last_pos].price = ZSTD_MAX_PRICE;
                                opt[last_pos].litlen = !0;  /* just needs to be != 0, to mean "not an end of match" */
                            }
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = 0;
                            opt[pos].price = price;
                        } else {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
            opt[last_pos+1].price = ZSTD_MAX_PRICE;
        }   /* for (cur = 1; cur <= last_pos; cur++) */

        lastStretch = opt[last_pos];
        assert(cur >= lastStretch.mlen);
        cur = last_pos - lastStretch.mlen;
_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 0);
        assert(last_pos >= lastStretch.mlen);
        assert(cur == last_pos - lastStretch.mlen);

        if (lastStretch.mlen==0) {
            /* no solution : all matches have been converted into literals */
            assert(lastStretch.litlen == (ip - anchor) + last_pos);
            ip += last_pos;
            continue;
        }
        assert(lastStretch.off > 0);

        /* Update offset history */
        if (lastStretch.litlen == 0) {
            /* finishing on a match : update offset history */
            repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
            ZSTD_memcpy(rep, &reps, sizeof(repcodes_t));
        } else {
            ZSTD_memcpy(rep, lastStretch.rep, sizeof(repcodes_t));
            assert(cur >= lastStretch.litlen);
            cur -= lastStretch.litlen;
        }
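        /* At this point, @cur is the relative position where the last selected match begins;
         * it is also the end of the previous stretch, used below as the starting read position. */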
        /* Let's write the shortest path solution.
         * It is stored in @opt in reverse order,
         * starting from @storeEnd (==cur+2),
         * effectively overwriting part of @opt along the way.
         * Content is changed too:
         * - So far, @opt stored stretches, aka a match followed by literals
         * - Now, it will store sequences, aka literals followed by a match
         */
        {   U32 const storeEnd = cur + 2;
            U32 storeStart = storeEnd;
            U32 stretchPos = cur;
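            /* Note: the rewrite is done in place, but cannot clobber stretches still
             * to be read: each iteration moves the write cursor (@storeStart) back by
             * one slot, while the read cursor (@stretchPos) moves back by a whole
             * stretch (litlen + mlen positions). */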
            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
                        last_pos, cur); (void)last_pos;
            assert(storeEnd < ZSTD_OPT_SIZE);
            DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                        storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off);
            if (lastStretch.litlen > 0) {
                /* last "sequence" is unfinished: just a bunch of literals */
                opt[storeEnd].litlen = lastStretch.litlen;
                opt[storeEnd].mlen = 0;
                storeStart = storeEnd-1;
                opt[storeStart] = lastStretch;
            } else {
                opt[storeEnd] = lastStretch;  /* note: litlen will be fixed */
                storeStart = storeEnd;
            }
            while (1) {
                ZSTD_optimal_t nextStretch = opt[stretchPos];
                opt[storeStart].litlen = nextStretch.litlen;
                DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)",
                            opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off);
                if (nextStretch.mlen == 0) {
                    /* reaching beginning of segment */
                    break;
                }
                storeStart--;
                opt[storeStart] = nextStretch;  /* note: litlen will be fixed */
                assert(nextStretch.litlen + nextStretch.mlen <= stretchPos);
                stretchPos -= nextStretch.litlen + nextStretch.mlen;
            }

            /* save sequences */
            DEBUGLOG(6, "sending selected sequences into seqStore");
            {   U32 storePos;
                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
                    U32 const llen = opt[storePos].litlen;
                    U32 const mlen = opt[storePos].mlen;
                    U32 const offBase = opt[storePos].off;
                    U32 const advance = llen + mlen;
                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
                                anchor - istart, (unsigned)llen, (unsigned)mlen);

                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
                        assert(storePos == storeEnd);  /* must be last sequence */
                        ip = anchor + llen;  /* last "sequence" is a bunch of literals => don't progress anchor */
                        continue;  /* will finish */
                    }

                    assert(anchor + llen <= iend);
                    ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen);
                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen);
                    anchor += advance;
                    ip = anchor;
            }   }
            DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]);

            /* update all costs */
            ZSTD_setBasePrices(optStatePtr, optLevel);
        }
    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
#endif /* build exclusions */

#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt0(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}
#endif

#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
static size_t ZSTD_compressBlock_opt2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
#endif

#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
#endif

#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
/* ZSTD_initStats_ultra():
 * make a first compression pass, just to seed stats with more accurate starting values.
 * only works on first block, with no dictionary and no ldm.
 * this function cannot error out, its narrow contract must be respected.
 */
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
                          seqStore_t* seqStore,
                          U32 rep[ZSTD_REP_NUM],
                          const void* src, size_t srcSize)
{
    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
    assert(ms->opt.litLengthSum == 0);    /* first block */
    assert(seqStore->sequences == seqStore->sequencesStart);  /* no ldm */
    assert(ms->window.dictLimit == ms->window.lowLimit);      /* no dictionary */
    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);     /* no prefix (note: intentional overflow, defined as 2-complement) */

    ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict);  /* generate stats into ms->opt */

    /* invalidate first scan from history, only keep entropy stats */
    ZSTD_resetSeqStore(seqStore);
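    /* Rewinding window.base by srcSize while raising dictLimit/lowLimit by the same
     * amount re-labels the bytes just scanned as out-of-window, so the match-finder
     * entries created by this first pass are effectively discarded,
     * while the statistics accumulated in ms->opt carry over to the real pass. */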
    ms->window.base -= srcSize;
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;
}

size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

    /* 2-passes strategy:
     * this strategy makes a first pass over the first block to collect statistics,
     * in order to seed the next round's statistics with it.
     * After the 1st pass, the function forgets this history and starts a new block.
     * Consequently, this can only work if no data has been previously loaded in tables,
     * aka, no dictionary, no prefix, no ldm preprocessing.
     * The compression ratio gain is generally small (~0.5% on the first block),
     * the cost is 2x cpu time on the first block. */
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    if ( (ms->opt.litLengthSum==0)   /* first block */
      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
      && (ms->window.dictLimit == ms->window.lowLimit)      /* no dictionary */
      && (curr == ms->window.dictLimit)    /* start of frame, nothing already loaded nor skipped */
      && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */
      ) {
        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
    }

    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
#endif

#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
#endif

#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
#endif

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and is only specific for the first block (no prefix) */