/* lz4hc.c */

/*
    LZ4 HC - High Compression Mode of LZ4
    Copyright (C) 2011-2020, Yann Collet.
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
    - LZ4 source repository : https://github.com/lz4/lz4
    - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */


/* *************************************
* Tuning Parameter
***************************************/

/*! HEAPMODE :
 *  Select how stateless HC compression functions like `LZ4_compress_HC()`
 *  allocate memory for their workspace:
 *  in stack (0:fastest), or in heap (1:default, requires malloc()).
 *  Since workspace is rather large, heap mode is recommended.
**/
#ifndef LZ4HC_HEAPMODE
#  define LZ4HC_HEAPMODE 1
#endif


/*=== Dependency ===*/
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"
#include <limits.h>


/*=== Shared lz4.c code ===*/
#ifndef LZ4_SRC_INCLUDED
# if defined(__GNUC__)
#  pragma GCC diagnostic ignored "-Wunused-function"
# endif
# if defined (__clang__)
#  pragma clang diagnostic ignored "-Wunused-function"
# endif
# define LZ4_COMMONDEFS_ONLY
# include "lz4.c"   /* LZ4_count, constants, mem */
#endif


/*=== Enums ===*/
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;


/*=== Constants ===*/
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
#define LZ4_OPT_NUM   (1<<12)


/*=== Macros ===*/
#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
#define MAX(a,b)   ( (a) > (b) ? (a) : (b) )


/*=== Levels definition ===*/
typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
typedef struct {
    lz4hc_strat_e strat;
    int nbSearches;
    U32 targetLength;
} cParams_t;
static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX+1] = {
    { lz4mid,     2, 16 },  /*  0, unused */
    { lz4mid,     2, 16 },  /*  1, unused */
    { lz4mid,     2, 16 },  /*  2 */
    { lz4hc,      4, 16 },  /*  3 */
    { lz4hc,      8, 16 },  /*  4 */
    { lz4hc,     16, 16 },  /*  5 */
    { lz4hc,     32, 16 },  /*  6 */
    { lz4hc,     64, 16 },  /*  7 */
    { lz4hc,    128, 16 },  /*  8 */
    { lz4hc,    256, 16 },  /*  9 */
    { lz4opt,    96, 64 },  /* 10==LZ4HC_CLEVEL_OPT_MIN*/
    { lz4opt,   512,128 },  /* 11 */
    { lz4opt, 16384,LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
};

static cParams_t LZ4HC_getCLevelParams(int cLevel)
{
    /* note : clevel convention is a bit different from lz4frame,
     * possibly something worth revisiting for consistency */
    if (cLevel < 1)
        cLevel = LZ4HC_CLEVEL_DEFAULT;
    cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
    return k_clTable[cLevel];
}
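
/* Illustrative note (not part of the upstream sources) :
 * the table above maps each public compression level to a parser.
 * Level 2 selects the fast lz4mid strategy, levels 3-9 the hash-chain
 * parser with a roughly doubling search budget, and levels 10-12 the
 * optimal parser. A minimal sketch of the lookup :
 *
 *     cParams_t const p = LZ4HC_getCLevelParams(9);
 *     assert(p.strat == lz4hc);
 *     assert(p.nbSearches == 256);   // from k_clTable[9]
 */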

/*=== Hashing ===*/
#define LZ4HC_HASHSIZE 4
#define HASH_FUNCTION(i)      (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
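
/* Illustrative note (an observation, not from the upstream sources) :
 * 2654435761U is the classic Knuth multiplicative-hash constant
 * (~2^32 / golden ratio). The multiply spreads input bits toward the
 * high end of the word, and the right shift keeps the top
 * LZ4HC_HASH_LOG bits as the table slot. A minimal sketch of the
 * arithmetic, assuming LZ4HC_HASH_LOG == 15 :
 *
 *     U32 const v = LZ4_read32(ptr);              // 4 input bytes
 *     U32 const h = (v * 2654435761U) >> (32-15);
 *     // h is in [0, 1<<15), usable as a hashTable index
 */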

#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */
static U64 LZ4_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific */
LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
static U64 LZ4_read64(const void* ptr) { return ((const LZ4_unalign64*)ptr)->u64; }

#else  /* safe and portable access using memcpy() */
static U64 LZ4_read64(const void* memPtr)
{
    U64 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */


#define LZ4MID_HASHSIZE 8
#define LZ4MID_HASHLOG (LZ4HC_HASH_LOG-1)
#define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)

static U32 LZ4MID_hash4(U32 v) { return (v * 2654435761U) >> (32-LZ4MID_HASHLOG); }
static U32 LZ4MID_hash4Ptr(const void* ptr) { return LZ4MID_hash4(LZ4_read32(ptr)); }
/* note: hash7 hashes the lower 56 bits.
 * It presumes input was read as little-endian. */
static U32 LZ4MID_hash7(U64 v) { return (U32)(((v << (64-56)) * 58295818150454627ULL) >> (64-LZ4MID_HASHLOG)) ; }
static U64 LZ4_readLE64(const void* memPtr);
static U32 LZ4MID_hash8Ptr(const void* ptr) { return LZ4MID_hash7(LZ4_readLE64(ptr)); }

static U64 LZ4_readLE64(const void* memPtr)
{
    if (LZ4_isLittleEndian()) {
        return LZ4_read64(memPtr);
    } else {
        const BYTE* p = (const BYTE*)memPtr;
        /* note: relies on the compiler to simplify this expression */
        return (U64)p[0] | ((U64)p[1]<<8) | ((U64)p[2]<<16) | ((U64)p[3]<<24)
             | ((U64)p[4]<<32) | ((U64)p[5]<<40) | ((U64)p[6]<<48) | ((U64)p[7]<<56);
    }
}


/*=== Count match length ===*/
LZ4_FORCE_INLINE
unsigned LZ4HC_NbCommonBytes32(U32 val)
{
    assert(val != 0);
    if (LZ4_isLittleEndian()) {
#     if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
        unsigned long r;
        _BitScanReverse(&r, val);
        return (unsigned)((31 - r) >> 3);
#     elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
            !defined(LZ4_FORCE_SW_BITCOUNT)
        return (unsigned)__builtin_clz(val) >> 3;
#     else
        val >>= 8;
        val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
              (val + 0x00FF0000)) >> 24;
        return (unsigned)val ^ 3;
#     endif
    } else {
#     if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
        unsigned long r;
        _BitScanForward(&r, val);
        return (unsigned)(r >> 3);
#     elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
            !defined(LZ4_FORCE_SW_BITCOUNT)
        return (unsigned)__builtin_ctz(val) >> 3;
#     else
        const U32 m = 0x01010101;
        return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
#     endif
    }
}
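
/* Worked example (illustrative, not from the upstream sources) :
 * on a little-endian machine, LZ4HC_countBack() below XORs 4 bytes read
 * backward from ip and match. The byte at the highest address lands in
 * the most significant bits of the load, so the number of matching
 * trailing bytes equals the number of leading zero bytes of the XOR :
 *
 *     val = 0x000000FF  =>  clz(val) = 24  =>  24 >> 3 = 3
 *
 * i.e. only the lowest-address byte differs, and the 3 bytes closest to
 * ip/match are common.
 */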

/** LZ4HC_countBack() :
 * @return : negative value, nb of common bytes before ip/match */
LZ4_FORCE_INLINE
int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
                    const BYTE* const iMin, const BYTE* const mMin)
{
    int back = 0;
    int const min = (int)MAX(iMin - ip, mMin - match);
    assert(min <= 0);
    assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
    assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));

    while ((back - min) > 3) {
        U32 const v = LZ4_read32(ip + back - 4) ^ LZ4_read32(match + back - 4);
        if (v) {
            return (back - (int)LZ4HC_NbCommonBytes32(v));
        } else back -= 4;  /* 4-byte step */
    }
    /* check remainder if any */
    while ( (back > min)
         && (ip[back-1] == match[back-1]) )
            back--;
    return back;
}
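
/* Usage sketch (illustrative) : with ip pointing just after "...abc"
 * and match pointing just after another "...abc" in the window,
 * LZ4HC_countBack() returns -3, meaning the match can be extended
 * 3 bytes backward (bounded by iMin/mMin so neither pointer escapes
 * its buffer).
 */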

/*=== Chain table updates ===*/
#define DELTANEXTU16(table, pos) table[(U16)(pos)]   /* faster */
/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor


/**************************************
* Init
**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
    MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}

static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
    size_t newStartingOffset = bufferSize + hc4->dictLimit;
    DEBUGLOG(5, "LZ4HC_init_internal");
    assert(newStartingOffset >= bufferSize);  /* check overflow */
    if (newStartingOffset > 1 GB) {
        LZ4HC_clearTables(hc4);
        newStartingOffset = 0;
    }
    newStartingOffset += 64 KB;
    hc4->nextToUpdate = (U32)newStartingOffset;
    hc4->prefixStart = start;
    hc4->end = start;
    hc4->dictStart = start;
    hc4->dictLimit = (U32)newStartingOffset;
    hc4->lowLimit = (U32)newStartingOffset;
}
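
/* Note (a plausible reading, not stated in the upstream sources) :
 * starting indexes at 64 KB guarantees that a zeroed hashTable entry
 * (index 0) always lies at a distance > LZ4_DISTANCE_MAX (65535) from
 * any real position, so empty slots are naturally rejected by the
 * `ipIndex - matchIndex <= LZ4_DISTANCE_MAX` checks, without requiring
 * a separate "empty" marker.
 */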

/**************************************
* Encode
**************************************/
/* LZ4HC_encodeSequence() :
 * @return : 0 if ok,
 *           1 if buffer issue detected */
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
    const BYTE** _ip,
    BYTE** _op,
    const BYTE** _anchor,
    int matchLength,
    int offset,
    limitedOutput_directive limit,
    BYTE* oend)
{
#define ip      (*_ip)
#define op      (*_op)
#define anchor  (*_anchor)

    size_t length;
    BYTE* const token = op++;

#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
    static const BYTE* start = NULL;
    static U32 totalCost = 0;
    U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
    U32 const ll = (U32)(ip - anchor);
    U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
    U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
    U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
    if (start==NULL) start = anchor;  /* only works for single segment */
    /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
    DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
                pos,
                (U32)(ip - anchor), matchLength, offset,
                cost, totalCost);
    totalCost += cost;
#endif

    /* Encode Literal length */
    length = (size_t)(ip - anchor);
    LZ4_STATIC_ASSERT(notLimited == 0);
    /* Check output limit */
    if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
        DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
                (int)length, (int)(oend - op));
        return 1;
    }
    if (length >= RUN_MASK) {
        size_t len = length - RUN_MASK;
        *token = (RUN_MASK << ML_BITS);
        for(; len >= 255 ; len -= 255) *op++ = 255;
        *op++ = (BYTE)len;
    } else {
        *token = (BYTE)(length << ML_BITS);
    }

    /* Copy Literals */
    LZ4_wildCopy8(op, anchor, op + length);
    op += length;

    /* Encode Offset */
    assert(offset <= LZ4_DISTANCE_MAX );
    assert(offset > 0);
    LZ4_writeLE16(op, (U16)(offset)); op += 2;

    /* Encode MatchLength */
    assert(matchLength >= MINMATCH);
    length = (size_t)matchLength - MINMATCH;
    if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
        DEBUGLOG(6, "Not enough room to write match length");
        return 1;   /* Check output limit */
    }
    if (length >= ML_MASK) {
        *token += ML_MASK;
        length -= ML_MASK;
        for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
        if (length >= 255) { length -= 255; *op++ = 255; }
        *op++ = (BYTE)length;
    } else {
        *token += (BYTE)(length);
    }

    /* Prepare next loop */
    ip += matchLength;
    anchor = ip;

    return 0;

#undef ip
#undef op
#undef anchor
}
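
/* Worked example (illustrative) : encoding a sequence of 20 literals
 * followed by a 21-byte match at offset 300 produces :
 *
 *     token     = 0xFF          // literal nibble 15, match nibble 15
 *     ll extra  = 0x05          // 20 - 15
 *     literals  = 20 bytes, copied verbatim
 *     offset    = 0x2C 0x01     // 300, little-endian 16-bit
 *     ml extra  = 0x02          // (21 - MINMATCH) - 15, with MINMATCH == 4
 *
 * matching the token/extension scheme implemented above.
 */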

typedef struct {
    int off;
    int len;
    int back;  /* negative value */
} LZ4HC_match_t;

LZ4HC_match_t LZ4HC_searchExtDict(const BYTE* ip, U32 ipIndex,
        const BYTE* const iLowLimit, const BYTE* const iHighLimit,
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex,
        int currentBestML, int nbAttempts)
{
    size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
    U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
    U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
    int offset = 0, sBack = 0;
    assert(lDictEndIndex <= 1 GB);
    if (lDictMatchIndex>0)
        DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u", lDictEndIndex, lDictMatchIndex);
    while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
        const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + lDictMatchIndex;

        if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
            int mlt;
            int back = 0;
            const BYTE* vLimit = ip + (lDictEndIndex - lDictMatchIndex);
            if (vLimit > iHighLimit) vLimit = iHighLimit;
            mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
            back = (ip > iLowLimit) ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
            mlt -= back;
            if (mlt > currentBestML) {
                currentBestML = mlt;
                offset = (int)(ipIndex - matchIndex);
                sBack = back;
                DEBUGLOG(7, "found match of length %i within extDictCtx", currentBestML);
        }   }

        {   U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, lDictMatchIndex);
            lDictMatchIndex -= nextOffset;
            matchIndex -= nextOffset;
    }   }

    {   LZ4HC_match_t md;
        md.len = currentBestML;
        md.off = offset;
        md.back = sBack;
        return md;
    }
}
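
/* Note (a plausible reading) : the dictionary context keeps its own
 * local index space, so a candidate taken from dictCtx->hashTable is
 * first translated into the current context's numbering :
 *
 *     matchIndex = lDictMatchIndex + gDictEndIndex - lDictEndIndex;
 *
 * i.e. the local index, shifted by where the dictionary ends in the
 * global numbering. Distances can then be compared directly against
 * LZ4_DISTANCE_MAX.
 */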

typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(const BYTE* ip, U32 ipIndex,
        const BYTE* const iHighLimit,
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex);

static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE* ip, U32 ipIndex,
        const BYTE* const iHighLimit,
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
{
    return LZ4HC_searchExtDict(ip,ipIndex,
            ip, iHighLimit,
            dictCtx, gDictEndIndex,
            MINMATCH-1, 2);
}

static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE* ip, U32 ipIndex,
        const BYTE* const iHighLimit,
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
{
    size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
    const U32* const hash4Table = dictCtx->hashTable;
    const U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
    DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex);

    /* search long match first */
    {   U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)];
        U32 m8Index = l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
        assert(lDictEndIndex <= 1 GB);
        if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) {
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l8DictMatchIndex;
            const size_t safeLen = MIN(lDictEndIndex - l8DictMatchIndex, (size_t)(iHighLimit - ip));
            int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
            if (mlt >= MINMATCH) {
                LZ4HC_match_t md;
                DEBUGLOG(7, "Found long ExtDict match of len=%i", mlt);
                md.len = mlt;
                md.off = (int)(ipIndex - m8Index);
                md.back = 0;
                return md;
            }
        }
    }

    /* search for short match second */
    {   U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)];
        U32 m4Index = l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
        if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) {
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l4DictMatchIndex;
            const size_t safeLen = MIN(lDictEndIndex - l4DictMatchIndex, (size_t)(iHighLimit - ip));
            int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
            if (mlt >= MINMATCH) {
                LZ4HC_match_t md;
                DEBUGLOG(7, "Found short ExtDict match of len=%i", mlt);
                md.len = mlt;
                md.off = (int)(ipIndex - m4Index);
                md.back = 0;
                return md;
            }
        }
    }

    /* nothing found */
    {   LZ4HC_match_t const md = {0, 0, 0 };
        return md;
    }
}

/**************************************
* Mid Compression (level 2)
**************************************/

LZ4_FORCE_INLINE void
LZ4MID_addPosition(U32* hTable, U32 hValue, U32 index)
{
    hTable[hValue] = index;
}

#define ADDPOS8(_p, _idx) LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx)
#define ADDPOS4(_p, _idx) LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx)

/* Fill hash tables with references into dictionary.
 * The resulting table is only exploitable by LZ4MID (level 2) */
static void
LZ4MID_fillHTable (LZ4HC_CCtx_internal* cctx, const void* dict, size_t size)
{
    U32* const hash4Table = cctx->hashTable;
    U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
    const BYTE* const prefixPtr = (const BYTE*)dict;
    U32 const prefixIdx = cctx->dictLimit;
    U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE;
    U32 idx = cctx->nextToUpdate;
    assert(dict == cctx->prefixStart);
    DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size);
    if (size <= LZ4MID_HASHSIZE)
        return;

    for (; idx < target; idx += 3) {
        ADDPOS4(prefixPtr+idx-prefixIdx, idx);
        ADDPOS8(prefixPtr+idx+1-prefixIdx, idx+1);
    }

    idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB : cctx->nextToUpdate;
    for (; idx < target; idx += 1) {
        ADDPOS8(prefixPtr+idx-prefixIdx, idx);
    }
    cctx->nextToUpdate = target;
}
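
/* Note (a plausible reading) : the first loop samples the dictionary
 * sparsely, registering one 4-byte hash and one 8-byte hash every
 * 3 positions; the second loop then densely re-registers 8-byte hashes
 * over the last 32 KB, the region most likely to yield matches for
 * data that immediately follows the dictionary.
 */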

static LZ4MID_searchIntoDict_f select_searchDict_function(const LZ4HC_CCtx_internal* dictCtx)
{
    if (dictCtx == NULL) return NULL;
    if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid)
        return LZ4MID_searchExtDict;
    return LZ4MID_searchHCDict;
}

static int LZ4MID_compress (
    LZ4HC_CCtx_internal* const ctx,
    const char* const src,
    char* const dst,
    int* srcSizePtr,
    int const maxOutputSize,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    U32* const hash4Table = ctx->hashTable;
    U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
    const BYTE* ip = (const BYTE*)src;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = (iend - LASTLITERALS);
    const BYTE* const ilimit = (iend - LZ4MID_HASHSIZE);
    BYTE* op = (BYTE*)dst;
    BYTE* oend = op + maxOutputSize;

    const BYTE* const prefixPtr = ctx->prefixStart;
    const U32 prefixIdx = ctx->dictLimit;
    const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx;
    const BYTE* const dictStart = ctx->dictStart;
    const U32 dictIdx = ctx->lowLimit;
    const U32 gDictEndIndex = ctx->lowLimit;
    const LZ4MID_searchIntoDict_f searchIntoDict = (dict == usingDictCtxHc) ? select_searchDict_function(ctx->dictCtx) : NULL;
    unsigned matchLength;
    unsigned matchDistance;

    /* input sanitization */
    DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr);
    if (dict == usingDictCtxHc) DEBUGLOG(5, "usingDictCtxHc");
    assert(*srcSizePtr >= 0);
    if (*srcSizePtr) assert(src != NULL);
    if (maxOutputSize) assert(dst != NULL);
    if (*srcSizePtr < 0) return 0;  /* invalid */
    if (maxOutputSize < 0) return 0;  /* invalid */
    if (*srcSizePtr > LZ4_MAX_INPUT_SIZE) {
        /* forbidden: no input is allowed to be that large */
        return 0;
    }
    if (limit == fillOutput) oend -= LASTLITERALS;  /* Hack to support the LZ4 format restriction */
    if (*srcSizePtr < LZ4_minLength)
        goto _lz4mid_last_literals;  /* Input too small, no compression (all literals) */

    /* main loop */
    while (ip <= mflimit) {
        const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
        /* search long match */
        {   U32 const h8 = LZ4MID_hash8Ptr(ip);
            U32 const pos8 = hash8Table[h8];
            assert(h8 < LZ4MID_HASHTABLESIZE);
            assert(pos8 < ipIndex);
            LZ4MID_addPosition(hash8Table, h8, ipIndex);
            if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) {
                /* match candidate found */
                if (pos8 >= prefixIdx) {
                    const BYTE* const matchPtr = prefixPtr + pos8 - prefixIdx;
                    assert(matchPtr < ip);
                    matchLength = LZ4_count(ip, matchPtr, matchlimit);
                    if (matchLength >= MINMATCH) {
                        DEBUGLOG(7, "found long match at pos %u (len=%u)", pos8, matchLength);
                        matchDistance = ipIndex - pos8;
                        goto _lz4mid_encode_sequence;
                    }
                } else {
                    if (pos8 >= dictIdx) {
                        /* extDict match candidate */
                        const BYTE* const matchPtr = dictStart + (pos8 - dictIdx);
                        const size_t safeLen = MIN(prefixIdx - pos8, (size_t)(matchlimit - ip));
                        matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
                        if (matchLength >= MINMATCH) {
                            DEBUGLOG(7, "found long match at ExtDict pos %u (len=%u)", pos8, matchLength);
                            matchDistance = ipIndex - pos8;
                            goto _lz4mid_encode_sequence;
                        }
                    }
                }
        }   }

        /* search short match */
        {   U32 const h4 = LZ4MID_hash4Ptr(ip);
            U32 const pos4 = hash4Table[h4];
            assert(h4 < LZ4MID_HASHTABLESIZE);
            assert(pos4 < ipIndex);
            LZ4MID_addPosition(hash4Table, h4, ipIndex);
            if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) {
                /* match candidate found */
                if (pos4 >= prefixIdx) {
                    /* only search within prefix */
                    const BYTE* const matchPtr = prefixPtr + (pos4 - prefixIdx);
                    assert(matchPtr < ip);
                    assert(matchPtr >= prefixPtr);
                    matchLength = LZ4_count(ip, matchPtr, matchlimit);
                    if (matchLength >= MINMATCH) {
                        /* short match found, let's just check ip+1 for longer */
                        U32 const h8 = LZ4MID_hash8Ptr(ip+1);
                        U32 const pos8 = hash8Table[h8];
                        U32 const m2Distance = ipIndex + 1 - pos8;
                        matchDistance = ipIndex - pos4;
                        if ( m2Distance <= LZ4_DISTANCE_MAX
                          && pos8 >= prefixIdx  /* only search within prefix */
                          && likely(ip < mflimit)
                          ) {
                            const BYTE* const m2Ptr = prefixPtr + (pos8 - prefixIdx);
                            unsigned ml2 = LZ4_count(ip+1, m2Ptr, matchlimit);
                            if (ml2 > matchLength) {
                                LZ4MID_addPosition(hash8Table, h8, ipIndex+1);
                                ip++;
                                matchLength = ml2;
                                matchDistance = m2Distance;
                        }   }
                        goto _lz4mid_encode_sequence;
                    }
                } else {
                    if (pos4 >= dictIdx) {
                        /* extDict match candidate */
                        const BYTE* const matchPtr = dictStart + (pos4 - dictIdx);
                        const size_t safeLen = MIN(prefixIdx - pos4, (size_t)(matchlimit - ip));
                        matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
                        if (matchLength >= MINMATCH) {
                            DEBUGLOG(7, "found match at ExtDict pos %u (len=%u)", pos4, matchLength);
                            matchDistance = ipIndex - pos4;
                            goto _lz4mid_encode_sequence;
                        }
                    }
                }
        }   }

        /* no match found in prefix */
        if ( (dict == usingDictCtxHc)
          && (ipIndex - gDictEndIndex < LZ4_DISTANCE_MAX - 8) ) {
            /* search a match into external dictionary */
            LZ4HC_match_t dMatch = searchIntoDict(ip, ipIndex,
                    matchlimit,
                    ctx->dictCtx, gDictEndIndex);
            if (dMatch.len >= MINMATCH) {
                DEBUGLOG(7, "found Dictionary match (offset=%i)", dMatch.off);
                assert(dMatch.back == 0);
                matchLength = (unsigned)dMatch.len;
                matchDistance = (unsigned)dMatch.off;
                goto _lz4mid_encode_sequence;
            }
        }

        /* no match found */
        ip += 1 + ((ip-anchor) >> 9);  /* skip faster over incompressible data */
        continue;

_lz4mid_encode_sequence:
        /* catch back */
        while (((ip > anchor) & ((U32)(ip-prefixPtr) > matchDistance)) && (unlikely(ip[-1] == ip[-(int)matchDistance-1]))) {
            ip--; matchLength++;
        };

        /* fill table with beginning of match */
        ADDPOS8(ip+1, ipIndex+1);
        ADDPOS8(ip+2, ipIndex+2);
        ADDPOS4(ip+1, ipIndex+1);

        /* encode */
        {   BYTE* const saved_op = op;
            /* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    (int)matchLength, (int)matchDistance,
                    limit, oend) ) {
                op = saved_op;  /* restore @op value before failed LZ4HC_encodeSequence */
                goto _lz4mid_dest_overflow;
            }
        }

        /* fill table with end of match */
        {   U32 endMatchIdx = (U32)(ip-prefixPtr) + prefixIdx;
            U32 pos_m2 = endMatchIdx - 2;
            if (pos_m2 < ilimitIdx) {
                if (likely(ip - prefixPtr > 5)) {
                    ADDPOS8(ip-5, endMatchIdx - 5);
                }
                ADDPOS8(ip-3, endMatchIdx - 3);
                ADDPOS8(ip-2, endMatchIdx - 2);
                ADDPOS4(ip-2, endMatchIdx - 2);
                ADDPOS4(ip-1, endMatchIdx - 1);
            }
        }
    }

_lz4mid_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + llAdd + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0;  /* not enough space in @dst */
            /* adapt lastRunSize to fill 'dest' */
            lastRunSize = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = (RUN_MASK << ML_BITS);
            for(; accumulator >= 255 ; accumulator -= 255)
                *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize << ML_BITS);
        }
        assert(lastRunSize <= (size_t)(oend - op));
        LZ4_memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr, (int)((char*)op - dst));
    assert(ip >= (const BYTE*)src);
    assert(ip <= iend);
    *srcSizePtr = (int)(ip - (const BYTE*)src);
    assert((char*)op >= dst);
    assert(op <= oend);
    assert((char*)op - dst < INT_MAX);
    return (int)((char*)op - dst);

_lz4mid_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : @ip, @anchor, @matchLength and @matchDistance must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3;  /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence is overflowing : %u literals, %u remaining space",
                (unsigned)ll, (unsigned)(oend-op));
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
            assert(maxMlSize < INT_MAX);
            if ((size_t)matchLength > maxMlSize) matchLength = (unsigned)maxMlSize;
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + matchLength >= MFLIMIT) {
                DEBUGLOG(6, "Let's encode a last sequence (ll=%u, ml=%u)", (unsigned)ll, matchLength);
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                        (int)matchLength, (int)matchDistance,
                        notLimited, oend);
        }   }
        DEBUGLOG(6, "Let's finish with a run of literals (%u bytes left)", (unsigned)(oend-op));
        goto _lz4mid_last_literals;
    }
    /* compression failed */
    return 0;
}
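
/* Usage sketch (illustrative; uses the public lz4hc.h API) :
 * LZ4MID_compress() is reached through the regular entry points when
 * the compression level selects the lz4mid strategy (level 2 in
 * k_clTable). For example :
 *
 *     #include "lz4hc.h"
 *     char dst[LZ4_COMPRESSBOUND(1024)];
 *     int const csize = LZ4_compress_HC(src, dst, 1024, sizeof(dst), 2);
 *     // csize > 0 on success; level 2 routes to LZ4MID_compress()
 */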

/**************************************
* HC Compression - Search
**************************************/

/* Update chains up to ip (excluded) */
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable  = hc4->hashTable;
    const BYTE* const prefixPtr = hc4->prefixStart;
    U32 const prefixIdx = hc4->dictLimit;
    U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
    U32 idx = hc4->nextToUpdate;
    assert(ip >= prefixPtr);
    assert(target >= prefixIdx);

    while (idx < target) {
        U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
        size_t delta = idx - hashTable[h];
        if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
        DELTANEXTU16(chainTable, idx) = (U16)delta;
        hashTable[h] = idx;
        idx++;
    }

    hc4->nextToUpdate = target;
}
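
/* Note (a plausible reading) : hashTable keeps the most recent position
 * for each hash, while chainTable stores, for every position, the
 * 16-bit delta to the previous position with the same hash. Walking a
 * chain is therefore repeated subtraction :
 *
 *     U32 matchIndex = hashTable[h];                       // newest
 *     matchIndex -= DELTANEXTU16(chainTable, matchIndex);  // older
 *
 * A delta saturated at LZ4_DISTANCE_MAX eventually takes the walk
 * below the valid window, which terminates the search.
 */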

#if defined(_MSC_VER)
#  define LZ4HC_rotl32(x,r) _rotl(x,r)
#else
#  define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif

static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
    size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
    if (bitsToRotate == 0) return pattern;
    return LZ4HC_rotl32(pattern, (int)bitsToRotate);
}

/* LZ4HC_countPattern() :
 * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
    const BYTE* const iStart = ip;
    reg_t const pattern = (sizeof(pattern)==8) ?
        (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;

    while (likely(ip < iEnd-(sizeof(pattern)-1))) {
        reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
        if (!diff) { ip+=sizeof(pattern); continue; }
        ip += LZ4_NbCommonBytes(diff);
        return (unsigned)(ip - iStart);
    }

    if (LZ4_isLittleEndian()) {
        reg_t patternByte = pattern;
        while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
            ip++; patternByte >>= 8;
        }
    } else {  /* big endian */
        U32 bitOffset = (sizeof(pattern)*8) - 8;
        while (ip < iEnd) {
            BYTE const byte = (BYTE)(pattern >> bitOffset);
            if (*ip != byte) break;
            ip ++; bitOffset -= 8;
    }   }

    return (unsigned)(ip - iStart);
}
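
/* Worked example (illustrative) : on a 64-bit target, a 32-bit sample
 * 0x61626162 ("baba" read little-endian) is widened to
 * 0x6162616261626162 by `pattern32 + (pattern32 << 32)`, so the main
 * loop can compare 8 bytes of the repetition per iteration; the tail
 * loops then handle the final, partial word byte by byte.
 */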

/* LZ4HC_reverseCountPattern() :
 * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
 * read using natural platform endianness */
static unsigned
LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
{
    const BYTE* const iStart = ip;

    while (likely(ip >= iLow+4)) {
        if (LZ4_read32(ip-4) != pattern) break;
        ip -= 4;
    }
    {   const BYTE* bytePtr = (const BYTE*)(&pattern) + 3;  /* works for any endianness */
        while (likely(ip>iLow)) {
            if (ip[-1] != *bytePtr) break;
            ip--; bytePtr--;
    }   }
    return (unsigned)(iStart - ip);
}

/* LZ4HC_protectDictEnd() :
 * Checks if the match is in the last 3 bytes of the dictionary, so that
 * reading the 4-byte MINMATCH would overflow.
 * @returns true if the match index is okay.
 */
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
{
    return ((U32)((dictLimit - 1) - matchIndex) >= 3);
}
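
/* Worked example (illustrative) : with dictLimit == 100, matchIndex 96
 * gives (99 - 96) = 3 >= 3 and is accepted, while matchIndex 98 gives
 * (99 - 98) = 1 < 3 and is rejected, because a 4-byte read at that
 * match would cross the end of the dictionary. A matchIndex at or
 * beyond dictLimit wraps to a large unsigned value and is accepted,
 * which is correct : such matches lie in the current prefix, not in
 * the dictionary.
 */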

typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;

LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_InsertAndGetWiderMatch (
        LZ4HC_CCtx_internal* const hc4,
        const BYTE* const ip,
        const BYTE* const iLowLimit, const BYTE* const iHighLimit,
        int longest,
        const int maxNbAttempts,
        const int patternAnalysis, const int chainSwap,
        const dictCtx_directive dict,
        const HCfavor_e favorDecSpeed)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable = hc4->hashTable;
    const LZ4HC_CCtx_internal* const dictCtx = hc4->dictCtx;
    const BYTE* const prefixPtr = hc4->prefixStart;
    const U32 prefixIdx = hc4->dictLimit;
    const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
    const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex);
    const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
    const BYTE* const dictStart = hc4->dictStart;
    const U32 dictIdx = hc4->lowLimit;
    const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
    int const lookBackLength = (int)(ip-iLowLimit);
    int nbAttempts = maxNbAttempts;
    U32 matchChainPos = 0;
    U32 const pattern = LZ4_read32(ip);
    U32 matchIndex;
    repeat_state_e repeat = rep_untested;
    size_t srcPatternLength = 0;
    int offset = 0, sBack = 0;

    DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
    /* First Match */
    LZ4HC_Insert(hc4, ip);  /* insert all prior positions up to ip (excluded) */
    matchIndex = hashTable[LZ4HC_hashPtr(ip)];
    DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)",
                ipIndex, matchIndex, lowestMatchIndex);

    while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
        int matchLength=0;
        nbAttempts--;
        assert(matchIndex < ipIndex);
        if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
            /* do nothing:
             * favorDecSpeed intentionally skips matches with offset < 8 */
        } else if (matchIndex >= prefixIdx) {  /* within current Prefix */
            const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx);
            assert(matchPtr < ip);
            assert(longest >= 1);
            if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
                if (LZ4_read32(matchPtr) == pattern) {
                    int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
                    matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
                    matchLength -= back;
                    if (matchLength > longest) {
                        longest = matchLength;
                        offset = (int)(ipIndex - matchIndex);
                        sBack = back;
                        DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back);
            }   }   }
        } else {  /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */
            const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
            assert(matchIndex >= dictIdx);
            if ( likely(matchIndex <= prefixIdx - 4)
              && (LZ4_read32(matchPtr) == pattern) ) {
                int back = 0;
                const BYTE* vLimit = ip + (prefixIdx - matchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
                    matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit);
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
                matchLength -= back;
                if (matchLength > longest) {
                    longest = matchLength;
                    offset = (int)(ipIndex - matchIndex);
                    sBack = back;
                    DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back);
        }   }   }

        if (chainSwap && matchLength==longest) {  /* better match => select a better chain */
            assert(lookBackLength==0);  /* search forward only */
            if (matchIndex + (U32)longest <= ipIndex) {
                int const kTrigger = 4;
                U32 distanceToNextMatch = 1;
                int const end = longest - MINMATCH + 1;
                int step = 1;
                int accel = 1 << kTrigger;
                int pos;
                for (pos = 0; pos < end; pos += step) {
                    U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
                    step = (accel++ >> kTrigger);
                    if (candidateDist > distanceToNextMatch) {
                        distanceToNextMatch = candidateDist;
                        matchChainPos = (U32)pos;
                        accel = 1 << kTrigger;
                }   }
                if (distanceToNextMatch > 1) {
                    if (distanceToNextMatch > matchIndex) break;  /* avoid overflow */
                    matchIndex -= distanceToNextMatch;
                    continue;
        }   }   }

        {   U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
            if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
                U32 const matchCandidateIdx = matchIndex-1;
                /* may be a repeated pattern */
                if (repeat == rep_untested) {
                    if ( ((pattern & 0xFFFF) == (pattern >> 16))
                      &  ((pattern & 0xFF)   == (pattern >> 24)) ) {
                        DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24);
                        repeat = rep_confirmed;
                        srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
                    } else {
                        repeat = rep_not;
                }   }
                if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
                  && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
                    const int extDict = matchCandidateIdx < prefixIdx;
                    const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx);
                    if (LZ4_read32(matchPtr) == pattern) {  /* good candidate */
                        const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
                        size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
                        if (extDict && matchPtr + forwardPatternLength == iLimit) {
                            U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
                            forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
                        }
                        {   const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
                            size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
                            size_t currentSegmentLength;
                            if (!extDict
                              && matchPtr - backLength == prefixPtr
                              && dictIdx < prefixIdx) {
                                U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
                                backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
                            }
                            /* Limit backLength so it does not go further than lowestMatchIndex */
                            backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
                            assert(matchCandidateIdx - backLength >= lowestMatchIndex);
                            currentSegmentLength = backLength + forwardPatternLength;
                            /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
                            if ( (currentSegmentLength >= srcPatternLength)  /* current pattern segment large enough to contain full srcPatternLength */
                              && (forwardPatternLength <= srcPatternLength) ) {  /* haven't reached this position yet */
                                U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength;  /* best position, full pattern, might be followed by more match */
                                if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
                                    matchIndex = newMatchIndex;
                                else {
                                    /* Can only happen if started in the prefix */
                                    assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                                    matchIndex = prefixIdx;
                                }
                            } else {
                                U32 const newMatchIndex = matchCandidateIdx - (U32)backLength;  /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
                                if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
                                    assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                                    matchIndex = prefixIdx;
                                } else {
                                    matchIndex = newMatchIndex;
                                    if (lookBackLength==0) {  /* no back possible */
                                        size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
                                        if ((size_t)longest < maxML) {
                                            assert(prefixPtr - prefixIdx + matchIndex != ip);
                                            if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
                                            assert(maxML < 2 GB);
                                            longest = (int)maxML;
                                            offset = (int)(ipIndex - matchIndex);
                                            assert(sBack == 0);
                                            DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset);
                                        }
                                        {   U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
                                            if (distToNextPattern > matchIndex) break;  /* avoid overflow */
                                            matchIndex -= distToNextPattern;
                        }   }   }   }   }
                        continue;
                }   }
        }   }  /* PA optimization */

        /* follow current chain */
        matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);

    }  /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */

    if ( dict == usingDictCtxHc
      && nbAttempts > 0
      && withinStartDistance) {
        size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
        U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
        assert(dictEndOffset <= 1 GB);
        matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
        if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset);
        while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;

            if (LZ4_read32(matchPtr) == pattern) {
                int mlt;
                int back = 0;
                const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
                mlt -= back;
                if (mlt > longest) {
                    longest = mlt;
                    offset = (int)(ipIndex - matchIndex);
                    sBack = back;
                    DEBUGLOG(7, "found match of length %i within extDictCtx", longest);
            }   }

            {   U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
                dictMatchIndex -= nextOffset;
                matchIndex -= nextOffset;
    }   }   }

    {   LZ4HC_match_t md;
        assert(longest >= 0);
        md.len = longest;
        md.off = offset;
        md.back = sBack;
        return md;
    }
}
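
/* Note (a plausible reading) : the patternAnalysis branch above handles
 * long runs of a repeated 1-, 2- or 4-byte pattern (e.g. a block of
 * zeros). A chain delta of 1 means the previous position hashes
 * identically, the signature of such a run; instead of stepping through
 * it one position at a time, the code measures the whole pattern
 * segment (forward + backward) and jumps matchIndex straight to the
 * most useful position in it, potentially skipping many chain steps.
 */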

LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,  /* Index table will be updated */
        const BYTE* const ip, const BYTE* const iLimit,
        const int maxNbAttempts,
        const int patternAnalysis,
        const dictCtx_directive dict)
{
    DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch");
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
     * but this won't be the case here, as we define iLowLimit==ip,
     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
}

LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
    LZ4HC_CCtx_internal* const ctx,
    const char* const source,
    char* const dest,
    int* srcSizePtr,
    int const maxOutputSize,
    int maxNbAttempts,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    const int inputSize = *srcSizePtr;
    const int patternAnalysis = (maxNbAttempts > 128);  /* levels 9+ */

    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = (iend - LASTLITERALS);

    BYTE* optr = (BYTE*) dest;
    BYTE* op = (BYTE*) dest;
    BYTE* oend = op + maxOutputSize;

    const BYTE* start0;
    const BYTE* start2 = NULL;
    const BYTE* start3 = NULL;
    LZ4HC_match_t m0, m1, m2, m3;
    const LZ4HC_match_t nomatch = {0, 0, 0};

    /* init */
    DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict);
    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS;  /* Hack to support the LZ4 format restriction */
    if (inputSize < LZ4_minLength) goto _last_literals;  /* Input too small, no compression (all literals) */

    /* Main Loop */
    while (ip <= mflimit) {
        m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict);
        if (m1.len<MINMATCH) { ip++; continue; }

        /* saved, in case we would skip too much */
        start0 = ip; m0 = m1;

_Search2:
        DEBUGLOG(7, "_Search2 (currently found match of size %i)", m1.len);
        if (ip+m1.len <= mflimit) {
            start2 = ip + m1.len - 2;
            m2 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            start2, ip + 0, matchlimit, m1.len,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
            start2 += m2.back;
        } else {
            m2 = nomatch;  /* do not search further */
        }

        if (m2.len <= m1.len) {  /* No better match => encode ML1 immediately */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m1.len, m1.off,
                    limit, oend) )
                goto _dest_overflow;
            continue;
        }

        if (start0 < ip) {  /* first match was skipped at least once */
            if (start2 < ip + m0.len) {  /* squeezing ML1 between ML0(original ML1) and ML2 */
                ip = start0; m1 = m0;  /* restore initial Match1 */
        }   }

        /* Here, start0==ip */
        if ((start2 - ip) < 3) {  /* First Match too small : removed */
            ip = start2;
            m1 = m2;
            goto _Search2;
        }

_Search3:
        if ((start2 - ip) < OPTIMAL_ML) {
            int correction;
            int new_ml = m1.len;
            if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
            if (ip+new_ml > start2 + m2.len - MINMATCH)
                new_ml = (int)(start2 - ip) + m2.len - MINMATCH;
            correction = new_ml - (int)(start2 - ip);
            if (correction > 0) {
                start2 += correction;
                m2.len -= correction;
            }
        }

        if (start2 + m2.len <= mflimit) {
            start3 = start2 + m2.len - 3;
            m3 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            start3, start2, matchlimit, m2.len,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
            start3 += m3.back;
        } else {
            m3 = nomatch;  /* do not search further */
        }

        if (m3.len <= m2.len) {  /* No better match => encode ML1 and ML2 */
            /* ip & ref are known; Now for ml */
            if (start2 < ip+m1.len) m1.len = (int)(start2 - ip);
            /* Now, encode 2 sequences */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m1.len, m1.off,
                    limit, oend) )
                goto _dest_overflow;
            ip = start2;
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m2.len, m2.off,
                    limit, oend) ) {
                m1 = m2;
                goto _dest_overflow;
            }
            continue;
        }

        if (start3 < ip+m1.len+3) {  /* Not enough space for match 2 : remove it */
            if (start3 >= (ip+m1.len)) {  /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
                if (start2 < ip+m1.len) {
                    int correction = (int)(ip+m1.len - start2);
                    start2 += correction;
                    m2.len -= correction;
                    if (m2.len < MINMATCH) {
                        start2 = start3;
                        m2 = m3;
                    }
                }

                optr = op;
                if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                        m1.len, m1.off,
                        limit, oend) )
                    goto _dest_overflow;
                ip = start3;
                m1 = m3;

                start0 = start2;
                m0 = m2;
                goto _Search2;
            }

            start2 = start3;
            m2 = m3;
            goto _Search3;
        }

        /*
         * OK, now we have 3 ascending matches;
         * let's write the first one ML1.
         * ip & ref are known; Now decide ml.
         */
        if (start2 < ip+m1.len) {
            if ((start2 - ip) < OPTIMAL_ML) {
                int correction;
                if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML;
                if (ip + m1.len > start2 + m2.len - MINMATCH)
                    m1.len = (int)(start2 - ip) + m2.len - MINMATCH;
                correction = m1.len - (int)(start2 - ip);
                if (correction > 0) {
                    start2 += correction;
                    m2.len -= correction;
                }
            } else {
                m1.len = (int)(start2 - ip);
            }
        }
        optr = op;
        if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                m1.len, m1.off,
                limit, oend) )
            goto _dest_overflow;

        /* ML2 becomes ML1 */
        ip = start2; m1 = m2;

        /* ML3 becomes ML2 */
        start2 = start3; m2 = m3;

        /* let's find a new ML3 */
        goto _Search3;
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + llAdd + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0;
            /* adapt lastRunSize to fill 'dest' */
            lastRunSize = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = (RUN_MASK << ML_BITS);
            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize << ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - source);
    return (int) (((char*)op)-dest);

_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3;  /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence overflowing");
        op = optr;  /* restore correct out pointer */
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
  1216. assert(maxMlSize < INT_MAX); assert(m1.len >= 0);
  1217. if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize;
  1218. if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) {
  1219. LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend);
  1220. } }
  1221. goto _last_literals;
  1222. }
  1223. /* compression failed */
  1224. return 0;
  1225. }
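
/* note : the loop above implements lazy matching with up to three overlapping
 * candidates (m1/m2/m3) : a longer match starting inside the current one may
 * displace it, and OPTIMAL_ML caps how much of m1 is worth keeping when m1
 * and m2 overlap, since any match longer than OPTIMAL_ML requires extra
 * length bytes to encode. */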
static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
                                   const char* const source, char* dst,
                                   int* srcSizePtr, int dstCapacity,
                                   int const nbSearches, size_t sufficient_len,
                                   const limitedOutput_directive limit, int const fullUpdate,
                                   const dictCtx_directive dict,
                                   const HCfavor_e favorDecSpeed);

LZ4_FORCE_INLINE int
LZ4HC_compress_generic_internal (
                LZ4HC_CCtx_internal* const ctx,
                const char* const src,
                char* const dst,
                int* const srcSizePtr,
                int const dstCapacity,
                int cLevel,
                const limitedOutput_directive limit,
                const dictCtx_directive dict
                )
{
    DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d)",
                src, *srcSizePtr);

    if (limit == fillOutput && dstCapacity < 1) return 0;   /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;  /* Unsupported input size (too large or negative) */

    ctx->end += *srcSizePtr;
    {   cParams_t const cParam = LZ4HC_getCLevelParams(cLevel);
        HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
        int result;

        if (cParam.strat == lz4mid) {
            result = LZ4MID_compress(ctx,
                        src, dst, srcSizePtr, dstCapacity,
                        limit, dict);
        } else if (cParam.strat == lz4hc) {
            result = LZ4HC_compress_hashChain(ctx,
                        src, dst, srcSizePtr, dstCapacity,
                        cParam.nbSearches, limit, dict);
        } else {
            assert(cParam.strat == lz4opt);
            result = LZ4HC_compress_optimal(ctx,
                        src, dst, srcSizePtr, dstCapacity,
                        cParam.nbSearches, cParam.targetLength, limit,
                        cLevel >= LZ4HC_CLEVEL_MAX,  /* ultra mode */
                        dict, favor);
        }
        if (result <= 0) ctx->dirty = 1;
        return result;
    }
}
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);

static int
LZ4HC_compress_generic_noDictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    assert(ctx->dictCtx == NULL);
    return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
}

static int isStateCompatible(const LZ4HC_CCtx_internal* ctx1, const LZ4HC_CCtx_internal* ctx2)
{
    int const isMid1 = LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid;
    int const isMid2 = LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid;
    return !(isMid1 ^ isMid2);
}
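
/* note : an lz4mid context presumably fills its tables differently from the
 * chain-based strategies, so a dictionary context can only be duplicated into
 * a working context when both sides use the same kind of state; this is what
 * the isStateCompatible() check above guards in
 * LZ4HC_compress_generic_dictCtx() below. */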
static int
LZ4HC_compress_generic_dictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
    assert(ctx->dictCtx != NULL);
    if (position >= 64 KB) {
        ctx->dictCtx = NULL;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else if (position == 0 && *srcSizePtr > 4 KB && isStateCompatible(ctx, ctx->dictCtx)) {
        LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
        LZ4HC_setExternalDict(ctx, (const BYTE *)src);
        ctx->compressionLevel = (short)cLevel;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else {
        return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
    }
}

static int
LZ4HC_compress_generic (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    if (ctx->dictCtx == NULL) {
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else {
        return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    }
}
int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }

static size_t LZ4_streamHC_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_streamHC_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_streamHC_t);
#else
    return 1;  /* effectively disabled */
#endif
}

/* state is presumed correctly initialized,
 * in which case its size and alignment have already been validated */
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
    LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
    if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
    LZ4HC_init_internal (ctx, (const BYTE*)src);
    if (dstCapacity < LZ4_compressBound(srcSize))
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
    else
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
}

int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
    if (ctx==NULL) return 0;   /* init failure */
    return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
}

int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
    int cSize;
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
    if (statePtr==NULL) return 0;
#else
    LZ4_streamHC_t state;
    LZ4_streamHC_t* const statePtr = &state;
#endif
    DEBUGLOG(5, "LZ4_compress_HC");
    cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    FREEMEM(statePtr);
#endif
    return cSize;
}
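
/* Usage sketch (illustrative, not part of the library) : a minimal one-shot
 * round trip through LZ4_compress_HC(). Assumes lz4.h / lz4hc.h are
 * available; the payload and level 9 are arbitrary example values.
 *
 *   #include <stdlib.h>
 *   #include "lz4.h"
 *   #include "lz4hc.h"
 *
 *   int roundTripExample(void)
 *   {
 *       const char input[] = "lz4 hc example payload, lz4 hc example payload";
 *       int const srcSize = (int)sizeof(input);
 *       int const bound = LZ4_compressBound(srcSize);
 *       char* const cBuf = (char*)malloc((size_t)bound);
 *       char dBuf[sizeof(input)];
 *       int cSize, dSize;
 *       if (cBuf == NULL) return 1;
 *       cSize = LZ4_compress_HC(input, cBuf, srcSize, bound, 9);
 *       if (cSize <= 0) { free(cBuf); return 1; }   // 0 means compression failed
 *       dSize = LZ4_decompress_safe(cBuf, dBuf, cSize, srcSize);
 *       free(cBuf);
 *       return (dSize == srcSize) ? 0 : 1;   // 0 on successful round trip
 *   }
 */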
/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
{
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
    if (ctx==NULL) return 0;   /* init failure */
    LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
    LZ4_setCompressionLevel(ctx, cLevel);
    return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
}
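
/* Usage sketch (illustrative) : LZ4_compress_HC_destSize() fills `dest` up to
 * `targetDestSize` and reports through *sourceSizePtr how many source bytes
 * were actually consumed; `src` and the buffer sizes below are example values.
 *
 *   LZ4_streamHC_t state;
 *   char dst[128];
 *   int srcSize = (int)srcBufSize;   // in : bytes available ; out : bytes consumed
 *   int const cSize = LZ4_compress_HC_destSize(&state, src, dst, &srcSize, (int)sizeof(dst), 9);
 *   // on success : cSize > 0, and src[0..srcSize-1] is the data that was compressed
 */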
/**************************************
*  Streaming Functions
**************************************/
/* allocation */
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
LZ4_streamHC_t* LZ4_createStreamHC(void)
{
    LZ4_streamHC_t* const state =
        (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
    if (state == NULL) return NULL;
    LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
    return state;
}

int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
{
    DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
    if (!LZ4_streamHCPtr) return 0;  /* support free on NULL */
    FREEMEM(LZ4_streamHCPtr);
    return 0;
}
#endif

LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
    /* check conditions */
    if (buffer == NULL) return NULL;
    if (size < sizeof(LZ4_streamHC_t)) return NULL;
    if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
    /* init */
    {   LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
        MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
    LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
    return LZ4_streamHCPtr;
}

/* just a stub */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}

void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (s->dirty) {
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    } else {
        assert(s->end >= s->prefixStart);
        s->dictLimit += (U32)(s->end - s->prefixStart);
        s->prefixStart = NULL;
        s->end = NULL;
        s->dictCtx = NULL;
    }
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}

void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
    if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
}

void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
{
    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
}
/* LZ4_loadDictHC() :
 * LZ4_streamHCPtr is presumed properly initialized */
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
                    const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    cParams_t cp;
    DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)", LZ4_streamHCPtr, dictionary, dictSize, ctxPtr->compressionLevel);
    assert(dictSize >= 0);
    assert(LZ4_streamHCPtr != NULL);
    if (dictSize > 64 KB) {
        dictionary += (size_t)dictSize - 64 KB;
        dictSize = 64 KB;
    }
    /* need a full initialization, there are bad side-effects when using resetFast() */
    {   int const cLevel = ctxPtr->compressionLevel;
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
        LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
        cp = LZ4HC_getCLevelParams(cLevel);
    }
    LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    if (cp.strat == lz4mid) {
        LZ4MID_fillHTable (ctxPtr, dictionary, (size_t)dictSize);
    } else {
        if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
    }
    return dictSize;
}

void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream)
{
    working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
}

/* compression */

static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
    DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
    if ( (ctxPtr->end >= ctxPtr->prefixStart + 4)
      && (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid) ) {
        LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */
    }

    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
    ctxPtr->lowLimit  = ctxPtr->dictLimit;
    ctxPtr->dictStart = ctxPtr->prefixStart;
    ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
    ctxPtr->prefixStart = newBlock;
    ctxPtr->end = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */

    /* cannot reference an extDict and a dictCtx at the same time */
    ctxPtr->dictCtx = NULL;
}
static int
LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                 const char* src, char* dst,
                                 int* srcSizePtr, int dstCapacity,
                                 limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
                LZ4_streamHCPtr, src, *srcSizePtr, limit);
    assert(ctxPtr != NULL);
    /* auto-init if forgotten */
    if (ctxPtr->prefixStart == NULL)
        LZ4HC_init_internal (ctxPtr, (const BYTE*) src);

    /* Check overflow */
    if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
        if (dictSize > 64 KB) dictSize = 64 KB;
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)src != ctxPtr->end)
        LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
        const BYTE* const dictBegin = ctxPtr->dictStart;
        const BYTE* const dictEnd   = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
        if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit  += (U32)(sourceEnd - ctxPtr->dictStart);
            ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
            /* invalidate dictionary if it's too small */
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) {
                ctxPtr->lowLimit = ctxPtr->dictLimit;
                ctxPtr->dictStart = ctxPtr->prefixStart;
    }   }   }

    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}

int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
    DEBUGLOG(5, "LZ4_compress_HC_continue");
    if (dstCapacity < LZ4_compressBound(srcSize))
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
    else
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
}

int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
}

/* LZ4_saveDictHC :
 * save history content
 * into a user-provided buffer
 * which is then used to continue compression
 */
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
    DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    assert(prefixSize >= 0);
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    if (safeBuffer == NULL) assert(dictSize == 0);
    if (dictSize > 0)
        LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize);
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
        streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize;
        streamPtr->prefixStart = (const BYTE*)safeBuffer;
        streamPtr->dictLimit = endIndex - (U32)dictSize;
        streamPtr->lowLimit  = endIndex - (U32)dictSize;
        streamPtr->dictStart = streamPtr->prefixStart;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit)
            streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
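
/* Usage sketch (illustrative) : a typical streaming loop combining
 * LZ4_compress_HC_continue() with LZ4_saveDictHC(), for the case where srcBuf
 * is reused between blocks; read_block()/write_block() are hypothetical
 * helpers and the sizes are example values (error handling elided).
 *
 *   LZ4_streamHC_t stream;
 *   char dictBuf[64 * 1024];
 *   char srcBuf[4096];
 *   char dstBuf[LZ4_COMPRESSBOUND(sizeof(srcBuf))];
 *   int srcSize;
 *   LZ4_initStreamHC(&stream, sizeof(stream));
 *   LZ4_setCompressionLevel(&stream, LZ4HC_CLEVEL_DEFAULT);
 *   while ((srcSize = read_block(srcBuf, (int)sizeof(srcBuf))) > 0) {
 *       int const cSize = LZ4_compress_HC_continue(&stream, srcBuf, dstBuf,
 *                                                  srcSize, (int)sizeof(dstBuf));
 *       write_block(dstBuf, cSize);
 *       LZ4_saveDictHC(&stream, dictBuf, (int)sizeof(dictBuf));   // preserve history before srcBuf is overwritten
 *   }
 */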
/* ================================================
 *  LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
 * ===============================================*/
typedef struct {
    int price;
    int off;
    int mlen;
    int litlen;
} LZ4HC_optimal_t;
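
/* note : in the parser below, opt[rPos] records the cheapest way found so far
 * to reach position ip+rPos : `price` is the cumulative cost in output bytes,
 * mlen==1 denotes a literal step, otherwise (off,mlen) is the match ending at
 * that position, and litlen counts the literals immediately preceding it. */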
/* price in bytes */
LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
{
    int price = litlen;
    assert(litlen >= 0);
    if (litlen >= (int)RUN_MASK)
        price += 1 + ((litlen-(int)RUN_MASK) / 255);
    return price;
}
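
/* worked example (with RUN_MASK==15) : 14 literals cost 14 bytes, since the
 * run length fits in the token nibble; 15 literals cost 15+1 = 16, as a
 * length byte becomes necessary; 270 literals cost 270 + 1 + (270-15)/255 = 272. */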
/* requires mlen >= MINMATCH */
LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
{
    int price = 1 + 2;  /* token + 16-bit offset */
    assert(litlen >= 0);
    assert(mlen >= MINMATCH);

    price += LZ4HC_literalsPrice(litlen);

    if (mlen >= (int)(ML_MASK+MINMATCH))
        price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);

    return price;
}
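
/* worked example (with MINMATCH==4, ML_MASK==15) : a 4-byte match preceded by
 * no literal costs 1 (token) + 2 (offset) = 3 bytes -- the "min seq price"
 * used by the search loop below; a 19-byte match (ML_MASK+MINMATCH) needs one
 * extra length byte, for a total of 1+2+1 = 4 bytes. */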
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
                      const BYTE* ip, const BYTE* const iHighLimit,
                      int minLen, int nbSearches,
                      const dictCtx_directive dict,
                      const HCfavor_e favorDecSpeed)
{
    LZ4HC_match_t const match0 = { 0, 0, 0 };
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
     * but this won't be the case here, as we define iLowLimit==ip,
     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
    assert(md.back == 0);
    if (md.len <= minLen) return match0;
    if (favorDecSpeed) {
        if ((md.len>18) & (md.len<=36)) md.len = 18;   /* favor dec.speed (shortcut) */
    }
    return md;
}
static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
                                    const char* const source,
                                    char* dst,
                                    int* srcSizePtr,
                                    int dstCapacity,
                                    int const nbSearches,
                                    size_t sufficient_len,
                                    const limitedOutput_directive limit,
                                    int const fullUpdate,
                                    const dictCtx_directive dict,
                                    const HCfavor_e favorDecSpeed)
{
    int retval = 0;
#define TRAILING_LITERALS 3
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
#else
    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which is a bit large for stack... */
#endif

    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;
    BYTE* op = (BYTE*) dst;
    BYTE* opSaved = (BYTE*) dst;
    BYTE* oend = op + dstCapacity;
    int ovml = MINMATCH;  /* overflow - last sequence */
    int ovoff = 0;

    /* init */
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    if (opt == NULL) goto _return_label;
#endif
    DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack to support the LZ4 format restriction */
    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;

    /* Main Loop */
    while (ip <= mflimit) {
        int const llen = (int)(ip - anchor);
        int best_mlen, best_off;
        int cur, last_match_pos = 0;

        LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
        if (firstMatch.len==0) { ip++; continue; }

        if ((size_t)firstMatch.len > sufficient_len) {
            /* good enough solution : immediate encoding */
            int const firstML = firstMatch.len;
            opSaved = op;
            if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) {  /* updates ip, op and anchor */
                ovml = firstML;
                ovoff = firstMatch.off;
                goto _dest_overflow;
            }
            continue;
        }

        /* set prices for first positions (literals) */
        {   int rPos;
            for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
                int const cost = LZ4HC_literalsPrice(llen + rPos);
                opt[rPos].mlen = 1;
                opt[rPos].off = 0;
                opt[rPos].litlen = llen + rPos;
                opt[rPos].price = cost;
                DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
                            rPos, cost, opt[rPos].litlen);
        }   }
        /* set prices using initial match */
        {   int const matchML = firstMatch.len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
            int const offset = firstMatch.off;
            int mlen;
            assert(matchML < LZ4_OPT_NUM);
            for (mlen = MINMATCH ; mlen <= matchML ; mlen++) {
                int const cost = LZ4HC_sequencePrice(llen, mlen);
                opt[mlen].mlen = mlen;
                opt[mlen].off = offset;
                opt[mlen].litlen = llen;
                opt[mlen].price = cost;
                DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
                            mlen, cost, mlen);
        }   }
        last_match_pos = firstMatch.len;
        {   int addLit;
            for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
                opt[last_match_pos+addLit].mlen = 1;  /* literal */
                opt[last_match_pos+addLit].off = 0;
                opt[last_match_pos+addLit].litlen = addLit;
                opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
                DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
                            last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
        }   }

        /* check further positions */
        for (cur = 1; cur < last_match_pos; cur++) {
            const BYTE* const curPtr = ip + cur;
            LZ4HC_match_t newMatch;

            if (curPtr > mflimit) break;
            DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
                    cur, opt[cur].price, opt[cur+1].price, cur+1);
            if (fullUpdate) {
                /* not useful to search here if next position has same (or lower) cost */
                if ( (opt[cur+1].price <= opt[cur].price)
                  /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
                  && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
                    continue;
            } else {
                /* not useful to search here if next position has same (or lower) cost */
                if (opt[cur+1].price <= opt[cur].price) continue;
            }

            DEBUGLOG(7, "search at rPos:%u", cur);
            if (fullUpdate)
                newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
            else
                /* only test matches of minimum length; slightly faster, but misses a few bytes */
                newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
            if (!newMatch.len) continue;

            if ( ((size_t)newMatch.len > sufficient_len)
              || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
                /* immediate encoding */
                best_mlen = newMatch.len;
                best_off = newMatch.off;
                last_match_pos = cur + 1;
                goto encode;
            }

            /* before match : set price with literals at beginning */
            {   int const baseLitlen = opt[cur].litlen;
                int litlen;
                for (litlen = 1; litlen < MINMATCH; litlen++) {
                    int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
                    int const pos = cur + litlen;
                    if (price < opt[pos].price) {
                        opt[pos].mlen = 1;  /* literal */
                        opt[pos].off = 0;
                        opt[pos].litlen = baseLitlen+litlen;
                        opt[pos].price = price;
                        DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
                                    pos, price, opt[pos].litlen);
            }   }   }

            /* set prices using match at position = cur */
            {   int const matchML = newMatch.len;
                int ml = MINMATCH;
                assert(cur + newMatch.len < LZ4_OPT_NUM);
                for ( ; ml <= matchML ; ml++) {
                    int const pos = cur + ml;
                    int const offset = newMatch.off;
                    int price;
                    int ll;
                    DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
                                pos, last_match_pos);
                    if (opt[cur].mlen == 1) {
                        ll = opt[cur].litlen;
                        price = ((cur > ll) ? opt[cur - ll].price : 0)
                              + LZ4HC_sequencePrice(ll, ml);
                    } else {
                        ll = 0;
                        price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
                    }

                    assert((U32)favorDecSpeed <= 1);
                    if (pos > last_match_pos+TRAILING_LITERALS
                     || price <= opt[pos].price - (int)favorDecSpeed) {
                        DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
                                    pos, price, ml);
                        assert(pos < LZ4_OPT_NUM);
                        if ( (ml == matchML)  /* last pos of last match */
                          && (last_match_pos < pos) )
                            last_match_pos = pos;
                        opt[pos].mlen = ml;
                        opt[pos].off = offset;
                        opt[pos].litlen = ll;
                        opt[pos].price = price;
            }   }   }
            /* complete following positions with literals */
            {   int addLit;
                for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
                    opt[last_match_pos+addLit].mlen = 1;  /* literal */
                    opt[last_match_pos+addLit].off = 0;
                    opt[last_match_pos+addLit].litlen = addLit;
                    opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
                    DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
            }   }
        }   /* for (cur = 1; cur < last_match_pos; cur++) */

        assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
        best_mlen = opt[last_match_pos].mlen;
        best_off = opt[last_match_pos].off;
        cur = last_match_pos - best_mlen;

encode: /* cur, last_match_pos, best_mlen, best_off must be set */
        assert(cur < LZ4_OPT_NUM);
        assert(last_match_pos >= 1);   /* == 1 when only one candidate */
        DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
        {   int candidate_pos = cur;
            int selected_matchLength = best_mlen;
            int selected_offset = best_off;
            while (1) {  /* from end to beginning */
                int const next_matchLength = opt[candidate_pos].mlen;  /* can be 1, means literal */
                int const next_offset = opt[candidate_pos].off;
                DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
                opt[candidate_pos].mlen = selected_matchLength;
                opt[candidate_pos].off = selected_offset;
                selected_matchLength = next_matchLength;
                selected_offset = next_offset;
                if (next_matchLength > candidate_pos) break;  /* last match elected, first match to encode */
                assert(next_matchLength > 0);  /* can be 1, means literal */
                candidate_pos -= next_matchLength;
        }   }

        /* encode all recorded sequences in order */
        {   int rPos = 0;  /* relative position (to ip) */
            while (rPos < last_match_pos) {
                int const ml = opt[rPos].mlen;
                int const offset = opt[rPos].off;
                if (ml == 1) { ip++; rPos++; continue; }  /* literal; note : can end up with several literals, in which case, skip them */
                rPos += ml;
                assert(ml >= MINMATCH);
                assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
                opSaved = op;
                if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) {  /* updates ip, op and anchor */
                    ovml = ml;
                    ovoff = offset;
                    goto _dest_overflow;
        }   }   }
    }  /* while (ip <= mflimit) */

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
        size_t const totalSize = 1 + llAdd + lastRunSize;
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) {  /* Check output limit */
                retval = 0;
                goto _return_label;
            }
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = (RUN_MASK << ML_BITS);
            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize << ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - source);
    retval = (int) ((char*)op-dst);
    goto _return_label;

_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : @ip, @anchor, @ovml and @ovoff must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3;  /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
        op = opSaved;  /* restore correct out pointer */
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
            assert(maxMlSize < INT_MAX); assert(ovml >= 0);
            if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
                DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
                DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend);
                DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
        }   }
        goto _last_literals;
    }
_return_label:
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    if (opt) FREEMEM(opt);
#endif
    return retval;
}
/***************************************************
*  Deprecated Functions
***************************************************/

/* These functions currently generate deprecation warnings */

/* Wrappers for deprecated compression functions */
int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }

/* Deprecated streaming functions */
int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }

/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
 * @return : 0 on success, !=0 if error */
int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
{
    LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
    if (hc4 == NULL) return 1;   /* init failed */
    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
    return 0;
}

#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
void* LZ4_createHC (const char* inputBuffer)
{
    LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
    if (hc4 == NULL) return NULL;   /* not enough memory */
    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
    return hc4;
}

int LZ4_freeHC (void* LZ4HC_Data)
{
    if (!LZ4HC_Data) return 0;  /* support free on NULL */
    FREEMEM(LZ4HC_Data);
    return 0;
}
#endif

int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
{
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
}

int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
{
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
}

char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
    LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
    const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit;
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel);
    /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */
    return (char*)(uptrval)bufferStart;
}