lz4frame.c 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078
  1. /*
  2. * LZ4 auto-framing library
  3. * Copyright (C) 2011-2016, Yann Collet.
  4. *
  5. * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions are
  9. * met:
  10. *
  11. * - Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * - Redistributions in binary form must reproduce the above
  14. * copyright notice, this list of conditions and the following disclaimer
  15. * in the documentation and/or other materials provided with the
  16. * distribution.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. * You can contact the author at :
  31. * - LZ4 homepage : http://www.lz4.org
  32. * - LZ4 source repository : https://github.com/lz4/lz4
  33. */
  34. /* LZ4F is a stand-alone API to create LZ4-compressed Frames
  35. * in full conformance with specification v1.6.1 .
  36. * This library rely upon memory management capabilities (malloc, free)
  37. * provided either by <stdlib.h>,
  38. * or redirected towards another library of user's choice
  39. * (see Memory Routines below).
  40. */
  41. /*-************************************
  42. * Compiler Options
  43. **************************************/
  44. #ifdef _MSC_VER /* Visual Studio */
  45. # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  46. #endif
  47. /*-************************************
  48. * Tuning parameters
  49. **************************************/
  50. /*
  51. * LZ4F_HEAPMODE :
  52. * Select how default compression functions will allocate memory for their hash table,
  53. * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
  54. */
  55. #ifndef LZ4F_HEAPMODE
  56. # define LZ4F_HEAPMODE 0
  57. #endif
  58. /*-************************************
  59. * Library declarations
  60. **************************************/
  61. #define LZ4F_STATIC_LINKING_ONLY
  62. #include "lz4frame.h"
  63. #define LZ4_STATIC_LINKING_ONLY
  64. #include "lz4.h"
  65. #define LZ4_HC_STATIC_LINKING_ONLY
  66. #include "lz4hc.h"
  67. #define XXH_STATIC_LINKING_ONLY
  68. #include "xxhash.h"
  69. /*-************************************
  70. * Memory routines
  71. **************************************/
  72. /*
  73. * User may redirect invocations of
  74. * malloc(), calloc() and free()
  75. * towards another library or solution of their choice
  76. * by modifying below section.
  77. **/
  78. #include <string.h> /* memset, memcpy, memmove */
  79. #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
  80. # define MEM_INIT(p,v,s) memset((p),(v),(s))
  81. #endif
  82. #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
  83. # include <stdlib.h> /* malloc, calloc, free */
  84. # define ALLOC(s) malloc(s)
  85. # define ALLOC_AND_ZERO(s) calloc(1,(s))
  86. # define FREEMEM(p) free(p)
  87. #endif
  88. static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
  89. {
  90. /* custom calloc defined : use it */
  91. if (cmem.customCalloc != NULL) {
  92. return cmem.customCalloc(cmem.opaqueState, s);
  93. }
  94. /* nothing defined : use default <stdlib.h>'s calloc() */
  95. if (cmem.customAlloc == NULL) {
  96. return ALLOC_AND_ZERO(s);
  97. }
  98. /* only custom alloc defined : use it, and combine it with memset() */
  99. { void* const p = cmem.customAlloc(cmem.opaqueState, s);
  100. if (p != NULL) MEM_INIT(p, 0, s);
  101. return p;
  102. } }
  103. static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
  104. {
  105. /* custom malloc defined : use it */
  106. if (cmem.customAlloc != NULL) {
  107. return cmem.customAlloc(cmem.opaqueState, s);
  108. }
  109. /* nothing defined : use default <stdlib.h>'s malloc() */
  110. return ALLOC(s);
  111. }
  112. static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
  113. {
  114. /* custom malloc defined : use it */
  115. if (cmem.customFree != NULL) {
  116. cmem.customFree(cmem.opaqueState, p);
  117. return;
  118. }
  119. /* nothing defined : use default <stdlib.h>'s free() */
  120. FREEMEM(p);
  121. }
  122. /*-************************************
  123. * Debug
  124. **************************************/
  125. #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
  126. # include <assert.h>
  127. #else
  128. # ifndef assert
  129. # define assert(condition) ((void)0)
  130. # endif
  131. #endif
  132. #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
  133. #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
  134. # include <stdio.h>
  135. static int g_debuglog_enable = 1;
  136. # define DEBUGLOG(l, ...) { \
  137. if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
  138. fprintf(stderr, __FILE__ ": "); \
  139. fprintf(stderr, __VA_ARGS__); \
  140. fprintf(stderr, " \n"); \
  141. } }
  142. #else
  143. # define DEBUGLOG(l, ...) {} /* disabled */
  144. #endif
  145. /*-************************************
  146. * Basic Types
  147. **************************************/
  148. #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  149. # include <stdint.h>
  150. typedef uint8_t BYTE;
  151. typedef uint16_t U16;
  152. typedef uint32_t U32;
  153. typedef int32_t S32;
  154. typedef uint64_t U64;
  155. #else
  156. typedef unsigned char BYTE;
  157. typedef unsigned short U16;
  158. typedef unsigned int U32;
  159. typedef signed int S32;
  160. typedef unsigned long long U64;
  161. #endif
  162. /* unoptimized version; solves endianness & alignment issues */
  163. static U32 LZ4F_readLE32 (const void* src)
  164. {
  165. const BYTE* const srcPtr = (const BYTE*)src;
  166. U32 value32 = srcPtr[0];
  167. value32 += ((U32)srcPtr[1])<< 8;
  168. value32 += ((U32)srcPtr[2])<<16;
  169. value32 += ((U32)srcPtr[3])<<24;
  170. return value32;
  171. }
  172. static void LZ4F_writeLE32 (void* dst, U32 value32)
  173. {
  174. BYTE* const dstPtr = (BYTE*)dst;
  175. dstPtr[0] = (BYTE)value32;
  176. dstPtr[1] = (BYTE)(value32 >> 8);
  177. dstPtr[2] = (BYTE)(value32 >> 16);
  178. dstPtr[3] = (BYTE)(value32 >> 24);
  179. }
  180. static U64 LZ4F_readLE64 (const void* src)
  181. {
  182. const BYTE* const srcPtr = (const BYTE*)src;
  183. U64 value64 = srcPtr[0];
  184. value64 += ((U64)srcPtr[1]<<8);
  185. value64 += ((U64)srcPtr[2]<<16);
  186. value64 += ((U64)srcPtr[3]<<24);
  187. value64 += ((U64)srcPtr[4]<<32);
  188. value64 += ((U64)srcPtr[5]<<40);
  189. value64 += ((U64)srcPtr[6]<<48);
  190. value64 += ((U64)srcPtr[7]<<56);
  191. return value64;
  192. }
  193. static void LZ4F_writeLE64 (void* dst, U64 value64)
  194. {
  195. BYTE* const dstPtr = (BYTE*)dst;
  196. dstPtr[0] = (BYTE)value64;
  197. dstPtr[1] = (BYTE)(value64 >> 8);
  198. dstPtr[2] = (BYTE)(value64 >> 16);
  199. dstPtr[3] = (BYTE)(value64 >> 24);
  200. dstPtr[4] = (BYTE)(value64 >> 32);
  201. dstPtr[5] = (BYTE)(value64 >> 40);
  202. dstPtr[6] = (BYTE)(value64 >> 48);
  203. dstPtr[7] = (BYTE)(value64 >> 56);
  204. }
  205. /*-************************************
  206. * Constants
  207. **************************************/
  208. #ifndef LZ4_SRC_INCLUDED /* avoid double definition */
  209. # define KB *(1<<10)
  210. # define MB *(1<<20)
  211. # define GB *(1<<30)
  212. #endif
  213. #define _1BIT 0x01
  214. #define _2BITS 0x03
  215. #define _3BITS 0x07
  216. #define _4BITS 0x0F
  217. #define _8BITS 0xFF
  218. #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
  219. #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
  220. static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
  221. static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
  222. static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
  223. static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
  224. /*-************************************
  225. * Structures and local types
  226. **************************************/
/* how the current block's payload is emitted : LZ4-compressed, or stored raw */
typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t;

/* Streaming compression state : one instance per frame being produced. */
typedef struct LZ4F_cctx_s
{
    LZ4F_CustomMem cmem;          /* allocator set used for all internal buffers */
    LZ4F_preferences_t prefs;     /* frame parameters in effect for this frame */
    U32 version;                  /* API version provided at context creation */
    U32 cStage;                   /* coarse state machine; 0 = uninitialized (see context creation) */
    const LZ4F_CDict* cdict;      /* optional digested dictionary (not owned by cctx) */
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE* tmpBuff;                /* internal buffer, for streaming */
    BYTE* tmpIn;                  /* starting position of data compress within internal buffer (>= tmpBuff) */
    size_t tmpInSize;             /* amount of data to compress after tmpIn */
    U64 totalInSize;              /* running total of input bytes; presumably checked against contentSize at frame end — confirm in compressEnd */
    XXH32_state_t xxh;            /* running state for the frame content checksum */
    void* lz4CtxPtr;              /* underlying LZ4 or LZ4HC compression state */
    U16 lz4CtxAlloc;              /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    U16 lz4CtxState;              /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    LZ4F_blockCompression_t blockCompression;  /* current block emission mode */
} LZ4F_cctx_t;
  247. /*-************************************
  248. * Error management
  249. **************************************/
  250. #define LZ4F_GENERATE_STRING(STRING) #STRING,
  251. static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
  252. unsigned LZ4F_isError(LZ4F_errorCode_t code)
  253. {
  254. return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
  255. }
  256. const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
  257. {
  258. static const char* codeError = "Unspecified error code";
  259. if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
  260. return codeError;
  261. }
  262. LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
  263. {
  264. if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
  265. return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
  266. }
/* Encode an LZ4F_errorCodes value into the error range of size_t
 * (the topmost values, as recognized by LZ4F_isError()). */
static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
{
    /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
    LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
}
  273. #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
  274. #define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e)
  275. #define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r)
/* library version number, for runtime compatibility checks against LZ4F_VERSION */
unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }

/* highest meaningful compression level (LZ4HC's maximum) */
int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
  278. size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
  279. {
  280. static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
  281. if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
  282. if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
  283. RETURN_ERROR(maxBlockSize_invalid);
  284. { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
  285. return blockSizes[blockSizeIdx];
  286. } }
  287. /*-************************************
  288. * Private functions
  289. **************************************/
  290. #define MIN(a,b) ( (a) < (b) ? (a) : (b) )
  291. static BYTE LZ4F_headerChecksum (const void* header, size_t length)
  292. {
  293. U32 const xxh = XXH32(header, length, 0);
  294. return (BYTE)(xxh >> 8);
  295. }
  296. /*-************************************
  297. * Simple-pass compression functions
  298. **************************************/
  299. static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
  300. const size_t srcSize)
  301. {
  302. LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
  303. size_t maxBlockSize = 64 KB;
  304. while (requestedBSID > proposedBSID) {
  305. if (srcSize <= maxBlockSize)
  306. return proposedBSID;
  307. proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
  308. maxBlockSize <<= 2;
  309. }
  310. return requestedBSID;
  311. }
  312. /*! LZ4F_compressBound_internal() :
  313. * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
  314. * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
  315. * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
  316. * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
  317. */
/* Worst-case dstCapacity for compressing srcSize bytes with given preferences,
 * accounting for alreadyBuffered bytes pending inside the cctx.
 * prefsPtr==NULL selects worst-case preferences (all checksums enabled). */
static size_t LZ4F_compressBound_internal(size_t srcSize,
                                    const LZ4F_preferences_t* preferencesPtr,
                                          size_t alreadyBuffered)
{
    LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;   /* worst case */
    prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled;   /* worst case */
    {   const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
        U32 const flush = prefsPtr->autoFlush | (srcSize==0);  /* srcSize==0 means "bound for flush/end" */
        LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
        size_t const blockSize = LZ4F_getBlockSize(blockID);
        size_t const maxBuffered = blockSize - 1;
        size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
        size_t const maxSrcSize = srcSize + bufferedSize;   /* total data potentially emitted by this call */
        unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
        size_t const partialBlockSize = maxSrcSize & (blockSize-1);   /* works because blockSize is a power of 2 */
        size_t const lastBlockSize = flush ? partialBlockSize : 0;    /* a partial block is only emitted when flushing */
        unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
        size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
        size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);  /* EndMark + optional content checksum */
        /* worst case : every block stored uncompressed, each with its header and optional checksum */
        return ((BHSize + blockCRCSize) * nbBlocks) +
               (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
    }
}
  342. size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
  343. {
  344. LZ4F_preferences_t prefs;
  345. size_t const headerSize = maxFHSize; /* max header size, including optional fields */
  346. if (preferencesPtr!=NULL) prefs = *preferencesPtr;
  347. else MEM_INIT(&prefs, 0, sizeof(prefs));
  348. prefs.autoFlush = 1;
  349. return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);;
  350. }
  351. /*! LZ4F_compressFrame_usingCDict() :
  352. * Compress srcBuffer using a dictionary, in a single step.
  353. * cdict can be NULL, in which case, no dictionary is used.
  354. * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
  355. * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
  356. * however, it's the only way to provide a dictID, so it's not recommended.
  357. * @return : number of bytes written into dstBuffer,
  358. * or an error code if it fails (can be tested using LZ4F_isError())
  359. */
size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                                     void* dstBuffer, size_t dstCapacity,
                                     const void* srcBuffer, size_t srcSize,
                                     const LZ4F_CDict* cdict,
                                     const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t prefs;
    LZ4F_compressOptions_t options;
    BYTE* const dstStart = (BYTE*) dstBuffer;
    BYTE* dstPtr = dstStart;
    BYTE* const dstEnd = dstStart + dstCapacity;

    /* local copy of preferences, adjustable without side effect on caller */
    if (preferencesPtr!=NULL)
        prefs = *preferencesPtr;
    else
        MEM_INIT(&prefs, 0, sizeof(prefs));
    if (prefs.frameInfo.contentSize != 0)
        prefs.frameInfo.contentSize = (U64)srcSize;   /* auto-correct content size if selected (!=0) */

    /* shrink block size to the smallest standard size able to hold srcSize */
    prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
    prefs.autoFlush = 1;
    if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
        prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* only one block => no need for inter-block link */

    MEM_INIT(&options, 0, sizeof(options));
    options.stableSrc = 1;   /* srcBuffer remains valid for the whole operation */

    /* single-shot operation : dst must be able to hold the worst case */
    RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);

    {   size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs);   /* write header */
        FORWARD_IF_ERROR(headerSize);
        dstPtr += headerSize;   /* header size */ }

    assert(dstEnd >= dstPtr);
    {   size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
        FORWARD_IF_ERROR(cSize);
        dstPtr += cSize; }

    assert(dstEnd >= dstPtr);
    {   size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options);   /* flush last block, and generate suffix */
        FORWARD_IF_ERROR(tailSize);
        dstPtr += tailSize; }

    assert(dstEnd >= dstStart);
    return (size_t)(dstPtr - dstStart);
}
  398. /*! LZ4F_compressFrame() :
  399. * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
  400. * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
  401. * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
  402. * @return : number of bytes written into dstBuffer.
  403. * or an error code if it fails (can be tested using LZ4F_isError())
  404. */
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
                          const void* srcBuffer, size_t srcSize,
                          const LZ4F_preferences_t* preferencesPtr)
{
    size_t result;
#if (LZ4F_HEAPMODE)
    /* heap mode : allocate the compression context dynamically */
    LZ4F_cctx_t* cctxPtr;
    result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
    FORWARD_IF_ERROR(result);
#else
    /* stack mode : context and (fast-level) LZ4 state live on the stack */
    LZ4F_cctx_t cctx;
    LZ4_stream_t lz4ctx;
    LZ4F_cctx_t* const cctxPtr = &cctx;

    MEM_INIT(&cctx, 0, sizeof(cctx));
    cctx.version = LZ4F_VERSION;
    cctx.maxBufferSize = 5 MB;   /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
    if ( preferencesPtr == NULL
      || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
        /* fast compression level : the stack-allocated LZ4_stream_t suffices */
        LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
        cctxPtr->lz4CtxPtr = &lz4ctx;
        cctxPtr->lz4CtxAlloc = 1;
        cctxPtr->lz4CtxState = 1;
    }
    /* NOTE(review): for HC levels, lz4CtxPtr stays NULL here; presumably the HC
     * state is heap-allocated during compression, hence the LZ4F_free below — confirm */
#endif
    DEBUGLOG(4, "LZ4F_compressFrame");

    result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
                                           srcBuffer, srcSize,
                                           NULL, preferencesPtr);

#if (LZ4F_HEAPMODE)
    LZ4F_freeCompressionContext(cctxPtr);
#else
    if ( preferencesPtr != NULL
      && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
        /* release the HC state acquired during compression */
        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
    }
#endif
    return result;
}
  443. /*-***************************************************
  444. * Dictionary compression
  445. *****************************************************/
/* Digested dictionary, ready for reuse across compressions.
 * Usage after creation is read-only, which makes concurrent sharing possible. */
struct LZ4F_CDict_s {
    LZ4F_CustomMem cmem;      /* allocator set that created this CDict; used to free it */
    void* dictContent;        /* copy of (at most) the last 64 KB of the dictionary */
    LZ4_stream_t* fastCtx;    /* pre-loaded state for fast compression levels */
    LZ4_streamHC_t* HCCtx;    /* pre-loaded state for HC compression levels */
}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
  452. LZ4F_CDict*
  453. LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
  454. {
  455. const char* dictStart = (const char*)dictBuffer;
  456. LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
  457. DEBUGLOG(4, "LZ4F_createCDict_advanced");
  458. if (!cdict) return NULL;
  459. cdict->cmem = cmem;
  460. if (dictSize > 64 KB) {
  461. dictStart += dictSize - 64 KB;
  462. dictSize = 64 KB;
  463. }
  464. cdict->dictContent = LZ4F_malloc(dictSize, cmem);
  465. cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
  466. if (cdict->fastCtx)
  467. LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
  468. cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
  469. if (cdict->HCCtx)
  470. LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t));
  471. if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
  472. LZ4F_freeCDict(cdict);
  473. return NULL;
  474. }
  475. memcpy(cdict->dictContent, dictStart, dictSize);
  476. LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
  477. LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
  478. LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
  479. return cdict;
  480. }
  481. /*! LZ4F_createCDict() :
  482. * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
  483. * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
  484. * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
  485. * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
  486. * @return : digested dictionary for compression, or NULL if failed */
  487. LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
  488. {
  489. DEBUGLOG(4, "LZ4F_createCDict");
  490. return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
  491. }
  492. void LZ4F_freeCDict(LZ4F_CDict* cdict)
  493. {
  494. if (cdict==NULL) return; /* support free on NULL */
  495. LZ4F_free(cdict->dictContent, cdict->cmem);
  496. LZ4F_free(cdict->fastCtx, cdict->cmem);
  497. LZ4F_free(cdict->HCCtx, cdict->cmem);
  498. LZ4F_free(cdict, cdict->cmem);
  499. }
  500. /*-*********************************
  501. * Advanced compression functions
  502. ***********************************/
  503. LZ4F_cctx*
  504. LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
  505. {
  506. LZ4F_cctx* const cctxPtr =
  507. (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
  508. if (cctxPtr==NULL) return NULL;
  509. cctxPtr->cmem = customMem;
  510. cctxPtr->version = version;
  511. cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */
  512. return cctxPtr;
  513. }
  514. /*! LZ4F_createCompressionContext() :
  515. * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
  516. * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
  517. * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
  518. * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
  519. * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
  520. * Object can release its memory using LZ4F_freeCompressionContext();
  521. **/
  522. LZ4F_errorCode_t
  523. LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
  524. {
  525. assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
  526. /* in case it nonetheless happen in production */
  527. RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);
  528. *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
  529. RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
  530. return LZ4F_OK_NoError;
  531. }
  532. LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
  533. {
  534. if (cctxPtr != NULL) { /* support free on NULL */
  535. LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
  536. LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
  537. LZ4F_free(cctxPtr, cctxPtr->cmem);
  538. }
  539. return LZ4F_OK_NoError;
  540. }
  541. /**
  542. * This function prepares the internal LZ4(HC) stream for a new compression,
  543. * resetting the context and attaching the dictionary, if there is one.
  544. *
  545. * It needs to be called at the beginning of each independent compression
  546. * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
  547. * beginning of each block in blockIndependent mode).
  548. */
  549. static void LZ4F_initStream(void* ctx,
  550. const LZ4F_CDict* cdict,
  551. int level,
  552. LZ4F_blockMode_t blockMode) {
  553. if (level < LZ4HC_CLEVEL_MIN) {
  554. if (cdict != NULL || blockMode == LZ4F_blockLinked) {
  555. /* In these cases, we will call LZ4_compress_fast_continue(),
  556. * which needs an already reset context. Otherwise, we'll call a
  557. * one-shot API. The non-continued APIs internally perform their own
  558. * resets at the beginning of their calls, where they know what
  559. * tableType they need the context to be in. So in that case this
  560. * would be misguided / wasted work. */
  561. LZ4_resetStream_fast((LZ4_stream_t*)ctx);
  562. }
  563. LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
  564. } else {
  565. LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
  566. LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
  567. }
  568. }
  569. static int ctxTypeID_to_size(int ctxTypeID) {
  570. switch(ctxTypeID) {
  571. case 1:
  572. return LZ4_sizeofState();
  573. case 2:
  574. return LZ4_sizeofStateHC();
  575. default:
  576. return 0;
  577. }
  578. }
  579. /*! LZ4F_compressBegin_usingCDict() :
  580. * init streaming compression AND writes frame header into @dstBuffer.
  581. * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
  582. * @return : number of bytes written into @dstBuffer for the header
  583. * or an error code (can be tested using LZ4F_isError())
  584. */
size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
                                     void* dstBuffer, size_t dstCapacity,
                                     const LZ4F_CDict* cdict,
                                     const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    /* the header can occupy up to maxFHSize bytes : refuse smaller buffers up-front */
    RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
    if (preferencesPtr == NULL) preferencesPtr = &prefNull;
    cctxPtr->prefs = *preferencesPtr;

    /* cctx Management : ensure the internal LZ4 context matches the requested level.
     * ctxTypeID : 1 = fast (LZ4_stream_t), 2 = HC (LZ4_streamHC_t) */
    {   U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
        int requiredSize = ctxTypeID_to_size(ctxTypeID);
        int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc);
        if (allocatedSize < requiredSize) {
            /* not enough space allocated */
            LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
            if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                /* must take ownership of memory allocation,
                 * in order to respect custom allocator contract */
                cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem);
                if (cctxPtr->lz4CtxPtr)
                    LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem);
                if (cctxPtr->lz4CtxPtr)
                    LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
            }
            RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed);
            cctxPtr->lz4CtxAlloc = ctxTypeID;
            cctxPtr->lz4CtxState = ctxTypeID;
        } else if (cctxPtr->lz4CtxState != ctxTypeID) {
            /* otherwise, a sufficient buffer is already allocated,
             * but we need to reset it to the correct context type */
            if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
                LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
            }
            cctxPtr->lz4CtxState = ctxTypeID;
    }   }

    /* Buffer Management */
    if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
        cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
    cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);

    {   size_t const requiredBuffSize = preferencesPtr->autoFlush ?
            ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) :  /* only needs past data up to window size */
            cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
        if (cctxPtr->maxBufferSize < requiredBuffSize) {
            /* grow tmpBuff ; zero maxBufferSize first so a failed alloc leaves a consistent state */
            cctxPtr->maxBufferSize = 0;
            LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
            cctxPtr->tmpBuff = (BYTE*)LZ4F_calloc(requiredBuffSize, cctxPtr->cmem);
            RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed);
            cctxPtr->maxBufferSize = requiredBuffSize;
    }   }
    cctxPtr->tmpIn = cctxPtr->tmpBuff;
    cctxPtr->tmpInSize = 0;
    (void)XXH32_reset(&(cctxPtr->xxh), 0);   /* running content checksum */

    /* context init */
    cctxPtr->cdict = cdict;
    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
        /* frame init only for blockLinked : blockIndependent will be init at each block */
        LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
    }
    if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
    }

    /* Magic Number */
    LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
    dstPtr += 4;
    {   BYTE* const headerStart = dstPtr;   /* FLG..dictID span, covered by the header checksum byte */

        /* FLG Byte */
        *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)   /* Version('01') */
            + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
            + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
            + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
            + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
            + (cctxPtr->prefs.frameInfo.dictID > 0) );
        /* BD Byte */
        *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
        /* Optional Frame content size field */
        if (cctxPtr->prefs.frameInfo.contentSize) {
            LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
            dstPtr += 8;
            cctxPtr->totalInSize = 0;   /* compared against contentSize at LZ4F_compressEnd() */
        }
        /* Optional dictionary ID field */
        if (cctxPtr->prefs.frameInfo.dictID) {
            LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
            dstPtr += 4;
        }
        /* Header CRC Byte */
        *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
        dstPtr++;
    }

    cctxPtr->cStage = 1;   /* header written, now request input data block */
    return (size_t)(dstPtr - dstStart);
}
  685. /*! LZ4F_compressBegin() :
  686. * init streaming compression AND writes frame header into @dstBuffer.
  687. * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
  688. * @preferencesPtr can be NULL, in which case default parameters are selected.
  689. * @return : number of bytes written into dstBuffer for the header
  690. * or an error code (can be tested using LZ4F_isError())
  691. */
  692. size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
  693. void* dstBuffer, size_t dstCapacity,
  694. const LZ4F_preferences_t* preferencesPtr)
  695. {
  696. return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
  697. NULL, preferencesPtr);
  698. }
  699. /* LZ4F_compressBound() :
  700. * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
  701. * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
  702. * This function cannot fail.
  703. */
  704. size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
  705. {
  706. if (preferencesPtr && preferencesPtr->autoFlush) {
  707. return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
  708. }
  709. return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
  710. }
/* Signature shared by all per-block compression strategies below.
 * @return : compressed size written into @dst, or 0 (treated as "store uncompressed"). */
typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
  712. /*! LZ4F_makeBlock():
  713. * compress a single block, add header and optional checksum.
  714. * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
  715. */
static size_t LZ4F_makeBlock(void* dst,
                             const void* src, size_t srcSize,
                             compressFunc_t compress, void* lz4ctx, int level,
                             const LZ4F_CDict* cdict,
                             LZ4F_blockChecksum_t crcFlag)
{
    BYTE* const cSizePtr = (BYTE*)dst;   /* 4-byte block header (size field) written first */
    U32 cSize;
    assert(compress != NULL);
    /* dst budget is srcSize-1 : a block that does not strictly shrink is stored raw instead */
    cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
                          (int)(srcSize), (int)(srcSize-1),
                          level, cdict);

    if (cSize == 0 || cSize >= srcSize) {
        /* compression failed or did not gain : store the block uncompressed,
         * signaled by the high bit of the size field */
        cSize = (U32)srcSize;
        LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
        memcpy(cSizePtr+BHSize, src, srcSize);
    } else {
        LZ4F_writeLE32(cSizePtr, cSize);
    }
    if (crcFlag) {
        U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0);  /* checksum of compressed data */
        LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
    }
    /* total bytes emitted : block header + payload + optional 4-byte block checksum */
    return BHSize + cSize + ((U32)crcFlag)*BFSize;
}
  741. static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  742. {
  743. int const acceleration = (level < 0) ? -level + 1 : 1;
  744. DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize);
  745. LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
  746. if (cdict) {
  747. return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
  748. } else {
  749. return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
  750. }
  751. }
  752. static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  753. {
  754. int const acceleration = (level < 0) ? -level + 1 : 1;
  755. (void)cdict; /* init once at beginning of frame */
  756. DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize);
  757. return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
  758. }
  759. static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  760. {
  761. LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
  762. if (cdict) {
  763. return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
  764. }
  765. return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
  766. }
  767. static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  768. {
  769. (void)level; (void)cdict; /* init once at beginning of frame */
  770. return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
  771. }
  772. static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  773. {
  774. (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
  775. return 0;
  776. }
  777. static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode)
  778. {
  779. if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock;
  780. if (level < LZ4HC_CLEVEL_MIN) {
  781. if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
  782. return LZ4F_compressBlock_continue;
  783. }
  784. if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
  785. return LZ4F_compressBlockHC_continue;
  786. }
  787. /* Save history (up to 64KB) into @tmpBuff */
  788. static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
  789. {
  790. if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
  791. return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
  792. return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
  793. }
/* Tracks where the most recently compressed block sourced its input from,
 * which determines whether the history window must be saved afterwards. */
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
/* Default (all-zero) compression options, substituted when the caller passes NULL. */
static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
  796. /*! LZ4F_compressUpdateImpl() :
  797. * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
  798. * When successful, the function always entirely consumes @srcBuffer.
  799. * src data is either buffered or compressed into @dstBuffer.
  800. * If the block compression does not match the compression of the previous block, the old data is flushed
  801. * and operations continue with the new compression mode.
  802. * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
  803. * @compressOptionsPtr is optional : provide NULL to mean "default".
  804. * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  805. * or an error code if it fails (which can be tested using LZ4F_isError())
  806. * After an error, the state is left in a UB state, and must be re-initialized.
  807. */
  808. static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
  809. void* dstBuffer, size_t dstCapacity,
  810. const void* srcBuffer, size_t srcSize,
  811. const LZ4F_compressOptions_t* compressOptionsPtr,
  812. LZ4F_blockCompression_t blockCompression)
  813. {
  814. size_t const blockSize = cctxPtr->maxBlockSize;
  815. const BYTE* srcPtr = (const BYTE*)srcBuffer;
  816. const BYTE* const srcEnd = srcPtr + srcSize;
  817. BYTE* const dstStart = (BYTE*)dstBuffer;
  818. BYTE* dstPtr = dstStart;
  819. LZ4F_lastBlockStatus lastBlockCompressed = notDone;
  820. compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
  821. size_t bytesWritten;
  822. DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
  823. RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */
  824. if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
  825. RETURN_ERROR(dstMaxSize_tooSmall);
  826. if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
  827. RETURN_ERROR(dstMaxSize_tooSmall);
  828. /* flush currently written block, to continue with new block compression */
  829. if (cctxPtr->blockCompression != blockCompression) {
  830. bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
  831. dstPtr += bytesWritten;
  832. cctxPtr->blockCompression = blockCompression;
  833. }
  834. if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;
  835. /* complete tmp buffer */
  836. if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
  837. size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
  838. assert(blockSize > cctxPtr->tmpInSize);
  839. if (sizeToCopy > srcSize) {
  840. /* add src to tmpIn buffer */
  841. memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
  842. srcPtr = srcEnd;
  843. cctxPtr->tmpInSize += srcSize;
  844. /* still needs some CRC */
  845. } else {
  846. /* complete tmpIn block and then compress it */
  847. lastBlockCompressed = fromTmpBuffer;
  848. memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
  849. srcPtr += sizeToCopy;
  850. dstPtr += LZ4F_makeBlock(dstPtr,
  851. cctxPtr->tmpIn, blockSize,
  852. compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
  853. cctxPtr->cdict,
  854. cctxPtr->prefs.frameInfo.blockChecksumFlag);
  855. if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
  856. cctxPtr->tmpInSize = 0;
  857. } }
  858. while ((size_t)(srcEnd - srcPtr) >= blockSize) {
  859. /* compress full blocks */
  860. lastBlockCompressed = fromSrcBuffer;
  861. dstPtr += LZ4F_makeBlock(dstPtr,
  862. srcPtr, blockSize,
  863. compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
  864. cctxPtr->cdict,
  865. cctxPtr->prefs.frameInfo.blockChecksumFlag);
  866. srcPtr += blockSize;
  867. }
  868. if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
  869. /* autoFlush : remaining input (< blockSize) is compressed */
  870. lastBlockCompressed = fromSrcBuffer;
  871. dstPtr += LZ4F_makeBlock(dstPtr,
  872. srcPtr, (size_t)(srcEnd - srcPtr),
  873. compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
  874. cctxPtr->cdict,
  875. cctxPtr->prefs.frameInfo.blockChecksumFlag);
  876. srcPtr = srcEnd;
  877. }
  878. /* preserve dictionary within @tmpBuff whenever necessary */
  879. if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
  880. /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
  881. assert(blockCompression == LZ4B_COMPRESSED);
  882. if (compressOptionsPtr->stableSrc) {
  883. cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */
  884. } else {
  885. int const realDictSize = LZ4F_localSaveDict(cctxPtr);
  886. assert(0 <= realDictSize && realDictSize <= 64 KB);
  887. cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
  888. }
  889. }
  890. /* keep tmpIn within limits */
  891. if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */
  892. && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */
  893. {
  894. /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
  895. * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
  896. int const realDictSize = LZ4F_localSaveDict(cctxPtr);
  897. cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
  898. assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
  899. }
  900. /* some input data left, necessarily < blockSize */
  901. if (srcPtr < srcEnd) {
  902. /* fill tmp buffer */
  903. size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
  904. memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
  905. cctxPtr->tmpInSize = sizeToCopy;
  906. }
  907. if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
  908. (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
  909. cctxPtr->totalInSize += srcSize;
  910. return (size_t)(dstPtr - dstStart);
  911. }
  912. /*! LZ4F_compressUpdate() :
  913. * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
  914. * When successful, the function always entirely consumes @srcBuffer.
  915. * src data is either buffered or compressed into @dstBuffer.
  916. * If previously an uncompressed block was written, buffered data is flushed
  917. * before appending compressed data is continued.
  918. * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
  919. * @compressOptionsPtr is optional : provide NULL to mean "default".
  920. * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  921. * or an error code if it fails (which can be tested using LZ4F_isError())
  922. * After an error, the state is left in a UB state, and must be re-initialized.
  923. */
  924. size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
  925. void* dstBuffer, size_t dstCapacity,
  926. const void* srcBuffer, size_t srcSize,
  927. const LZ4F_compressOptions_t* compressOptionsPtr)
  928. {
  929. return LZ4F_compressUpdateImpl(cctxPtr,
  930. dstBuffer, dstCapacity,
  931. srcBuffer, srcSize,
  932. compressOptionsPtr, LZ4B_COMPRESSED);
  933. }
/*! LZ4F_uncompressedUpdate() :
 * LZ4F_uncompressedUpdate() can be called repetitively to add as much data as necessary, stored without compression.
  936. * When successful, the function always entirely consumes @srcBuffer.
  937. * src data is either buffered or compressed into @dstBuffer.
  938. * If previously an uncompressed block was written, buffered data is flushed
  939. * before appending compressed data is continued.
  940. * This is only supported when LZ4F_blockIndependent is used
  941. * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
  942. * @compressOptionsPtr is optional : provide NULL to mean "default".
  943. * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  944. * or an error code if it fails (which can be tested using LZ4F_isError())
  945. * After an error, the state is left in a UB state, and must be re-initialized.
  946. */
  947. size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
  948. void* dstBuffer, size_t dstCapacity,
  949. const void* srcBuffer, size_t srcSize,
  950. const LZ4F_compressOptions_t* compressOptionsPtr) {
  951. RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
  952. return LZ4F_compressUpdateImpl(cctxPtr,
  953. dstBuffer, dstCapacity,
  954. srcBuffer, srcSize,
  955. compressOptionsPtr, LZ4B_UNCOMPRESSED);
  956. }
  957. /*! LZ4F_flush() :
  958. * When compressed data must be sent immediately, without waiting for a block to be filled,
  959. * invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
  960. * The result of the function is the number of bytes written into dstBuffer.
  961. * It can be zero, this means there was no data left within LZ4F_cctx.
  962. * The function outputs an error code if it fails (can be tested using LZ4F_isError())
  963. * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
  964. */
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
                  void* dstBuffer, size_t dstCapacity,
                  const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;

    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
    /* worst case output : block header + stored data + optional block checksum */
    RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
    (void)compressOptionsPtr;   /* not useful (yet) */

    /* select compression function */
    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression);

    /* compress tmp buffer */
    dstPtr += LZ4F_makeBlock(dstPtr,
                             cctxPtr->tmpIn, cctxPtr->tmpInSize,
                             compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                             cctxPtr->cdict,
                             cctxPtr->prefs.frameInfo.blockChecksumFlag);
    assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));

    /* in linked mode, the flushed bytes stay in tmpBuff as history for the next block */
    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
        cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;

    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {   /* necessarily LZ4F_blockLinked */
        /* compact : preserve only the 64KB history window at the start of tmpBuff */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }

    return (size_t)(dstPtr - dstStart);
}
  995. /*! LZ4F_compressEnd() :
  996. * When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
  997. * It will flush whatever data remained within compressionContext (like LZ4_flush())
  998. * but also properly finalize the frame, with an endMark and an (optional) checksum.
  999. * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
  1000. * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
  1001. * or an error code if it fails (can be tested using LZ4F_isError())
  1002. * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
  1003. */
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
                        void* dstBuffer, size_t dstCapacity,
                        const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    /* flush whatever remains buffered within the context */
    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
    DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
    FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;

    assert(flushSize <= dstCapacity);
    dstCapacity -= flushSize;

    /* endMark : 4-byte zero value terminating the block sequence */
    RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);
    dstPtr += 4;   /* endMark */

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
        /* needs 8 bytes total after flush : endMark (4, already written) + checksum (4) */
        RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
        DEBUGLOG(5,"Writing 32-bit content checksum");
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }

    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */
    cctxPtr->maxBufferSize = 0;   /* reuse HC context */

    /* if a content size was announced in the header, the totals must match exactly */
    if (cctxPtr->prefs.frameInfo.contentSize) {
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
            RETURN_ERROR(frameSize_wrong);
    }
    return (size_t)(dstPtr - dstStart);
}
  1034. /*-***************************************************
  1035. * Frame Decompression
  1036. *****************************************************/
/* Stages of the decompression state machine.
 * "get" stages read an element directly from caller input when it is fully
 * available; the paired "store" stages accumulate partial input into internal
 * buffers across calls until the element is complete. */
typedef enum {
    dstage_getFrameHeader=0, dstage_storeFrameHeader,
    dstage_init,                                    /* allocate internal buffers after header decode */
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
    dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,           /* frame suffix = optional content checksum */
    dstage_getSFrameSize, dstage_storeSFrameSize,   /* skippable-frame size field */
    dstage_skipSkippable
} dStage_t;
struct LZ4F_dctx_s {
    LZ4F_CustomMem cmem;         /* custom allocator used for all internal buffers */
    LZ4F_frameInfo_t frameInfo;  /* parameters decoded from the frame header */
    U32 version;
    dStage_t dStage;             /* current stage of the decoding state machine */
    U64 frameRemainingSize;      /* bytes still expected when contentSize was announced */
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE* tmpIn;                 /* staging buffer for partial input (blocks, checksums) */
    size_t tmpInSize;            /* nb of bytes currently held in tmpIn */
    size_t tmpInTarget;          /* nb of bytes tmpIn must reach before processing */
    BYTE* tmpOutBuffer;
    const BYTE* dict;            /* history window, used in linked-blocks mode */
    size_t dictSize;
    BYTE* tmpOut;                /* presumably points within tmpOutBuffer — confirm against decode loop */
    size_t tmpOutSize;
    size_t tmpOutStart;          /* nb of tmpOut bytes already delivered to the caller */
    XXH32_state_t xxh;           /* running content checksum */
    XXH32_state_t blockChecksum; /* running per-block checksum */
    int skipChecksum;            /* when set, checksum verification is skipped */
    BYTE header[LZ4F_HEADER_SIZE_MAX];  /* staging area for partial frame headers */
};  /* typedef'd to LZ4F_dctx in lz4frame.h */
  1070. LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
  1071. {
  1072. LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
  1073. if (dctx == NULL) return NULL;
  1074. dctx->cmem = customMem;
  1075. dctx->version = version;
  1076. return dctx;
  1077. }
  1078. /*! LZ4F_createDecompressionContext() :
  1079. * Create a decompressionContext object, which will track all decompression operations.
  1080. * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
  1081. * Object can later be released using LZ4F_freeDecompressionContext().
  1082. * @return : if != 0, there was an error during context creation.
  1083. */
  1084. LZ4F_errorCode_t
  1085. LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
  1086. {
  1087. assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */
  1088. RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */
  1089. *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
  1090. if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */
  1091. RETURN_ERROR(allocation_failed);
  1092. }
  1093. return LZ4F_OK_NoError;
  1094. }
  1095. LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
  1096. {
  1097. LZ4F_errorCode_t result = LZ4F_OK_NoError;
  1098. if (dctx != NULL) { /* can accept NULL input, like free() */
  1099. result = (LZ4F_errorCode_t)dctx->dStage;
  1100. LZ4F_free(dctx->tmpIn, dctx->cmem);
  1101. LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
  1102. LZ4F_free(dctx, dctx->cmem);
  1103. }
  1104. return result;
  1105. }
  1106. /*==--- Streaming Decompression operations ---==*/
  1107. void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
  1108. {
  1109. dctx->dStage = dstage_getFrameHeader;
  1110. dctx->dict = NULL;
  1111. dctx->dictSize = 0;
  1112. dctx->skipChecksum = 0;
  1113. }
  1114. /*! LZ4F_decodeHeader() :
  1115. * input : `src` points at the **beginning of the frame**
  1116. * output : set internal values of dctx, such as
  1117. * dctx->frameInfo and dctx->dStage.
  1118. * Also allocates internal buffers.
  1119. * @return : nb Bytes read from src (necessarily <= srcSize)
  1120. * or an error code (testable with LZ4F_isError())
  1121. */
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
{
    unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;
    DEBUGLOG(5, "LZ4F_decodeHeader");

    /* need to decode header to get frameInfo */
    RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
    MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));

    /* special case : skippable frames */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
        dctx->frameInfo.frameType = LZ4F_skippableFrame;
        if (src == (void*)(dctx->header)) {
            /* bytes were accumulated in the internal header buffer :
             * keep storing until the 8-byte skippable-frame prefix is complete */
            dctx->tmpInSize = srcSize;
            dctx->tmpInTarget = 8;
            dctx->dStage = dstage_storeSFrameSize;
            return srcSize;
        } else {
            dctx->dStage = dstage_getSFrameSize;
            return 4;
    }   }

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
        DEBUGLOG(4, "frame header error : unknown magic number");
        RETURN_ERROR(frameType_unknown);
    }
#endif
    dctx->frameInfo.frameType = LZ4F_frame;

    /* Flags */
    {   U32 const FLG = srcPtr[4];
        U32 const version = (FLG>>6) & _2BITS;
        blockChecksumFlag = (FLG>>4) & _1BIT;
        blockMode = (FLG>>5) & _1BIT;
        contentSizeFlag = (FLG>>3) & _1BIT;
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
        if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
        if (version != 1) RETURN_ERROR(headerVersion_wrong);       /* Version Number, only supported value */
    }

    /* Frame Header Size : base + optional content size (8) + optional dictID (4) */
    frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);

    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header */
        if (srcPtr != dctx->header)
            memcpy(dctx->header, srcPtr, srcSize);
        dctx->tmpInSize = srcSize;
        dctx->tmpInTarget = frameHeaderSize;
        dctx->dStage = dstage_storeFrameHeader;
        return srcSize;
    }

    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
        if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
        if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);  /* Reserved bits */
    }

    /* check header : HC byte covers FLG..dictID (everything after magic, before itself) */
    assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
        RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
#endif

    /* save */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
    dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag)
        dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    if (dictIDFlag)
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);

    dctx->dStage = dstage_init;
    return frameHeaderSize;
}
  1201. /*! LZ4F_headerSize() :
  1202. * @return : size of frame header
  1203. * or an error code, which can be tested using LZ4F_isError()
  1204. */
  1205. size_t LZ4F_headerSize(const void* src, size_t srcSize)
  1206. {
  1207. RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
  1208. /* minimal srcSize to determine header size */
  1209. if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
  1210. RETURN_ERROR(frameHeader_incomplete);
  1211. /* special case : skippable frames */
  1212. if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
  1213. return 8;
  1214. /* control magic number */
  1215. #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  1216. if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
  1217. RETURN_ERROR(frameType_unknown);
  1218. #endif
  1219. /* Frame Header Size */
  1220. { BYTE const FLG = ((const BYTE*)src)[4];
  1221. U32 const contentSizeFlag = (FLG>>3) & _1BIT;
  1222. U32 const dictIDFlag = FLG & _1BIT;
  1223. return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
  1224. }
  1225. }
  1226. /*! LZ4F_getFrameInfo() :
  1227. * This function extracts frame parameters (max blockSize, frame checksum, etc.).
  1228. * Usage is optional. Objective is to provide relevant information for allocation purposes.
  1229. * This function works in 2 situations :
  1230. * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
  1231. * Amount of input data provided must be large enough to successfully decode the frame header.
  1232. * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
  1233. * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
  1234. * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
  1235. * Decompression must resume from (srcBuffer + *srcSizePtr).
  1236. * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call,
  1237. * or an error code which can be tested using LZ4F_isError()
  1238. * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
  1239. * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
  1240. */
  1241. LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
  1242. LZ4F_frameInfo_t* frameInfoPtr,
  1243. const void* srcBuffer, size_t* srcSizePtr)
  1244. {
  1245. LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
  1246. if (dctx->dStage > dstage_storeFrameHeader) {
  1247. /* frameInfo already decoded */
  1248. size_t o=0, i=0;
  1249. *srcSizePtr = 0;
  1250. *frameInfoPtr = dctx->frameInfo;
  1251. /* returns : recommended nb of bytes for LZ4F_decompress() */
  1252. return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
  1253. } else {
  1254. if (dctx->dStage == dstage_storeFrameHeader) {
  1255. /* frame decoding already started, in the middle of header => automatic fail */
  1256. *srcSizePtr = 0;
  1257. RETURN_ERROR(frameDecoding_alreadyStarted);
  1258. } else {
  1259. size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
  1260. if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
  1261. if (*srcSizePtr < hSize) {
  1262. *srcSizePtr=0;
  1263. RETURN_ERROR(frameHeader_incomplete);
  1264. }
  1265. { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
  1266. if (LZ4F_isError(decodeResult)) {
  1267. *srcSizePtr = 0;
  1268. } else {
  1269. *srcSizePtr = decodeResult;
  1270. decodeResult = BHSize; /* block header size */
  1271. }
  1272. *frameInfoPtr = dctx->frameInfo;
  1273. return decodeResult;
  1274. } } }
  1275. }
/* LZ4F_updateDict() :
 * Maintains the rolling dictionary (history window) used by LZ4F_blockLinked mode,
 * after @dstSize bytes were just written at @dstPtr.
 * Four situations are handled, in order of preference :
 *   1. prefix mode : history is contiguous with the new data, just extend it;
 *   2. dstBuffer itself already holds >= 64 KB of history : point dict into dstBuffer;
 *   3. history continues inside tmpOutBuffer (withinTmp) : extend or compact there;
 *   4. otherwise : join old dict + new data into tmpOutBuffer.
 * only used for LZ4F_blockLinked mode
 * Condition : @dstPtr != NULL
 */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
                      unsigned withinTmp)
{
    assert(dstPtr != NULL);
    /* first block of a linked frame : start history at current output position */
    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
    assert(dctx->dict != NULL);

    if (dctx->dict + dctx->dictSize == dstPtr) {   /* prefix mode, everything within dstBuffer */
        dctx->dictSize += dstSize;
        return;
    }

    assert(dstPtr >= dstBufferStart);
    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {   /* history in dstBuffer becomes large enough to become dictionary */
        dctx->dict = (const BYTE*)dstBufferStart;
        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
        return;
    }

    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */

    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
    assert(dctx->tmpOutBuffer != NULL);

    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }

    if (withinTmp) {   /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
        /* preserveSize = room available in front of tmpOut;
         * copySize = how much old history must be kept so that
         * (kept history + tmpOut content) still covers 64 KB */
        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        if (dctx->tmpOutSize > 64 KB) copySize = 0;          /* tmpOut alone already covers the window */
        if (copySize > preserveSize) copySize = preserveSize; /* clamp to available room */

        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
        return;
    }

    if (dctx->dict == dctx->tmpOutBuffer) {   /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {   /* tmp buffer not large enough */
            /* slide history down, keeping just enough so total reaches 64 KB after append */
            size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
        dctx->dictSize += dstSize;
        return;
    }

    /* join dict & dest into tmp : keep at most (64 KB - dstSize) of old history,
     * then append the new data, so the combined window is <= 64 KB */
    {   size_t preserveSize = 64 KB - dstSize;
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dstSize;
    }
}
/*! LZ4F_decompress() :
 *  Call this function repetitively to regenerate decompressed data from srcBuffer.
 *  The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
 *  into dstBuffer of capacity *dstSizePtr.
 *
 *  The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
 *
 *  The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
 *  If number of bytes read is < number of bytes provided, then decompression operation is not complete.
 *  Remaining data will have to be presented again in a subsequent invocation.
 *
 *  The function result is an hint of the better srcSize to use for next call to LZ4F_decompress.
 *  Schematically, it's the size of the current (or remaining) compressed block + header of next block.
 *  Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
 *  Note that this is just a hint, and it's always possible to provide any srcSize value.
 *  When a frame is fully decoded, @return will be 0.
 *  If decompression failed, @return is an error code which can be tested using LZ4F_isError().
 *
 *  Implementation note : this is a resumable state machine driven by dctx->dStage.
 *  Several stages fall through into the following `case` via
 *  `if (cond)` / `if (0)` constructs placed immediately before the label :
 *  when the condition is false the store-stage body is skipped entirely.
 */
size_t LZ4F_decompress(LZ4F_dctx* dctx,
                       void* dstBuffer, size_t* dstSizePtr,
                       const void* srcBuffer, size_t* srcSizePtr,
                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
{
    LZ4F_decompressOptions_t optionsNull;
    const BYTE* const srcStart = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcStart + *srcSizePtr;
    const BYTE* srcPtr = srcStart;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
    BYTE* dstPtr = dstStart;
    const BYTE* selectedIn = NULL;   /* points at current 4+ bytes to decode : either srcPtr directly, or dctx->tmpIn/header when gathered across calls */
    unsigned doAnotherStage = 1;
    size_t nextSrcSizeHint = 1;

    DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
            srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
    if (dstBuffer == NULL) assert(*dstSizePtr == 0);
    MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
    if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
    /* outputs are reported incrementally; start from zero */
    *srcSizePtr = 0;
    *dstSizePtr = 0;
    assert(dctx != NULL);
    dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0);   /* once set, disable for the remainder of the frame */

    /* behaves as a state machine */

    while (doAnotherStage) {

        switch(dctx->dStage)
        {

        case dstage_getFrameHeader:
            DEBUGLOG(6, "dstage_getFrameHeader");
            if ((size_t)(srcEnd-srcPtr) >= maxFHSize) {  /* enough to decode - shortcut */
                size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr));  /* will update dStage appropriately */
                FORWARD_IF_ERROR(hSize);
                srcPtr += hSize;
                break;
            }
            /* input too small : accumulate header bytes into dctx->header */
            dctx->tmpInSize = 0;
            if (srcEnd-srcPtr == 0) return minFHSize;   /* 0-size input */
            dctx->tmpInTarget = minFHSize;   /* minimum size to decode header */
            dctx->dStage = dstage_storeFrameHeader;
            /* fall-through */

        case dstage_storeFrameHeader:
            DEBUGLOG(6, "dstage_storeFrameHeader");
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
            }
            if (dctx->tmpInSize < dctx->tmpInTarget) {
                nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize;   /* rest of header + nextBlockHeader */
                doAnotherStage = 0;   /* not enough src data, ask for some more */
                break;
            }
            FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) );   /* will update dStage appropriately */
            break;

        case dstage_init:
            DEBUGLOG(6, "dstage_init");
            if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
            /* internal buffers allocation :
             * tmpIn holds one compressed block (+ optional block checksum),
             * tmpOutBuffer holds one decoded block plus up to 128 KB of history
             * when blocks are linked */
            {   size_t const bufferNeeded = dctx->maxBlockSize
                    + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
                if (bufferNeeded > dctx->maxBufferSize) {   /* tmp buffers too small */
                    dctx->maxBufferSize = 0;   /* ensure allocation will be re-attempted on next entry*/
                    LZ4F_free(dctx->tmpIn, dctx->cmem);
                    dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
                    LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
                    dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed);
                    dctx->maxBufferSize = bufferNeeded;
            }   }
            dctx->tmpInSize = 0;
            dctx->tmpInTarget = 0;
            dctx->tmpOut = dctx->tmpOutBuffer;
            dctx->tmpOutStart = 0;
            dctx->tmpOutSize = 0;

            dctx->dStage = dstage_getBlockHeader;
            /* fall-through */

        case dstage_getBlockHeader:
            if ((size_t)(srcEnd - srcPtr) >= BHSize) {
                /* block header fully available in input : read it in place */
                selectedIn = srcPtr;
                srcPtr += BHSize;
            } else {
                /* not enough input to read cBlockSize field */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeBlockHeader;
            }

            if (dctx->dStage == dstage_storeBlockHeader)   /* can be skipped */
        case dstage_storeBlockHeader:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = BHSize - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;

                if (dctx->tmpInSize < BHSize) {   /* not enough input for cBlockSize */
                    nextSrcSizeHint = BHSize - dctx->tmpInSize;
                    doAnotherStage  = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeBlockHeader) */

        /* decode block header */
            {   U32 const blockHeader = LZ4F_readLE32(selectedIn);
                size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;   /* low 31 bits = compressed size */
                size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;

                if (blockHeader==0) {   /* frameEnd signal, no more block */
                    DEBUGLOG(5, "end of frame");
                    dctx->dStage = dstage_getSuffix;
                    break;
                }
                if (nextCBlockSize > dctx->maxBlockSize) {
                    RETURN_ERROR(maxBlockSize_invalid);
                }
                if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
                    /* next block is uncompressed : copy straight to output */
                    dctx->tmpInTarget = nextCBlockSize;
                    DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
                    if (dctx->frameInfo.blockChecksumFlag) {
                        (void)XXH32_reset(&dctx->blockChecksum, 0);
                    }
                    dctx->dStage = dstage_copyDirect;
                    break;
                }
                /* next block is a compressed block */
                dctx->tmpInTarget = nextCBlockSize + crcSize;
                dctx->dStage = dstage_getCBlock;
                if (dstPtr==dstEnd || srcPtr==srcEnd) {
                    /* no room to output, or no more input : stop here and
                     * hint the size needed to process this whole block */
                    nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
                    doAnotherStage = 0;
                }
                break;
            }

        case dstage_copyDirect:   /* uncompressed block */
            DEBUGLOG(6, "dstage_copyDirect");
            {   size_t sizeToCopy;
                if (dstPtr == NULL) {
                    sizeToCopy = 0;
                } else {
                    size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
                    sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
                    memcpy(dstPtr, srcPtr, sizeToCopy);
                    if (!dctx->skipChecksum) {
                        if (dctx->frameInfo.blockChecksumFlag) {
                            (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
                        }
                        if (dctx->frameInfo.contentChecksumFlag)
                            (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
                    }
                    if (dctx->frameInfo.contentSize)
                        dctx->frameRemainingSize -= sizeToCopy;

                    /* history management (linked blocks only)*/
                    if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                        LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
                }   }

                srcPtr += sizeToCopy;
                dstPtr += sizeToCopy;
                if (sizeToCopy == dctx->tmpInTarget) {   /* all done */
                    if (dctx->frameInfo.blockChecksumFlag) {
                        dctx->tmpInSize = 0;
                        dctx->dStage = dstage_getBlockChecksum;
                    } else
                        dctx->dStage = dstage_getBlockHeader;  /* new block */
                    break;
                }
                dctx->tmpInTarget -= sizeToCopy;  /* need to copy more */
            }
            nextSrcSizeHint = dctx->tmpInTarget +
                            +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                            + BHSize /* next header size */;
            doAnotherStage = 0;
            break;

        /* check block checksum for recently transferred uncompressed block */
        case dstage_getBlockChecksum:
            DEBUGLOG(6, "dstage_getBlockChecksum");
            {   const void* crcSrc;
                if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
                    /* 4 checksum bytes available directly in input */
                    crcSrc = srcPtr;
                    srcPtr += 4;
                } else {
                    /* gather checksum bytes across calls into dctx->header */
                    size_t const stillToCopy = 4 - dctx->tmpInSize;
                    size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
                    memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                    dctx->tmpInSize += sizeToCopy;
                    srcPtr += sizeToCopy;
                    if (dctx->tmpInSize < 4) {  /* all input consumed */
                        doAnotherStage = 0;
                        break;
                    }
                    crcSrc = dctx->header;
                }
                if (!dctx->skipChecksum) {
                    U32 const readCRC = LZ4F_readLE32(crcSrc);
                    U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    DEBUGLOG(6, "compare block checksum");
                    if (readCRC != calcCRC) {
                        DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
                                readCRC, calcCRC);
                        RETURN_ERROR(blockChecksum_invalid);
                    }
#else
                    (void)readCRC;
                    (void)calcCRC;
#endif
            }   }
            dctx->dStage = dstage_getBlockHeader;  /* new block */
            break;

        case dstage_getCBlock:
            DEBUGLOG(6, "dstage_getCBlock");
            if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
                /* compressed block split across calls : buffer it in tmpIn */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeCBlock;
                break;
            }
            /* input large enough to read full block directly */
            selectedIn = srcPtr;
            srcPtr += dctx->tmpInTarget;

            if (0)  /* always jump over next block */
        case dstage_storeCBlock:
            {   size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
                size_t const inputLeft = (size_t)(srcEnd-srcPtr);
                size_t const sizeToCopy = MIN(wantedData, inputLeft);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) {  /* need more input */
                    nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
                                    + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                                    + BHSize /* next header size */;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }

            /* At this stage, input is large enough to decode a block */

            /* First, decode and control block checksum if it exists */
            if (dctx->frameInfo.blockChecksumFlag) {
                assert(dctx->tmpInTarget >= 4);
                dctx->tmpInTarget -= 4;   /* last 4 bytes of the block are the checksum */
                assert(selectedIn != NULL);  /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
                {   U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
                    U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
#else
                    (void)readBlockCrc;
                    (void)calcBlockCrc;
#endif
            }   }

            /* decode directly into destination buffer if there is enough room */
            if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
                 /* unless the dictionary is stored in tmpOut:
                  * in which case it's faster to decode within tmpOut
                  * to benefit from prefix speedup */
              && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
            {
                const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                assert(dstPtr != NULL);
                if (dict && dictSize > 1 GB) {
                    /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dstPtr,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
                    XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;

                /* dictionary management */
                if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
                    LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
                }

                dstPtr += decodedSize;
                dctx->dStage = dstage_getBlockHeader;  /* end of block, let's get another one */
                break;
            }

            /* not enough place into dst : decode into tmpOut */

            /* manage dictionary */
            if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                if (dctx->dict == dctx->tmpOutBuffer) {
                    /* truncate dictionary to 64 KB if too big */
                    if (dctx->dictSize > 128 KB) {
                        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
                        dctx->dictSize = 64 KB;
                    }
                    dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
                } else {  /* dict not within tmpOut */
                    /* decode after a reserved zone large enough to hold the dictionary */
                    size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
                    dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
            }   }

            /* Decode block into tmpOut */
            {   const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                if (dict && dictSize > 1 GB) {
                    /* the dictSize param is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dctx->tmpOut,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
                    XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;
                dctx->tmpOutSize = (size_t)decodedSize;
                dctx->tmpOutStart = 0;
                dctx->dStage = dstage_flushOut;
            }
            /* fall-through */

        case dstage_flushOut:  /* flush decoded data from tmpOut to dstBuffer */
            DEBUGLOG(6, "dstage_flushOut");
            if (dstPtr != NULL) {
                size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
                memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);

                /* dictionary management */
                if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
                    LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);

                dctx->tmpOutStart += sizeToCopy;
                dstPtr += sizeToCopy;
            }
            if (dctx->tmpOutStart == dctx->tmpOutSize) {  /* all flushed */
                dctx->dStage = dstage_getBlockHeader;  /* get next block */
                break;
            }
            /* could not flush everything : stop there, just request a block header */
            doAnotherStage = 0;
            nextSrcSizeHint = BHSize;
            break;

        case dstage_getSuffix:
            /* frame end : check announced content size was fully produced */
            RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong);   /* incorrect frame size decoded */
            if (!dctx->frameInfo.contentChecksumFlag) {  /* no checksum, frame is completed */
                nextSrcSizeHint = 0;
                LZ4F_resetDecompressionContext(dctx);
                doAnotherStage = 0;
                break;
            }
            if ((srcEnd - srcPtr) < 4) {  /* not enough size for entire CRC */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeSuffix;
            } else {
                selectedIn = srcPtr;
                srcPtr += 4;
            }

            if (dctx->dStage == dstage_storeSuffix)   /* can be skipped */
        case dstage_storeSuffix:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = 4 - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < 4) {  /* not enough input to read complete suffix */
                    nextSrcSizeHint = 4 - dctx->tmpInSize;
                    doAnotherStage=0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeSuffix) */

        /* case dstage_checkSuffix: */   /* no direct entry, avoid initialization risks */
            if (!dctx->skipChecksum) {
                U32 const readCRC = LZ4F_readLE32(selectedIn);
                U32 const resultCRC = XXH32_digest(&(dctx->xxh));
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
#else
                (void)readCRC;
                (void)resultCRC;
#endif
            }
            nextSrcSizeHint = 0;
            LZ4F_resetDecompressionContext(dctx);
            doAnotherStage = 0;
            break;

        case dstage_getSFrameSize:
            if ((srcEnd - srcPtr) >= 4) {
                selectedIn = srcPtr;
                srcPtr += 4;
            } else {
                /* not enough input to read cBlockSize field */
                dctx->tmpInSize = 4;   /* magic number already stored in header[0..3] */
                dctx->tmpInTarget = 8;
                dctx->dStage = dstage_storeSFrameSize;
            }

            if (dctx->dStage == dstage_storeSFrameSize)
        case dstage_storeSFrameSize:
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
                                             (size_t)(srcEnd - srcPtr) );
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) {
                    /* not enough input to get full sBlockSize; wait for more */
                    nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->header + 4;
            }   /* if (dctx->dStage == dstage_storeSFrameSize) */

        /* case dstage_decodeSFrameSize: */   /* no direct entry */
            {   size_t const SFrameSize = LZ4F_readLE32(selectedIn);
                dctx->frameInfo.contentSize = SFrameSize;
                dctx->tmpInTarget = SFrameSize;   /* nb of bytes left to skip */
                dctx->dStage = dstage_skipSkippable;
                break;
            }

        case dstage_skipSkippable:
            {   size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
                srcPtr += skipSize;
                dctx->tmpInTarget -= skipSize;
                doAnotherStage = 0;
                nextSrcSizeHint = dctx->tmpInTarget;
                if (nextSrcSizeHint) break;  /* still more to skip */
                /* frame fully skipped : prepare context for a new frame */
                LZ4F_resetDecompressionContext(dctx);
                break;
            }
        }   /* switch (dctx->dStage) */
    }   /* while (doAnotherStage) */

    /* preserve history within tmpOut whenever necessary :
     * if decoding pauses mid-frame in linked mode and dst content cannot be
     * relied upon to remain stable, copy up to 64 KB of history into
     * tmpOutBuffer so the next call can use it as dictionary */
    LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
    if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked)  /* next block will use up to 64KB from previous ones */
      && (dctx->dict != dctx->tmpOutBuffer)             /* dictionary is not already within tmp */
      && (dctx->dict != NULL)                           /* dictionary exists */
      && (!decompressOptionsPtr->stableDst)             /* cannot rely on dst data to remain there for next call */
      && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) )  /* valid stages : [init ... getSuffix[ */
    {
        if (dctx->dStage == dstage_flushOut) {
            /* same compaction scheme as LZ4F_updateDict's withinTmp branch */
            size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
            size_t copySize = 64 KB - dctx->tmpOutSize;
            const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
            if (dctx->tmpOutSize > 64 KB) copySize = 0;
            if (copySize > preserveSize) copySize = preserveSize;
            assert(dctx->tmpOutBuffer != NULL);

            memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = preserveSize + dctx->tmpOutStart;
        } else {
            /* copy last 64 KB of history in front of tmpOutBuffer */
            const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
            size_t const newDictSize = MIN(dctx->dictSize, 64 KB);

            memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = newDictSize;
            dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
        }
    }

    *srcSizePtr = (size_t)(srcPtr - srcStart);
    *dstSizePtr = (size_t)(dstPtr - dstStart);
    return nextSrcSizeHint;
}
  1814. /*! LZ4F_decompress_usingDict() :
  1815. * Same as LZ4F_decompress(), using a predefined dictionary.
  1816. * Dictionary is used "in place", without any preprocessing.
  1817. * It must remain accessible throughout the entire frame decoding.
  1818. */
  1819. size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
  1820. void* dstBuffer, size_t* dstSizePtr,
  1821. const void* srcBuffer, size_t* srcSizePtr,
  1822. const void* dict, size_t dictSize,
  1823. const LZ4F_decompressOptions_t* decompressOptionsPtr)
  1824. {
  1825. if (dctx->dStage <= dstage_init) {
  1826. dctx->dict = (const BYTE*)dict;
  1827. dctx->dictSize = dictSize;
  1828. }
  1829. return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
  1830. srcBuffer, srcSizePtr,
  1831. decompressOptionsPtr);
  1832. }