lz4frame.c 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136
  1. /*
  2. * LZ4 auto-framing library
  3. * Copyright (C) 2011-2016, Yann Collet.
  4. *
  5. * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions are
  9. * met:
  10. *
  11. * - Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * - Redistributions in binary form must reproduce the above
  14. * copyright notice, this list of conditions and the following disclaimer
  15. * in the documentation and/or other materials provided with the
  16. * distribution.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. * You can contact the author at :
  31. * - LZ4 homepage : http://www.lz4.org
  32. * - LZ4 source repository : https://github.com/lz4/lz4
  33. */
  34. /* LZ4F is a stand-alone API to create LZ4-compressed Frames
  35. * in full conformance with specification v1.6.1 .
  36. * This library rely upon memory management capabilities (malloc, free)
  37. * provided either by <stdlib.h>,
  38. * or redirected towards another library of user's choice
  39. * (see Memory Routines below).
  40. */
  41. /*-************************************
  42. * Compiler Options
  43. **************************************/
  44. #include <limits.h>
  45. #ifdef _MSC_VER /* Visual Studio */
  46. # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
  47. #endif
  48. /*-************************************
  49. * Tuning parameters
  50. **************************************/
  51. /*
  52. * LZ4F_HEAPMODE :
  53. * Control how LZ4F_compressFrame allocates the Compression State,
  54. * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
  55. */
  56. #ifndef LZ4F_HEAPMODE
  57. # define LZ4F_HEAPMODE 0
  58. #endif
  59. /*-************************************
  60. * Library declarations
  61. **************************************/
  62. #define LZ4F_STATIC_LINKING_ONLY
  63. #include "lz4frame.h"
  64. #define LZ4_STATIC_LINKING_ONLY
  65. #include "lz4.h"
  66. #define LZ4_HC_STATIC_LINKING_ONLY
  67. #include "lz4hc.h"
  68. #define XXH_STATIC_LINKING_ONLY
  69. #include "xxhash.h"
  70. /*-************************************
  71. * Memory routines
  72. **************************************/
  73. /*
  74. * User may redirect invocations of
  75. * malloc(), calloc() and free()
  76. * towards another library or solution of their choice
  77. * by modifying below section.
  78. **/
  79. #include <string.h> /* memset, memcpy, memmove */
  80. #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
  81. # define MEM_INIT(p,v,s) memset((p),(v),(s))
  82. #endif
  83. #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
  84. # include <stdlib.h> /* malloc, calloc, free */
  85. # define ALLOC(s) malloc(s)
  86. # define ALLOC_AND_ZERO(s) calloc(1,(s))
  87. # define FREEMEM(p) free(p)
  88. #endif
  89. static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
  90. {
  91. /* custom calloc defined : use it */
  92. if (cmem.customCalloc != NULL) {
  93. return cmem.customCalloc(cmem.opaqueState, s);
  94. }
  95. /* nothing defined : use default <stdlib.h>'s calloc() */
  96. if (cmem.customAlloc == NULL) {
  97. return ALLOC_AND_ZERO(s);
  98. }
  99. /* only custom alloc defined : use it, and combine it with memset() */
  100. { void* const p = cmem.customAlloc(cmem.opaqueState, s);
  101. if (p != NULL) MEM_INIT(p, 0, s);
  102. return p;
  103. } }
  104. static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
  105. {
  106. /* custom malloc defined : use it */
  107. if (cmem.customAlloc != NULL) {
  108. return cmem.customAlloc(cmem.opaqueState, s);
  109. }
  110. /* nothing defined : use default <stdlib.h>'s malloc() */
  111. return ALLOC(s);
  112. }
  113. static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
  114. {
  115. if (p == NULL) return;
  116. if (cmem.customFree != NULL) {
  117. /* custom allocation defined : use it */
  118. cmem.customFree(cmem.opaqueState, p);
  119. return;
  120. }
  121. /* nothing defined : use default <stdlib.h>'s free() */
  122. FREEMEM(p);
  123. }
  124. /*-************************************
  125. * Debug
  126. **************************************/
  127. #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
  128. # include <assert.h>
  129. #else
  130. # ifndef assert
  131. # define assert(condition) ((void)0)
  132. # endif
  133. #endif
  134. #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
  135. #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
  136. # include <stdio.h>
  137. static int g_debuglog_enable = 1;
  138. # define DEBUGLOG(l, ...) { \
  139. if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
  140. fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \
  141. fprintf(stderr, __VA_ARGS__); \
  142. fprintf(stderr, " \n"); \
  143. } }
  144. #else
  145. # define DEBUGLOG(l, ...) {} /* disabled */
  146. #endif
  147. /*-************************************
  148. * Basic Types
  149. **************************************/
  150. #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
  151. # include <stdint.h>
  152. typedef uint8_t BYTE;
  153. typedef uint16_t U16;
  154. typedef uint32_t U32;
  155. typedef int32_t S32;
  156. typedef uint64_t U64;
  157. #else
  158. typedef unsigned char BYTE;
  159. typedef unsigned short U16;
  160. typedef unsigned int U32;
  161. typedef signed int S32;
  162. typedef unsigned long long U64;
  163. #endif
  164. /* unoptimized version; solves endianness & alignment issues */
  165. static U32 LZ4F_readLE32 (const void* src)
  166. {
  167. const BYTE* const srcPtr = (const BYTE*)src;
  168. U32 value32 = srcPtr[0];
  169. value32 |= ((U32)srcPtr[1])<< 8;
  170. value32 |= ((U32)srcPtr[2])<<16;
  171. value32 |= ((U32)srcPtr[3])<<24;
  172. return value32;
  173. }
  174. static void LZ4F_writeLE32 (void* dst, U32 value32)
  175. {
  176. BYTE* const dstPtr = (BYTE*)dst;
  177. dstPtr[0] = (BYTE)value32;
  178. dstPtr[1] = (BYTE)(value32 >> 8);
  179. dstPtr[2] = (BYTE)(value32 >> 16);
  180. dstPtr[3] = (BYTE)(value32 >> 24);
  181. }
  182. static U64 LZ4F_readLE64 (const void* src)
  183. {
  184. const BYTE* const srcPtr = (const BYTE*)src;
  185. U64 value64 = srcPtr[0];
  186. value64 |= ((U64)srcPtr[1]<<8);
  187. value64 |= ((U64)srcPtr[2]<<16);
  188. value64 |= ((U64)srcPtr[3]<<24);
  189. value64 |= ((U64)srcPtr[4]<<32);
  190. value64 |= ((U64)srcPtr[5]<<40);
  191. value64 |= ((U64)srcPtr[6]<<48);
  192. value64 |= ((U64)srcPtr[7]<<56);
  193. return value64;
  194. }
  195. static void LZ4F_writeLE64 (void* dst, U64 value64)
  196. {
  197. BYTE* const dstPtr = (BYTE*)dst;
  198. dstPtr[0] = (BYTE)value64;
  199. dstPtr[1] = (BYTE)(value64 >> 8);
  200. dstPtr[2] = (BYTE)(value64 >> 16);
  201. dstPtr[3] = (BYTE)(value64 >> 24);
  202. dstPtr[4] = (BYTE)(value64 >> 32);
  203. dstPtr[5] = (BYTE)(value64 >> 40);
  204. dstPtr[6] = (BYTE)(value64 >> 48);
  205. dstPtr[7] = (BYTE)(value64 >> 56);
  206. }
  207. /*-************************************
  208. * Constants
  209. **************************************/
  210. #ifndef LZ4_SRC_INCLUDED /* avoid double definition */
  211. # define KB *(1<<10)
  212. # define MB *(1<<20)
  213. # define GB *(1<<30)
  214. #endif
  215. #define _1BIT 0x01
  216. #define _2BITS 0x03
  217. #define _3BITS 0x07
  218. #define _4BITS 0x0F
  219. #define _8BITS 0xFF
  220. #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
  221. #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
  222. static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
  223. static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
  224. static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
  225. static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
  226. /*-************************************
  227. * Structures and local types
  228. **************************************/
/* Whether a block is stored compressed or verbatim within the frame. */
typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
/* Which kind of LZ4 compression context lz4CtxPtr points to. */
typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
/* Compression context: all state carried across streaming compression calls. */
typedef struct LZ4F_cctx_s
{
    LZ4F_CustomMem cmem;          /* allocator used for all internal allocations */
    LZ4F_preferences_t prefs;     /* preferences captured at LZ4F_compressBegin() */
    U32 version;                  /* version provided at context creation */
    U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */
    const LZ4F_CDict* cdict;      /* optional digested dictionary (not owned) */
    size_t maxBlockSize;          /* derived from prefs.frameInfo.blockSizeID */
    size_t maxBufferSize;         /* capacity of tmpBuff */
    BYTE* tmpBuff;    /* internal buffer, for streaming */
    BYTE* tmpIn;      /* starting position of data compress within internal buffer (>= tmpBuff) */
    size_t tmpInSize; /* amount of data to compress after tmpIn */
    U64 totalInSize;  /* running total of source bytes, for content size check */
    XXH32_state_t xxh;            /* running content checksum state */
    void* lz4CtxPtr;              /* underlying LZ4_stream_t or LZ4_streamHC_t */
    U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    U16 lz4CtxType;  /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
    LZ4F_BlockCompressMode_e blockCompressMode;  /* current block storage mode */
} LZ4F_cctx_t;
  250. /*-************************************
  251. * Error management
  252. **************************************/
/* Error-name table, generated from LZ4F_LIST_ERRORS; indexed by the positive error code. */
#define LZ4F_GENERATE_STRING(STRING) #STRING,
static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
  255. unsigned LZ4F_isError(LZ4F_errorCode_t code)
  256. {
  257. return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
  258. }
  259. const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
  260. {
  261. static const char* codeError = "Unspecified error code";
  262. if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
  263. return codeError;
  264. }
  265. LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
  266. {
  267. if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
  268. return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
  269. }
/* LZ4F_returnErrorCode() :
 * Encode an error enum as a size_t result: the negated value lands in the
 * topmost size_t range, which LZ4F_isError() detects. */
static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
{
    /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
    LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
}
  276. #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
  277. #define RETURN_ERROR_IF(c,e) do { \
  278. if (c) { \
  279. DEBUGLOG(3, "Error: " #c); \
  280. RETURN_ERROR(e); \
  281. } \
  282. } while (0)
  283. #define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
/* Library version, for runtime compatibility checks (compare with LZ4F_VERSION). */
unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }

/* Highest compression level accepted by this build (delegates to LZ4HC). */
int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
  286. size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
  287. {
  288. static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
  289. if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
  290. if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
  291. RETURN_ERROR(maxBlockSize_invalid);
  292. { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
  293. return blockSizes[blockSizeIdx];
  294. } }
  295. /*-************************************
  296. * Private functions
  297. **************************************/
  298. #define MIN(a,b) ( (a) < (b) ? (a) : (b) )
/* LZ4F_headerChecksum() :
 * Frame header checksum: second byte of XXH32(header, seed=0),
 * per the LZ4 frame format specification. */
static BYTE LZ4F_headerChecksum (const void* header, size_t length)
{
    U32 const xxh = XXH32(header, length, 0);
    return (BYTE)(xxh >> 8);
}
  304. /*-************************************
  305. * Simple-pass compression functions
  306. **************************************/
  307. static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
  308. const size_t srcSize)
  309. {
  310. LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
  311. size_t maxBlockSize = 64 KB;
  312. while (requestedBSID > proposedBSID) {
  313. if (srcSize <= maxBlockSize)
  314. return proposedBSID;
  315. proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
  316. maxBlockSize <<= 2;
  317. }
  318. return requestedBSID;
  319. }
  320. /*! LZ4F_compressBound_internal() :
  321. * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
  322. * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
  323. * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
  324. * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
  325. */
static size_t LZ4F_compressBound_internal(size_t srcSize,
                                    const LZ4F_preferences_t* preferencesPtr,
                                          size_t alreadyBuffered)
{
    LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;   /* worst case */
    prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled;   /* worst case */
    {   const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
        /* with autoFlush off, a trailing partial block stays buffered (srcSize==0 implies a flush) */
        U32 const flush = prefsPtr->autoFlush | (srcSize==0);
        LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
        size_t const blockSize = LZ4F_getBlockSize(blockID);
        /* worst-case carry-over: one byte short of a full block */
        size_t const maxBuffered = blockSize - 1;
        size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
        size_t const maxSrcSize = srcSize + bufferedSize;
        unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
        /* masking works because blockSize is always a power of 2 */
        size_t const partialBlockSize = maxSrcSize & (blockSize-1);
        size_t const lastBlockSize = flush ? partialBlockSize : 0;
        unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
        /* per-block checksum footer, present only when blockChecksumFlag is set */
        size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
        /* EndMark + optional content checksum */
        size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
        /* worst case assumes every block is stored uncompressed (full blockSize each) */
        return ((BHSize + blockCRCSize) * nbBlocks) +
               (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
    }
}
  350. size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
  351. {
  352. LZ4F_preferences_t prefs;
  353. size_t const headerSize = maxFHSize; /* max header size, including optional fields */
  354. if (preferencesPtr!=NULL) prefs = *preferencesPtr;
  355. else MEM_INIT(&prefs, 0, sizeof(prefs));
  356. prefs.autoFlush = 1;
  357. return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);;
  358. }
  359. /*! LZ4F_compressFrame_usingCDict() :
  360. * Compress srcBuffer using a dictionary, in a single step.
  361. * cdict can be NULL, in which case, no dictionary is used.
  362. * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
  363. * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
  364. * however, it's the only way to provide a dictID, so it's not recommended.
  365. * @return : number of bytes written into dstBuffer,
  366. * or an error code if it fails (can be tested using LZ4F_isError())
  367. */
size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                                     void* dstBuffer, size_t dstCapacity,
                               const void* srcBuffer, size_t srcSize,
                               const LZ4F_CDict* cdict,
                               const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t prefs;          /* local copy, so it can be auto-corrected */
    LZ4F_compressOptions_t options;
    BYTE* const dstStart = (BYTE*) dstBuffer;
    BYTE* dstPtr = dstStart;           /* write cursor */
    BYTE* const dstEnd = dstStart + dstCapacity;
    DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
    if (preferencesPtr!=NULL)
        prefs = *preferencesPtr;
    else
        MEM_INIT(&prefs, 0, sizeof(prefs));
    if (prefs.frameInfo.contentSize != 0)
        prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
    /* shrink block size to the smallest one that fits srcSize */
    prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
    prefs.autoFlush = 1;
    if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
        prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
    MEM_INIT(&options, 0, sizeof(options));
    options.stableSrc = 1;  /* srcBuffer stays valid for the whole single-shot call */
    /* single-shot contract: caller must provide worst-case capacity */
    RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);
    /* frame = header, then compressed blocks, then end mark (+ optional checksum) */
    { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs);  /* write header */
      FORWARD_IF_ERROR(headerSize);
      dstPtr += headerSize;   /* header size */ }
    assert(dstEnd >= dstPtr);
    { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
      FORWARD_IF_ERROR(cSize);
      dstPtr += cSize; }
    assert(dstEnd >= dstPtr);
    { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options);   /* flush last block, and generate suffix */
      FORWARD_IF_ERROR(tailSize);
      dstPtr += tailSize; }
    assert(dstEnd >= dstStart);
    return (size_t)(dstPtr - dstStart);
}
  407. /*! LZ4F_compressFrame() :
  408. * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
  409. * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
  410. * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
  411. * @return : number of bytes written into dstBuffer.
  412. * or an error code if it fails (can be tested using LZ4F_isError())
  413. */
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
                          const void* srcBuffer, size_t srcSize,
                          const LZ4F_preferences_t* preferencesPtr)
{
    size_t result;
#if (LZ4F_HEAPMODE)
    /* heap mode: allocate the compression context dynamically */
    LZ4F_cctx_t* cctxPtr;
    result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
    FORWARD_IF_ERROR(result);
#else
    /* stack mode: context and (fast-mode) LZ4 state live on the stack */
    LZ4F_cctx_t cctx;
    LZ4_stream_t lz4ctx;
    LZ4F_cctx_t* const cctxPtr = &cctx;
    MEM_INIT(&cctx, 0, sizeof(cctx));
    cctx.version = LZ4F_VERSION;
    cctx.maxBufferSize = 5 MB;   /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
    if ( preferencesPtr == NULL
      || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
        /* fast mode: the stack-allocated LZ4_stream_t can be used directly */
        LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
        cctxPtr->lz4CtxPtr = &lz4ctx;
        cctxPtr->lz4CtxAlloc = 1;
        cctxPtr->lz4CtxType = ctxFast;
    }
    /* NOTE(review): in the HC path, lz4CtxPtr stays NULL here; presumably it is
     * heap-allocated later within LZ4F_compressFrame_usingCDict()'s call chain,
     * which is why it is freed below — confirm against LZ4F_compressBegin. */
#endif
    DEBUGLOG(4, "LZ4F_compressFrame");
    result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
                                           srcBuffer, srcSize,
                                           NULL, preferencesPtr);
#if (LZ4F_HEAPMODE)
    LZ4F_freeCompressionContext(cctxPtr);
#else
    if ( preferencesPtr != NULL
      && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
        /* only the HC context was heap-allocated; the fast one was on stack */
        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
    }
#endif
    return result;
}
  452. /*-***************************************************
  453. * Dictionary compression
  454. *****************************************************/
/* Digested dictionary: holds a private copy of the dictionary content,
 * pre-loaded into both a fast and an HC stream state, since the
 * compression level is not known at creation time. Read-only after creation. */
struct LZ4F_CDict_s {
    LZ4F_CustomMem cmem;     /* allocator used for all members, and for freeing */
    void* dictContent;       /* owned copy of (at most the last 64 KB of) the dictionary */
    LZ4_stream_t* fastCtx;   /* dictionary pre-loaded for fast mode */
    LZ4_streamHC_t* HCCtx;   /* dictionary pre-loaded for HC mode */
};  /* typedef'd to LZ4F_CDict within lz4frame_static.h */
/* LZ4F_createCDict_advanced() :
 * Same as LZ4F_createCDict(), with a custom allocator.
 * Only the last 64 KB of @dictBuffer are retained (LZ4 window limit).
 * @return : the digested dictionary, or NULL on allocation failure. */
LZ4F_CDict*
LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
{
    const char* dictStart = (const char*)dictBuffer;
    LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
    DEBUGLOG(4, "LZ4F_createCDict_advanced");
    if (!cdict) return NULL;
    cdict->cmem = cmem;
    /* keep only the last 64 KB : earlier content is out of the LZ4 window anyway */
    if (dictSize > 64 KB) {
        dictStart += dictSize - 64 KB;
        dictSize = 64 KB;
    }
    cdict->dictContent = LZ4F_malloc(dictSize, cmem);
    /* note: using @cmem to allocate => can't use default create */
    cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
    cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
    /* any partial failure: release everything acquired so far (freeCDict tolerates NULL members) */
    if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
        LZ4F_freeCDict(cdict);
        return NULL;
    }
    memcpy(cdict->dictContent, dictStart, dictSize);
    LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
    LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
    LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
    /* note: we don't know at this point which compression level is going to be used
     * as a consequence, HCCtx is created for the more common HC mode */
    LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
    LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
    return cdict;
}
  491. /*! LZ4F_createCDict() :
  492. * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
  493. * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
  494. * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
  495. * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
  496. * @return : digested dictionary for compression, or NULL if failed */
/* LZ4F_createCDict() :
 * Convenience wrapper over LZ4F_createCDict_advanced() using the default allocator.
 * @return : digested dictionary, or NULL on failure. */
LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
{
    DEBUGLOG(4, "LZ4F_createCDict");
    return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
}
/* LZ4F_freeCDict() :
 * Release a digested dictionary and all its owned members.
 * Members are freed before the container itself; NULL is a no-op. */
void LZ4F_freeCDict(LZ4F_CDict* cdict)
{
    if (cdict==NULL) return;  /* support free on NULL */
    LZ4F_free(cdict->dictContent, cdict->cmem);
    LZ4F_free(cdict->fastCtx, cdict->cmem);
    LZ4F_free(cdict->HCCtx, cdict->cmem);
    LZ4F_free(cdict, cdict->cmem);  /* must come last: cmem is read from *cdict */
}
  510. /*-*********************************
  511. * Advanced compression functions
  512. ***********************************/
/* LZ4F_createCompressionContext_advanced() :
 * Allocate a zero-initialized compression context with a custom allocator.
 * @return : the new context, or NULL on allocation failure. */
LZ4F_cctx*
LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
{
    LZ4F_cctx* const cctxPtr =
        (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
    if (cctxPtr==NULL) return NULL;
    cctxPtr->cmem = customMem;
    cctxPtr->version = version;
    /* redundant with calloc's zeroing, kept for explicitness */
    cctxPtr->cStage = 0;   /* Uninitialized. Next stage : init cctx */
    return cctxPtr;
}
  524. /*! LZ4F_createCompressionContext() :
  525. * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
  526. * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
  527. * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
  528. * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
  529. * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
  530. * Object can release its memory using LZ4F_freeCompressionContext();
  531. **/
/* LZ4F_createCompressionContext() :
 * Public entry point: allocates a context via the default allocator and
 * stores it in *LZ4F_compressionContextPtr.
 * @return : LZ4F_OK_NoError, or an error code (allocation_failed / parameter_null). */
LZ4F_errorCode_t
LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
{
    assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
    /* in case it nonetheless happen in production */
    RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);
    *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
    RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
    return LZ4F_OK_NoError;
}
  542. LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
  543. {
  544. if (cctxPtr != NULL) { /* support free on NULL */
  545. LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
  546. LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
  547. LZ4F_free(cctxPtr, cctxPtr->cmem);
  548. }
  549. return LZ4F_OK_NoError;
  550. }
/**
 * This function prepares the internal LZ4(HC) stream for a new compression,
 * resetting the context and attaching the dictionary, if there is one.
 *
 * It needs to be called at the beginning of each independent compression
 * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
 * beginning of each block in blockIndependent mode).
 *
 * @ctx points to an LZ4_stream_t when level < LZ4HC_CLEVEL_MIN,
 * and to an LZ4_streamHC_t otherwise.
 */
static void LZ4F_initStream(void* ctx,
                            const LZ4F_CDict* cdict,
                            int level,
                            LZ4F_blockMode_t blockMode) {
    if (level < LZ4HC_CLEVEL_MIN) {
        if (cdict || blockMode == LZ4F_blockLinked) {
            /* In these cases, we will call LZ4_compress_fast_continue(),
             * which needs an already reset context. Otherwise, we'll call a
             * one-shot API. The non-continued APIs internally perform their own
             * resets at the beginning of their calls, where they know what
             * tableType they need the context to be in. So in that case this
             * would be misguided / wasted work. */
            LZ4_resetStream_fast((LZ4_stream_t*)ctx);
            if (cdict)
                LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
        }
        /* In these cases, we'll call a one-shot API.
         * The non-continued APIs internally perform their own resets
         * at the beginning of their calls, where they know
         * which tableType they need the context to be in.
         * Therefore, a reset here would be wasted work. */
    } else {
        /* HC path : always reset, then attach the HC dictionary if present */
        LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
        if (cdict)
            LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
    }
}
  586. static int ctxTypeID_to_size(int ctxTypeID) {
  587. switch(ctxTypeID) {
  588. case 1:
  589. return LZ4_sizeofState();
  590. case 2:
  591. return LZ4_sizeofStateHC();
  592. default:
  593. return 0;
  594. }
  595. }
/* LZ4F_compressBegin_internal()
 * Note: only accepts @cdict _or_ @dictBuffer as non NULL.
 * Prepares the compression context according to @preferencesPtr
 * (reallocating the LZ4 state and internal buffer if needed),
 * then writes the frame header into @dstBuffer.
 * @dstCapacity must be >= maxFHSize.
 * @return : number of header bytes written, or an error code
 *           (testable with LZ4F_isError()).
 */
size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const void* dictBuffer, size_t dictSize,
                          const LZ4F_CDict* cdict,
                          const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
    if (preferencesPtr == NULL) preferencesPtr = &prefNull;   /* NULL means "default preferences" */
    cctx->prefs = *preferencesPtr;

    /* cctx Management */
    {   U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;   /* 1 == fast, 2 == HC */
        int requiredSize = ctxTypeID_to_size(ctxTypeID);
        int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
        if (allocatedSize < requiredSize) {
            /* not enough space allocated */
            LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                /* must take ownership of memory allocation,
                 * in order to respect custom allocator contract */
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
            }
            RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
            cctx->lz4CtxAlloc = ctxTypeID;
            cctx->lz4CtxType = ctxTypeID;
        } else if (cctx->lz4CtxType != ctxTypeID) {
            /* otherwise, a sufficient buffer is already allocated,
             * but we need to reset it to the correct context type */
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
                LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
            }
            cctx->lz4CtxType = ctxTypeID;
    }   }

    /* Buffer Management */
    if (cctx->prefs.frameInfo.blockSizeID == 0)
        cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
    cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);

    {   /* with autoFlush, only the 64 KB history window must be kept (linked blocks);
         * otherwise, the buffer must also be able to hold one full pending block */
        size_t const requiredBuffSize = preferencesPtr->autoFlush ?
                ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) :  /* only needs past data up to window size */
                cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);

        if (cctx->maxBufferSize < requiredBuffSize) {
            cctx->maxBufferSize = 0;
            LZ4F_free(cctx->tmpBuff, cctx->cmem);
            cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
            RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
            cctx->maxBufferSize = requiredBuffSize;
    }   }
    cctx->tmpIn = cctx->tmpBuff;
    cctx->tmpInSize = 0;
    (void)XXH32_reset(&(cctx->xxh), 0);

    /* context init */
    cctx->cdict = cdict;
    if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
        /* frame init only for blockLinked : blockIndependent will be init at each block */
        LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
    }
    if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
    }
    if (dictBuffer) {
        assert(cdict == NULL);   /* only one dictionary source may be provided */
        RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
        if (cctx->lz4CtxType == ctxFast) {
            /* lz4 fast*/
            LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        } else {
            /* lz4hc */
            assert(cctx->lz4CtxType == ctxHC);
            LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        }
    }

    /* Stage 2 : Write Frame Header */

    /* Magic Number */
    LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
    dstPtr += 4;
    {   BYTE* const headerStart = dstPtr;

        /* FLG Byte */
        *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)    /* Version('01') */
            + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
            + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
            + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
            + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
            + (cctx->prefs.frameInfo.dictID > 0) );
        /* BD Byte */
        *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
        /* Optional Frame content size field */
        if (cctx->prefs.frameInfo.contentSize) {
            LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
            dstPtr += 8;
            cctx->totalInSize = 0;
        }
        /* Optional dictionary ID field */
        if (cctx->prefs.frameInfo.dictID) {
            LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
            dstPtr += 4;
        }
        /* Header CRC Byte */
        *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
        dstPtr++;
    }

    cctx->cStage = 1;   /* header written, now request input data block */
    return (size_t)(dstPtr - dstStart);
}
  713. size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
  714. void* dstBuffer, size_t dstCapacity,
  715. const LZ4F_preferences_t* preferencesPtr)
  716. {
  717. return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
  718. NULL, 0,
  719. NULL, preferencesPtr);
  720. }
  721. /* LZ4F_compressBegin_usingDictOnce:
  722. * Hidden implementation,
  723. * employed for multi-threaded compression
  724. * when frame defines linked blocks */
  725. size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
  726. void* dstBuffer, size_t dstCapacity,
  727. const void* dict, size_t dictSize,
  728. const LZ4F_preferences_t* preferencesPtr)
  729. {
  730. return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
  731. dict, dictSize,
  732. NULL, preferencesPtr);
  733. }
  734. size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
  735. void* dstBuffer, size_t dstCapacity,
  736. const void* dict, size_t dictSize,
  737. const LZ4F_preferences_t* preferencesPtr)
  738. {
  739. /* note : incorrect implementation :
  740. * this will only use the dictionary once,
  741. * instead of once *per* block when frames defines independent blocks */
  742. return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
  743. dict, dictSize,
  744. preferencesPtr);
  745. }
  746. size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
  747. void* dstBuffer, size_t dstCapacity,
  748. const LZ4F_CDict* cdict,
  749. const LZ4F_preferences_t* preferencesPtr)
  750. {
  751. return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
  752. NULL, 0,
  753. cdict, preferencesPtr);
  754. }
  755. /* LZ4F_compressBound() :
  756. * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
  757. * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
  758. * This function cannot fail.
  759. */
  760. size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
  761. {
  762. if (preferencesPtr && preferencesPtr->autoFlush) {
  763. return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
  764. }
  765. return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
  766. }
/* Common signature shared by every block-compression variant below. */
typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);

/*! LZ4F_makeBlock():
 *  compress a single block, add header and optional checksum.
 *  assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
 */
static size_t LZ4F_makeBlock(void* dst,
                       const void* src, size_t srcSize,
                       compressFunc_t compress, void* lz4ctx, int level,
                       const LZ4F_CDict* cdict,
                       LZ4F_blockChecksum_t crcFlag)
{
    BYTE* const cSizePtr = (BYTE*)dst;
    U32 cSize;
    assert(compress != NULL);
    /* giving (srcSize-1) as capacity makes compression fail whenever it
     * would not actually shrink the block, triggering the raw-copy path below */
    cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
                          (int)(srcSize), (int)(srcSize-1),
                          level, cdict);

    if (cSize == 0 || cSize >= srcSize) {
        /* compression failed or brought no gain : store the block uncompressed,
         * flagged via the high bit of the 4-byte block header */
        cSize = (U32)srcSize;
        LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
        memcpy(cSizePtr+BHSize, src, srcSize);
    } else {
        LZ4F_writeLE32(cSizePtr, cSize);
    }
    if (crcFlag) {
        U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0);  /* checksum of compressed data */
        LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
    }
    return BHSize + cSize + ((U32)crcFlag)*BFSize;
}
  797. static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  798. {
  799. int const acceleration = (level < 0) ? -level + 1 : 1;
  800. DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize);
  801. LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
  802. if (cdict) {
  803. return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
  804. } else {
  805. return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
  806. }
  807. }
  808. static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  809. {
  810. int const acceleration = (level < 0) ? -level + 1 : 1;
  811. (void)cdict; /* init once at beginning of frame */
  812. DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize);
  813. return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
  814. }
  815. static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  816. {
  817. LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
  818. if (cdict) {
  819. return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
  820. }
  821. return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
  822. }
  823. static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  824. {
  825. (void)level; (void)cdict; /* init once at beginning of frame */
  826. return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
  827. }
  828. static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
  829. {
  830. (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
  831. return 0;
  832. }
  833. static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
  834. {
  835. if (compressMode == LZ4B_UNCOMPRESSED)
  836. return LZ4F_doNotCompressBlock;
  837. if (level < LZ4HC_CLEVEL_MIN) {
  838. if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
  839. return LZ4F_compressBlock_continue;
  840. }
  841. if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
  842. return LZ4F_compressBlockHC_continue;
  843. }
  844. /* Save history (up to 64KB) into @tmpBuff */
  845. static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
  846. {
  847. if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
  848. return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
  849. return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
  850. }
/* Tracks where the input of the most recently compressed block came from,
 * which determines whether history must be saved into tmpBuff afterwards. */
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;

/* default (all-zero) compression options, used when caller passes NULL */
static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
/*! LZ4F_compressUpdateImpl() :
 *  LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 *  When successful, the function always entirely consumes @srcBuffer.
 *  src data is either buffered or compressed into @dstBuffer.
 *  If the block compression does not match the compression of the previous block, the old data is flushed
 *  and operations continue with the new compression mode.
 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
 * @compressOptionsPtr is optional : provide NULL to mean "default".
 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
 *           or an error code if it fails (which can be tested using LZ4F_isError())
 *  After an error, the state is left in a UB state, and must be re-initialized.
 */
static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
                     void* dstBuffer, size_t dstCapacity,
               const void* srcBuffer, size_t srcSize,
               const LZ4F_compressOptions_t* compressOptionsPtr,
                     LZ4F_BlockCompressMode_e blockCompression)
{
    size_t const blockSize = cctxPtr->maxBlockSize;
    const BYTE* srcPtr = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcPtr + srcSize;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    LZ4F_lastBlockStatus lastBlockCompressed = notDone;
    compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
    size_t bytesWritten;
    DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);

    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);   /* state must be initialized and waiting for next block */
    if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
        RETURN_ERROR(dstMaxSize_tooSmall);
    /* uncompressed blocks are copied verbatim : dst must hold at least the full input */
    if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
        RETURN_ERROR(dstMaxSize_tooSmall);

    /* flush currently written block, to continue with new block compression */
    if (cctxPtr->blockCompressMode != blockCompression) {
        bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
        dstPtr += bytesWritten;
        cctxPtr->blockCompressMode = blockCompression;
    }

    if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;

    /* complete tmp buffer */
    if (cctxPtr->tmpInSize > 0) {   /* some data already within tmp buffer */
        size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
        assert(blockSize > cctxPtr->tmpInSize);
        if (sizeToCopy > srcSize) {
            /* add src to tmpIn buffer */
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
            srcPtr = srcEnd;
            cctxPtr->tmpInSize += srcSize;
            /* still needs some CRC */
        } else {
            /* complete tmpIn block and then compress it */
            lastBlockCompressed = fromTmpBuffer;
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
            srcPtr += sizeToCopy;

            dstPtr += LZ4F_makeBlock(dstPtr,
                                     cctxPtr->tmpIn, blockSize,
                                     compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                     cctxPtr->cdict,
                                     cctxPtr->prefs.frameInfo.blockChecksumFlag);
            /* in linked mode, keep the just-compressed block in place as history */
            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
            cctxPtr->tmpInSize = 0;
    }   }

    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
        /* compress full blocks */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, blockSize,
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr += blockSize;
    }

    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
        /* autoFlush : remaining input (< blockSize) is compressed */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                                 srcPtr, (size_t)(srcEnd - srcPtr),
                                 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                 cctxPtr->cdict,
                                 cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr = srcEnd;
    }

    /* preserve dictionary within @tmpBuff whenever necessary */
    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
        /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
        assert(blockCompression == LZ4B_COMPRESSED);
        if (compressOptionsPtr->stableSrc) {
            cctxPtr->tmpIn = cctxPtr->tmpBuff;   /* src is stable : dictionary remains in src across invocations */
        } else {
            int const realDictSize = LZ4F_localSaveDict(cctxPtr);
            assert(0 <= realDictSize && realDictSize <= 64 KB);
            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        }
    }

    /* keep tmpIn within limits */
    if (!(cctxPtr->prefs.autoFlush)   /* no autoflush : there may be some data left within internal buffer */
      && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) )   /* not enough room to store next block */
    {
        /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
         * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
    }

    /* some input data left, necessarily < blockSize */
    if (srcPtr < srcEnd) {
        /* fill tmp buffer */
        size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
        cctxPtr->tmpInSize = sizeToCopy;
    }

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
        (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);

    cctxPtr->totalInSize += srcSize;
    return (size_t)(dstPtr - dstStart);
}
  969. /*! LZ4F_compressUpdate() :
  970. * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
  971. * When successful, the function always entirely consumes @srcBuffer.
  972. * src data is either buffered or compressed into @dstBuffer.
  973. * If previously an uncompressed block was written, buffered data is flushed
  974. * before appending compressed data is continued.
  975. * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
  976. * @compressOptionsPtr is optional : provide NULL to mean "default".
  977. * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  978. * or an error code if it fails (which can be tested using LZ4F_isError())
  979. * After an error, the state is left in a UB state, and must be re-initialized.
  980. */
  981. size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
  982. void* dstBuffer, size_t dstCapacity,
  983. const void* srcBuffer, size_t srcSize,
  984. const LZ4F_compressOptions_t* compressOptionsPtr)
  985. {
  986. return LZ4F_compressUpdateImpl(cctxPtr,
  987. dstBuffer, dstCapacity,
  988. srcBuffer, srcSize,
  989. compressOptionsPtr, LZ4B_COMPRESSED);
  990. }
  991. /*! LZ4F_uncompressedUpdate() :
  992. * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
  993. * This symbol is only supported when LZ4F_blockIndependent is used
  994. * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
  995. * @compressOptionsPtr is optional : provide NULL to mean "default".
  996. * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
  997. * or an error code if it fails (which can be tested using LZ4F_isError())
  998. * After an error, the state is left in a UB state, and must be re-initialized.
  999. */
  1000. size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
  1001. void* dstBuffer, size_t dstCapacity,
  1002. const void* srcBuffer, size_t srcSize,
  1003. const LZ4F_compressOptions_t* compressOptionsPtr)
  1004. {
  1005. return LZ4F_compressUpdateImpl(cctxPtr,
  1006. dstBuffer, dstCapacity,
  1007. srcBuffer, srcSize,
  1008. compressOptionsPtr, LZ4B_UNCOMPRESSED);
  1009. }
/*! LZ4F_flush() :
 *  When compressed data must be sent immediately, without waiting for a block to be filled,
 *  invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
 *  The result of the function is the number of bytes written into dstBuffer.
 *  It can be zero, this means there was no data left within LZ4F_cctx.
 *  The function outputs an error code if it fails (can be tested using LZ4F_isError())
 *  LZ4F_compressOptions_t* is optional. NULL is a valid argument.
 */
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
                  void* dstBuffer, size_t dstCapacity,
            const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;

    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
    /* worst case : block is stored uncompressed, plus header and optional checksum */
    RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
    (void)compressOptionsPtr;   /* not useful (yet) */

    /* select compression function */
    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);

    /* compress tmp buffer */
    dstPtr += LZ4F_makeBlock(dstPtr,
                             cctxPtr->tmpIn, cctxPtr->tmpInSize,
                             compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                             cctxPtr->cdict,
                             cctxPtr->prefs.frameInfo.blockChecksumFlag);
    assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));

    /* in linked mode, keep the flushed data in place : it serves as history */
    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
        cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;

    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {   /* necessarily LZ4F_blockLinked */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }

    return (size_t)(dstPtr - dstStart);
}
/*! LZ4F_compressEnd() :
 *  When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
 *  It will flush whatever data remained within compressionContext (like LZ4_flush())
 *  but also properly finalize the frame, with an endMark and an (optional) checksum.
 *  LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
 * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
 *          or an error code if it fails (can be tested using LZ4F_isError())
 *  The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
 */
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
                        void* dstBuffer, size_t dstCapacity,
                  const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
    DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
    FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;

    assert(flushSize <= dstCapacity);
    dstCapacity -= flushSize;

    RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);
    dstPtr += 4;   /* endMark */

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
        /* 8 bytes == 4 (endMark, already written) + 4 (content checksum) */
        RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
        DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }

    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */

    if (cctxPtr->prefs.frameInfo.contentSize) {
        /* frame header announced a content size : verify the promise was kept */
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
            RETURN_ERROR(frameSize_wrong);
    }

    return (size_t)(dstPtr - dstStart);
}
/*-***************************************************
*   Frame Decompression
*****************************************************/

/* Decoder state-machine stages.
 * Each "store" stage buffers partial input inside dctx (tmpInSize/tmpInTarget)
 * until enough bytes have accumulated to process the corresponding item. */
typedef enum {
    dstage_getFrameHeader=0, dstage_storeFrameHeader,
    dstage_init,
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
    dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,
    dstage_getSFrameSize, dstage_storeSFrameSize,
    dstage_skipSkippable
} dStage_t;
struct LZ4F_dctx_s {
    LZ4F_CustomMem cmem;          /* allocator used for all internal buffers */
    LZ4F_frameInfo_t frameInfo;   /* parameters decoded from the frame header */
    U32 version;
    dStage_t dStage;              /* current state-machine stage */
    U64 frameRemainingSize;       /* NOTE(review): appears to track remaining declared content bytes - confirm in decompress loop */
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE* tmpIn;                  /* staging buffer for partial input */
    size_t tmpInSize;             /* nb of bytes currently stored in tmpIn (or header[]) */
    size_t tmpInTarget;           /* nb of bytes required before the buffered item can be processed */
    BYTE* tmpOutBuffer;
    const BYTE* dict;             /* history used as dictionary; reset to NULL on new frame */
    size_t dictSize;
    BYTE* tmpOut;
    size_t tmpOutSize;
    size_t tmpOutStart;
    XXH32_state_t xxh;            /* presumably running content-checksum state - mirrors cctx usage */
    XXH32_state_t blockChecksum;  /* presumably per-block checksum state */
    int skipChecksum;             /* flag, cleared on reset; presumably disables checksum verification */
    BYTE header[LZ4F_HEADER_SIZE_MAX];   /* staging area for incomplete frame headers */
};  /* typedef'd to LZ4F_dctx in lz4frame.h */
  1122. LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
  1123. {
  1124. LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
  1125. if (dctx == NULL) return NULL;
  1126. dctx->cmem = customMem;
  1127. dctx->version = version;
  1128. return dctx;
  1129. }
  1130. /*! LZ4F_createDecompressionContext() :
  1131. * Create a decompressionContext object, which will track all decompression operations.
  1132. * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
  1133. * Object can later be released using LZ4F_freeDecompressionContext().
  1134. * @return : if != 0, there was an error during context creation.
  1135. */
  1136. LZ4F_errorCode_t
  1137. LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
  1138. {
  1139. assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */
  1140. RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */
  1141. *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
  1142. if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */
  1143. RETURN_ERROR(allocation_failed);
  1144. }
  1145. return LZ4F_OK_NoError;
  1146. }
  1147. LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
  1148. {
  1149. LZ4F_errorCode_t result = LZ4F_OK_NoError;
  1150. if (dctx != NULL) { /* can accept NULL input, like free() */
  1151. result = (LZ4F_errorCode_t)dctx->dStage;
  1152. LZ4F_free(dctx->tmpIn, dctx->cmem);
  1153. LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
  1154. LZ4F_free(dctx, dctx->cmem);
  1155. }
  1156. return result;
  1157. }
  1158. /*==--- Streaming Decompression operations ---==*/
  1159. void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
  1160. {
  1161. DEBUGLOG(5, "LZ4F_resetDecompressionContext");
  1162. dctx->dStage = dstage_getFrameHeader;
  1163. dctx->dict = NULL;
  1164. dctx->dictSize = 0;
  1165. dctx->skipChecksum = 0;
  1166. dctx->frameRemainingSize = 0;
  1167. }
  1168. /*! LZ4F_decodeHeader() :
  1169. * input : `src` points at the **beginning of the frame**
  1170. * output : set internal values of dctx, such as
  1171. * dctx->frameInfo and dctx->dStage.
  1172. * Also allocates internal buffers.
  1173. * @return : nb Bytes read from src (necessarily <= srcSize)
  1174. * or an error code (testable with LZ4F_isError())
  1175. */
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
{
    unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;

    DEBUGLOG(5, "LZ4F_decodeHeader");
    /* need to decode header to get frameInfo */
    RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
    MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));

    /* special case : skippable frames (magic number 0x184D2A5x) */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
        dctx->frameInfo.frameType = LZ4F_skippableFrame;
        if (src == (void*)(dctx->header)) {
            /* header bytes were already accumulated into dctx->header :
             * keep them, and wait for the full 8-byte skippable header */
            dctx->tmpInSize = srcSize;
            dctx->tmpInTarget = 8;
            dctx->dStage = dstage_storeSFrameSize;
            return srcSize;
        } else {
            /* reading straight from user input : only the 4 magic bytes are consumed here */
            dctx->dStage = dstage_getSFrameSize;
            return 4;
    }   }

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
        DEBUGLOG(4, "frame header error : unknown magic number");
        RETURN_ERROR(frameType_unknown);
    }
#endif
    dctx->frameInfo.frameType = LZ4F_frame;

    /* FLG byte (srcPtr[4]) : version + feature flags */
    {   U32 const FLG = srcPtr[4];
        U32 const version = (FLG>>6) & _2BITS;
        blockChecksumFlag = (FLG>>4) & _1BIT;
        blockMode = (FLG>>5) & _1BIT;
        contentSizeFlag = (FLG>>3) & _1BIT;
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
        if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
        if (version != 1) RETURN_ERROR(headerVersion_wrong);  /* Version Number, only supported value */
    }
    DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);

    /* Frame Header Size : fixed part + optional content size (8B) + optional dictID (4B) */
    frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);

    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header :
         * stash what we have into dctx->header and request the rest */
        if (srcPtr != dctx->header)
            memcpy(dctx->header, srcPtr, srcSize);
        dctx->tmpInSize = srcSize;
        dctx->tmpInTarget = frameHeaderSize;
        dctx->dStage = dstage_storeFrameHeader;
        return srcSize;
    }

    /* BD byte (srcPtr[5]) : block size descriptor */
    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
        if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
        if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);  /* Reserved bits */
    }

    /* check header : last header byte is a checksum over bytes [4, frameHeaderSize-1) */
    assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
        RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
#endif

    /* save decoded parameters into dctx */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
    dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag) {
        /* content size field sits right after the BD byte */
        dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    }
    if (dictIDFlag)
        /* dictID is the 4 bytes just before the trailing header checksum byte */
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);

    dctx->dStage = dstage_init;

    return frameHeaderSize;
}
  1257. /*! LZ4F_headerSize() :
  1258. * @return : size of frame header
  1259. * or an error code, which can be tested using LZ4F_isError()
  1260. */
  1261. size_t LZ4F_headerSize(const void* src, size_t srcSize)
  1262. {
  1263. RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
  1264. /* minimal srcSize to determine header size */
  1265. if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
  1266. RETURN_ERROR(frameHeader_incomplete);
  1267. /* special case : skippable frames */
  1268. if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
  1269. return 8;
  1270. /* control magic number */
  1271. #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  1272. if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
  1273. RETURN_ERROR(frameType_unknown);
  1274. #endif
  1275. /* Frame Header Size */
  1276. { BYTE const FLG = ((const BYTE*)src)[4];
  1277. U32 const contentSizeFlag = (FLG>>3) & _1BIT;
  1278. U32 const dictIDFlag = FLG & _1BIT;
  1279. return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
  1280. }
  1281. }
  1282. /*! LZ4F_getFrameInfo() :
  1283. * This function extracts frame parameters (max blockSize, frame checksum, etc.).
  1284. * Usage is optional. Objective is to provide relevant information for allocation purposes.
  1285. * This function works in 2 situations :
  1286. * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
  1287. * Amount of input data provided must be large enough to successfully decode the frame header.
  1288. * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
  1289. * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
  1290. * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
  1291. * Decompression must resume from (srcBuffer + *srcSizePtr).
 * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
  1293. * or an error code which can be tested using LZ4F_isError()
  1294. * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
  1295. * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
  1296. */
LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
                                   LZ4F_frameInfo_t* frameInfoPtr,
                                   const void* srcBuffer, size_t* srcSizePtr)
{
    LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
    if (dctx->dStage > dstage_storeFrameHeader) {
        /* frameInfo already decoded on a previous call : no input consumed */
        size_t o=0, i=0;
        *srcSizePtr = 0;
        *frameInfoPtr = dctx->frameInfo;
        /* returns : recommended nb of bytes for LZ4F_decompress()
         * (the 0-size decompress call only computes the hint, it decodes nothing) */
        return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
    } else {
        if (dctx->dStage == dstage_storeFrameHeader) {
            /* frame decoding already started, in the middle of header => automatic fail */
            *srcSizePtr = 0;
            RETURN_ERROR(frameDecoding_alreadyStarted);
        } else {
            /* fresh context : decode the header directly from srcBuffer */
            size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
            if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
            if (*srcSizePtr < hSize) {
                *srcSizePtr=0;
                RETURN_ERROR(frameHeader_incomplete);
            }

            {   size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
                if (LZ4F_isError(decodeResult)) {
                    /* on error : report zero bytes consumed, dctx stage unchanged for caller */
                    *srcSizePtr = 0;
                } else {
                    *srcSizePtr = decodeResult;
                    decodeResult = BHSize;   /* block header size : hint for next LZ4F_decompress() call */
                }
                *frameInfoPtr = dctx->frameInfo;
                return decodeResult;
    }   }   }
}
  1332. /* LZ4F_updateDict() :
  1333. * only used for LZ4F_blockLinked mode
  1334. * Condition : @dstPtr != NULL
  1335. */
/* Tracks the 64 KB history window required by linked-block decompression.
 * Depending on where freshly decoded data landed (user dst buffer, or tmpOutBuffer),
 * either extends the current dictionary in place, or consolidates history
 * into tmpOutBuffer. */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
                      unsigned withinTmp)
{
    assert(dstPtr != NULL);
    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;  /* will lead to prefix mode */
    assert(dctx->dict != NULL);

    if (dctx->dict + dctx->dictSize == dstPtr) {  /* prefix mode, everything within dstBuffer */
        dctx->dictSize += dstSize;
        return;
    }

    assert(dstPtr >= dstBufferStart);
    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {  /* history in dstBuffer becomes large enough to become dictionary */
        dctx->dict = (const BYTE*)dstBufferStart;
        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
        return;
    }

    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */

    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
    assert(dctx->tmpOutBuffer != NULL);

    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }

    if (withinTmp) {  /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;  /* may wrap around when tmpOutSize > 64 KB : repaired just below */
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        if (dctx->tmpOutSize > 64 KB) copySize = 0;  /* fixes potential unsigned wrap of copySize */
        if (copySize > preserveSize) copySize = preserveSize;

        /* glue tail of old dictionary right in front of tmpOut */
        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
        return;
    }

    if (dctx->dict == dctx->tmpOutBuffer) {  /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {  /* tmp buffer not large enough */
            /* keep only the most recent (64 KB - dstSize) bytes of history */
            size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
        dctx->dictSize += dstSize;
        return;
    }

    /* join dict & dest into tmp */
    {   size_t preserveSize = 64 KB - dstSize;  /* dstSize < 64 KB guaranteed by assert above */
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dstSize;
    }
}
  1392. /*! LZ4F_decompress() :
 * Call this function repetitively to regenerate the data compressed within srcBuffer.
  1394. * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
  1395. * into dstBuffer of capacity *dstSizePtr.
  1396. *
  1397. * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
  1398. *
  1399. * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
  1400. * If number of bytes read is < number of bytes provided, then decompression operation is not complete.
  1401. * Remaining data will have to be presented again in a subsequent invocation.
  1402. *
 * The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
  1404. * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
  1405. * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
 * Note that this is just a hint, and it's always possible to provide any srcSize value.
  1407. * When a frame is fully decoded, @return will be 0.
  1408. * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
  1409. */
size_t LZ4F_decompress(LZ4F_dctx* dctx,
                       void* dstBuffer, size_t* dstSizePtr,
                       const void* srcBuffer, size_t* srcSizePtr,
                       const LZ4F_decompressOptions_t* decompressOptionsPtr)
{
    /* Implementation note : this function is a resumable state machine.
     * dctx->dStage carries the current state across calls; several `case` labels
     * below are entered through conditional fall-through from the previous case
     * (guarded by `if (dctx->dStage == ...)` or skipped via `if (0)`). */
    LZ4F_decompressOptions_t optionsNull;
    const BYTE* const srcStart = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcStart + *srcSizePtr;
    const BYTE* srcPtr = srcStart;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
    BYTE* dstPtr = dstStart;
    const BYTE* selectedIn = NULL;   /* points at data to decode : either srcPtr, or dctx->tmpIn */
    unsigned doAnotherStage = 1;
    size_t nextSrcSizeHint = 1;

    DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
            srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
    if (dstBuffer == NULL) assert(*dstSizePtr == 0);
    MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
    if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
    /* *srcSizePtr / *dstSizePtr are recomputed at the end from consumed/produced pointers */
    *srcSizePtr = 0;
    *dstSizePtr = 0;
    assert(dctx != NULL);
    dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */

    /* behaves as a state machine */

    while (doAnotherStage) {

        switch(dctx->dStage)
        {

        case dstage_getFrameHeader:
            DEBUGLOG(6, "dstage_getFrameHeader");
            if ((size_t)(srcEnd-srcPtr) >= maxFHSize) {  /* enough to decode - shortcut */
                size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr));  /* will update dStage appropriately */
                FORWARD_IF_ERROR(hSize);
                srcPtr += hSize;
                break;
            }
            dctx->tmpInSize = 0;
            if (srcEnd-srcPtr == 0) return minFHSize;   /* 0-size input */
            dctx->tmpInTarget = minFHSize;   /* minimum size to decode header */
            dctx->dStage = dstage_storeFrameHeader;
            /* fall-through */

        case dstage_storeFrameHeader:
            DEBUGLOG(6, "dstage_storeFrameHeader");
            /* accumulate header bytes into dctx->header until tmpInTarget reached */
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
            }
            if (dctx->tmpInSize < dctx->tmpInTarget) {
                nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize;   /* rest of header + nextBlockHeader */
                doAnotherStage = 0;   /* not enough src data, ask for some more */
                break;
            }
            FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */
            break;

        case dstage_init:
            DEBUGLOG(6, "dstage_init");
            if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
            /* internal buffers allocation :
             * tmpIn holds one block (+ optional checksum), tmpOutBuffer additionally
             * reserves 128 KB of history space in linked-block mode */
            {   size_t const bufferNeeded = dctx->maxBlockSize
                    + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
                if (bufferNeeded > dctx->maxBufferSize) {   /* tmp buffers too small */
                    dctx->maxBufferSize = 0;   /* ensure allocation will be re-attempted on next entry*/
                    LZ4F_free(dctx->tmpIn, dctx->cmem);
                    dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
                    LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
                    dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
                    RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed);
                    dctx->maxBufferSize = bufferNeeded;
            }   }
            dctx->tmpInSize = 0;
            dctx->tmpInTarget = 0;
            dctx->tmpOut = dctx->tmpOutBuffer;
            dctx->tmpOutStart = 0;
            dctx->tmpOutSize = 0;

            dctx->dStage = dstage_getBlockHeader;
            /* fall-through */

        case dstage_getBlockHeader:
            if ((size_t)(srcEnd - srcPtr) >= BHSize) {
                /* read block header directly from input */
                selectedIn = srcPtr;
                srcPtr += BHSize;
            } else {
                /* not enough input to read cBlockSize field */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeBlockHeader;
            }

            if (dctx->dStage == dstage_storeBlockHeader)   /* can be skipped */
        case dstage_storeBlockHeader:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = BHSize - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < BHSize) {   /* not enough input for cBlockSize */
                    nextSrcSizeHint = BHSize - dctx->tmpInSize;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeBlockHeader) */

            /* decode block header : high bit = uncompressed flag, low 31 bits = block size */
            {   U32 const blockHeader = LZ4F_readLE32(selectedIn);
                size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
                size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
                if (blockHeader==0) {  /* frameEnd signal, no more block */
                    DEBUGLOG(5, "end of frame");
                    dctx->dStage = dstage_getSuffix;
                    break;
                }
                if (nextCBlockSize > dctx->maxBlockSize) {
                    RETURN_ERROR(maxBlockSize_invalid);
                }
                if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
                    /* next block is uncompressed */
                    dctx->tmpInTarget = nextCBlockSize;
                    DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
                    if (dctx->frameInfo.blockChecksumFlag) {
                        (void)XXH32_reset(&dctx->blockChecksum, 0);
                    }
                    dctx->dStage = dstage_copyDirect;
                    break;
                }
                /* next block is a compressed block */
                dctx->tmpInTarget = nextCBlockSize + crcSize;
                dctx->dStage = dstage_getCBlock;
                if (dstPtr==dstEnd || srcPtr==srcEnd) {
                    /* no room / no input left : stop here and announce what the next call needs */
                    nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
                    doAnotherStage = 0;
                }
                break;
            }

        case dstage_copyDirect:   /* uncompressed block : straight src -> dst copy */
            DEBUGLOG(6, "dstage_copyDirect");
            {   size_t sizeToCopy;
                if (dstPtr == NULL) {
                    sizeToCopy = 0;
                } else {
                    size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
                    sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
                    memcpy(dstPtr, srcPtr, sizeToCopy);
                    if (!dctx->skipChecksum) {
                        if (dctx->frameInfo.blockChecksumFlag) {
                            (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
                        }
                        if (dctx->frameInfo.contentChecksumFlag)
                            (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
                    }
                    if (dctx->frameInfo.contentSize)
                        dctx->frameRemainingSize -= sizeToCopy;

                    /* history management (linked blocks only)*/
                    if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                        LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
                    }

                    srcPtr += sizeToCopy;
                    dstPtr += sizeToCopy;
                }
                if (sizeToCopy == dctx->tmpInTarget) {   /* all done */
                    if (dctx->frameInfo.blockChecksumFlag) {
                        dctx->tmpInSize = 0;
                        dctx->dStage = dstage_getBlockChecksum;
                    } else
                        dctx->dStage = dstage_getBlockHeader;  /* new block */
                    break;
                }
                dctx->tmpInTarget -= sizeToCopy;  /* need to copy more */
            }
            nextSrcSizeHint = dctx->tmpInTarget +
                            +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                            + BHSize /* next header size */;
            doAnotherStage = 0;
            break;

        /* check block checksum for recently transferred uncompressed block */
        case dstage_getBlockChecksum:
            DEBUGLOG(6, "dstage_getBlockChecksum");
            {   const void* crcSrc;
                if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
                    /* 4 CRC bytes fully available in input */
                    crcSrc = srcPtr;
                    srcPtr += 4;
                } else {
                    /* accumulate CRC bytes into dctx->header (reused as scratch) */
                    size_t const stillToCopy = 4 - dctx->tmpInSize;
                    size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
                    memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                    dctx->tmpInSize += sizeToCopy;
                    srcPtr += sizeToCopy;
                    if (dctx->tmpInSize < 4) {  /* all input consumed, CRC still incomplete */
                        doAnotherStage = 0;
                        break;
                    }
                    crcSrc = dctx->header;
                }
                if (!dctx->skipChecksum) {
                    U32 const readCRC = LZ4F_readLE32(crcSrc);
                    U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    DEBUGLOG(6, "compare block checksum");
                    if (readCRC != calcCRC) {
                        DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
                                readCRC, calcCRC);
                        RETURN_ERROR(blockChecksum_invalid);
                    }
#else
                    (void)readCRC;
                    (void)calcCRC;
#endif
            }   }
            dctx->dStage = dstage_getBlockHeader;  /* new block */
            break;

        case dstage_getCBlock:
            DEBUGLOG(6, "dstage_getCBlock");
            if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeCBlock;
                break;
            }
            /* input large enough to read full block directly */
            selectedIn = srcPtr;
            srcPtr += dctx->tmpInTarget;

            if (0)  /* always jump over next block */
        case dstage_storeCBlock:
            {   size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
                size_t const inputLeft = (size_t)(srcEnd-srcPtr);
                size_t const sizeToCopy = MIN(wantedData, inputLeft);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                dctx->tmpInSize += sizeToCopy;
                srcPtr += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
                    nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
                                    + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
                                    + BHSize /* next header size */;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }

            /* At this stage, input is large enough to decode a block */

            /* First, decode and control block checksum if it exists */
            if (dctx->frameInfo.blockChecksumFlag) {
                assert(dctx->tmpInTarget >= 4);
                dctx->tmpInTarget -= 4;   /* exclude trailing CRC from payload */
                assert(selectedIn != NULL);  /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
                {   U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
                    U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                    RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
#else
                    (void)readBlockCrc;
                    (void)calcBlockCrc;
#endif
            }   }

            /* decode directly into destination buffer if there is enough room */
            if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
                /* unless the dictionary is stored in tmpOut:
                 * in which case it's faster to decode within tmpOut
                 * to benefit from prefix speedup */
              && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
            {
                const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                assert(dstPtr != NULL);
                if (dict && dictSize > 1 GB) {
                    /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dstPtr,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
                    XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;

                /* dictionary management */
                if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
                    LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
                }

                dstPtr += decodedSize;
                dctx->dStage = dstage_getBlockHeader;  /* end of block, let's get another one */
                break;
            }

            /* not enough place into dst : decode into tmpOut */

            /* manage dictionary */
            if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
                if (dctx->dict == dctx->tmpOutBuffer) {
                    /* truncate dictionary to 64 KB if too big */
                    if (dctx->dictSize > 128 KB) {
                        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
                        dctx->dictSize = 64 KB;
                    }
                    dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
                } else {  /* dict not within tmpOut */
                    size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
                    dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
            }   }

            /* Decode block into tmpOut */
            {   const char* dict = (const char*)dctx->dict;
                size_t dictSize = dctx->dictSize;
                int decodedSize;
                if (dict && dictSize > 1 GB) {
                    /* the dictSize param is an int, avoid truncation / sign issues */
                    dict += dictSize - 64 KB;
                    dictSize = 64 KB;
                }
                decodedSize = LZ4_decompress_safe_usingDict(
                        (const char*)selectedIn, (char*)dctx->tmpOut,
                        (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
                        dict, (int)dictSize);
                RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
                if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
                    XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
                if (dctx->frameInfo.contentSize)
                    dctx->frameRemainingSize -= (size_t)decodedSize;
                dctx->tmpOutSize = (size_t)decodedSize;
                dctx->tmpOutStart = 0;
                dctx->dStage = dstage_flushOut;
            }
            /* fall-through */

        case dstage_flushOut:  /* flush decoded data from tmpOut to dstBuffer */
            DEBUGLOG(6, "dstage_flushOut");
            if (dstPtr != NULL) {
                size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
                memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);

                /* dictionary management */
                if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
                    LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);

                dctx->tmpOutStart += sizeToCopy;
                dstPtr += sizeToCopy;
            }
            if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
                dctx->dStage = dstage_getBlockHeader;  /* get next block */
                break;
            }
            /* could not flush everything : stop there, just request a block header */
            doAnotherStage = 0;
            nextSrcSizeHint = BHSize;
            break;

        case dstage_getSuffix:
            RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong);   /* incorrect frame size decoded */
            if (!dctx->frameInfo.contentChecksumFlag) {  /* no checksum, frame is completed */
                nextSrcSizeHint = 0;
                LZ4F_resetDecompressionContext(dctx);
                doAnotherStage = 0;
                break;
            }
            if ((srcEnd - srcPtr) < 4) {  /* not enough size for entire CRC */
                dctx->tmpInSize = 0;
                dctx->dStage = dstage_storeSuffix;
            } else {
                selectedIn = srcPtr;
                srcPtr += 4;
            }

            if (dctx->dStage == dstage_storeSuffix)   /* can be skipped */
        case dstage_storeSuffix:
            {   size_t const remainingInput = (size_t)(srcEnd - srcPtr);
                size_t const wantedData = 4 - dctx->tmpInSize;
                size_t const sizeToCopy = MIN(wantedData, remainingInput);
                memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
                    nextSrcSizeHint = 4 - dctx->tmpInSize;
                    doAnotherStage=0;
                    break;
                }
                selectedIn = dctx->tmpIn;
            }   /* if (dctx->dStage == dstage_storeSuffix) */

        /* case dstage_checkSuffix: */   /* no direct entry, avoid initialization risks */
            if (!dctx->skipChecksum) {
                U32 const readCRC = LZ4F_readLE32(selectedIn);
                U32 const resultCRC = XXH32_digest(&(dctx->xxh));
                DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
                RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
#else
                (void)readCRC;
                (void)resultCRC;
#endif
            }
            nextSrcSizeHint = 0;
            LZ4F_resetDecompressionContext(dctx);
            doAnotherStage = 0;
            break;

        case dstage_getSFrameSize:
            if ((srcEnd - srcPtr) >= 4) {
                selectedIn = srcPtr;
                srcPtr += 4;
            } else {
                /* not enough input to read cBlockSize field */
                dctx->tmpInSize = 4;
                dctx->tmpInTarget = 8;
                dctx->dStage = dstage_storeSFrameSize;
            }

            if (dctx->dStage == dstage_storeSFrameSize)
        case dstage_storeSFrameSize:
            {   size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
                                              (size_t)(srcEnd - srcPtr) );
                memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
                srcPtr += sizeToCopy;
                dctx->tmpInSize += sizeToCopy;
                if (dctx->tmpInSize < dctx->tmpInTarget) {
                    /* not enough input to get full sBlockSize; wait for more */
                    nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
                    doAnotherStage = 0;
                    break;
                }
                selectedIn = dctx->header + 4;   /* skippable frame size sits after the 4 magic bytes */
            }   /* if (dctx->dStage == dstage_storeSFrameSize) */

        /* case dstage_decodeSFrameSize: */   /* no direct entry */
            {   size_t const SFrameSize = LZ4F_readLE32(selectedIn);
                dctx->frameInfo.contentSize = SFrameSize;
                dctx->tmpInTarget = SFrameSize;
                dctx->dStage = dstage_skipSkippable;
                break;
            }

        case dstage_skipSkippable:
            {   size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
                srcPtr += skipSize;
                dctx->tmpInTarget -= skipSize;
                doAnotherStage = 0;
                nextSrcSizeHint = dctx->tmpInTarget;
                if (nextSrcSizeHint) break;  /* still more to skip */
                /* frame fully skipped : prepare context for a new frame */
                LZ4F_resetDecompressionContext(dctx);
                break;
            }
        }   /* switch (dctx->dStage) */
    }   /* while (doAnotherStage) */

    /* preserve history within tmpOut whenever necessary */
    LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
    if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked)  /* next block will use up to 64KB from previous ones */
      && (dctx->dict != dctx->tmpOutBuffer)             /* dictionary is not already within tmp */
      && (dctx->dict != NULL)                           /* dictionary exists */
      && (!decompressOptionsPtr->stableDst)             /* cannot rely on dst data to remain there for next call */
      && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) )  /* valid stages : [init ... getSuffix[ */
    {
        if (dctx->dStage == dstage_flushOut) {
            size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
            size_t copySize = 64 KB - dctx->tmpOutSize;   /* may wrap when tmpOutSize > 64 KB : repaired two lines below */
            const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
            if (dctx->tmpOutSize > 64 KB) copySize = 0;
            if (copySize > preserveSize) copySize = preserveSize;
            assert(dctx->tmpOutBuffer != NULL);

            memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = preserveSize + dctx->tmpOutStart;
        } else {
            /* move the last 64 KB of history into tmpOutBuffer */
            const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
            size_t const newDictSize = MIN(dctx->dictSize, 64 KB);

            memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);

            dctx->dict = dctx->tmpOutBuffer;
            dctx->dictSize = newDictSize;
            dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
        }
    }

    /* report nb of bytes consumed / produced */
    *srcSizePtr = (size_t)(srcPtr - srcStart);
    *dstSizePtr = (size_t)(dstPtr - dstStart);
    return nextSrcSizeHint;
}
  1872. /*! LZ4F_decompress_usingDict() :
  1873. * Same as LZ4F_decompress(), using a predefined dictionary.
  1874. * Dictionary is used "in place", without any preprocessing.
  1875. * It must remain accessible throughout the entire frame decoding.
  1876. */
  1877. size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
  1878. void* dstBuffer, size_t* dstSizePtr,
  1879. const void* srcBuffer, size_t* srcSizePtr,
  1880. const void* dict, size_t dictSize,
  1881. const LZ4F_decompressOptions_t* decompressOptionsPtr)
  1882. {
  1883. if (dctx->dStage <= dstage_init) {
  1884. dctx->dict = (const BYTE*)dict;
  1885. dctx->dictSize = dictSize;
  1886. }
  1887. return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
  1888. srcBuffer, srcSizePtr,
  1889. decompressOptionsPtr);
  1890. }