lz_encoder.c

// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       lz_encoder.c
/// \brief      LZ in window
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#include "lz_encoder.h"
#include "lz_encoder_hash.h"

// See lz_encoder_hash.h. This is a bit hackish but avoids making
// endianness a conditional in makefiles.
#if defined(WORDS_BIGENDIAN) && !defined(HAVE_SMALL)
#	include "lz_encoder_hash_table.h"
#endif

#include "memcmplen.h"


typedef struct {
	/// LZ-based encoder e.g. LZMA
	lzma_lz_encoder lz;

	/// History buffer and match finder
	lzma_mf mf;

	/// Next coder in the chain
	lzma_next_coder next;
} lzma_coder;


/// \brief      Moves the data in the input window to free space for new data
///
/// mf->buffer is a sliding input window, which keeps mf->keep_size_before
/// bytes of input history available all the time. Now and then we need to
/// "slide" the buffer to make space for the new data to the end of the
/// buffer. At the same time, data older than keep_size_before is dropped.
///
static void
move_window(lzma_mf *mf)
{
	// Align the move to a multiple of 16 bytes. Some LZ-based encoders
	// like LZMA use the lowest bits of mf->read_pos to know the
	// alignment of the uncompressed data. We also get better speed
	// for memmove() with aligned buffers.
	assert(mf->read_pos > mf->keep_size_before);
	const uint32_t move_offset
			= (mf->read_pos - mf->keep_size_before) & ~UINT32_C(15);

	assert(mf->write_pos > move_offset);
	const size_t move_size = mf->write_pos - move_offset;

	assert(move_offset + move_size <= mf->size);

	memmove(mf->buffer, mf->buffer + move_offset, move_size);

	mf->offset += move_offset;
	mf->read_pos -= move_offset;
	mf->read_limit -= move_offset;
	mf->write_pos -= move_offset;

	return;
}
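
// A worked example of the arithmetic above (illustrative numbers, not
// from the original source): with keep_size_before = 65536 and
// read_pos = 100003, move_offset = (100003 - 65536) & ~15 = 34464.
// The oldest 34464 bytes of history are discarded, read_pos, read_limit
// and write_pos all shift down by 34464, and mf->offset grows by the
// same amount so that absolute positions (offset + read_pos) stay stable.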


/// \brief      Tries to fill the input window (mf->buffer)
///
/// If we are the last encoder in the chain, our input data is in in[].
/// Otherwise we call the next filter in the chain to process in[] and
/// write its output to mf->buffer.
///
/// This function must not be called once it has returned LZMA_STREAM_END.
///
static lzma_ret
fill_window(lzma_coder *coder, const lzma_allocator *allocator,
		const uint8_t *in, size_t *in_pos, size_t in_size,
		lzma_action action)
{
	assert(coder->mf.read_pos <= coder->mf.write_pos);

	// Move the sliding window if needed.
	if (coder->mf.read_pos >= coder->mf.size - coder->mf.keep_size_after)
		move_window(&coder->mf);

	// Maybe this is ugly, but lzma_mf uses uint32_t for most things
	// (which I find cleanest), but we need size_t here when filling
	// the history window.
	size_t write_pos = coder->mf.write_pos;
	lzma_ret ret;
	if (coder->next.code == NULL) {
		// Not using a filter, simply memcpy() as much as possible.
		lzma_bufcpy(in, in_pos, in_size, coder->mf.buffer,
				&write_pos, coder->mf.size);

		ret = action != LZMA_RUN && *in_pos == in_size
				? LZMA_STREAM_END : LZMA_OK;

	} else {
		ret = coder->next.code(coder->next.coder, allocator,
				in, in_pos, in_size,
				coder->mf.buffer, &write_pos,
				coder->mf.size, action);
	}

	coder->mf.write_pos = write_pos;

	// Silence Valgrind. lzma_memcmplen() can read extra bytes
	// and Valgrind will give warnings if those bytes are uninitialized
	// because Valgrind cannot see that the values of the uninitialized
	// bytes are eventually ignored.
	memzero(coder->mf.buffer + write_pos, LZMA_MEMCMPLEN_EXTRA);

	// If end of stream has been reached or flushing completed, we allow
	// the encoder to process all the input (that is, read_pos is allowed
	// to reach write_pos). Otherwise we keep keep_size_after bytes
	// available as prebuffer.
	if (ret == LZMA_STREAM_END) {
		assert(*in_pos == in_size);
		ret = LZMA_OK;
		coder->mf.action = action;
		coder->mf.read_limit = coder->mf.write_pos;

	} else if (coder->mf.write_pos > coder->mf.keep_size_after) {
		// This needs to be done conditionally, because if we got
		// only little new input, there may be too little input
		// to do any encoding yet.
		coder->mf.read_limit = coder->mf.write_pos
				- coder->mf.keep_size_after;
	}

	// Restart the match finder after finished LZMA_SYNC_FLUSH.
	if (coder->mf.pending > 0
			&& coder->mf.read_pos < coder->mf.read_limit) {
		// Match finder may update coder->pending and expects it to
		// start from zero, so use a temporary variable.
		const uint32_t pending = coder->mf.pending;
		coder->mf.pending = 0;

		// Rewind read_pos so that the match finder can hash
		// the pending bytes.
		assert(coder->mf.read_pos >= pending);
		coder->mf.read_pos -= pending;

		// Call the skip function directly instead of using
		// mf_skip(), since we don't want to touch mf->read_ahead.
		coder->mf.skip(&coder->mf, pending);
	}

	return ret;
}
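
// Note on the "pending" mechanism above: during a sync flush the match
// finder may run out of lookahead before it can hash the last few input
// bytes; those bytes are counted in mf->pending. Once new input arrives,
// the code above rewinds read_pos and hashes them. This reading is
// inferred from the match finder code in lz_encoder_mf.c.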


static lzma_ret
lz_encode(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size, lzma_action action)
{
	lzma_coder *coder = coder_ptr;

	while (*out_pos < out_size
			&& (*in_pos < in_size || action != LZMA_RUN)) {
		// Read more data to coder->mf.buffer if needed.
		if (coder->mf.action == LZMA_RUN && coder->mf.read_pos
				>= coder->mf.read_limit)
			return_if_error(fill_window(coder, allocator,
					in, in_pos, in_size, action));

		// Encode
		const lzma_ret ret = coder->lz.code(coder->lz.coder,
				&coder->mf, out, out_pos, out_size);
		if (ret != LZMA_OK) {
			// Setting this to LZMA_RUN for cases when we are
			// flushing. It doesn't matter when finishing or if
			// an error occurred.
			coder->mf.action = LZMA_RUN;
			return ret;
		}
	}

	return LZMA_OK;
}
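
// The loop above follows the generic lzma_next_coder.code() contract:
// it alternates between refilling mf->buffer and letting the LZ-based
// encoder consume it until the output buffer is full, or the input is
// exhausted and action is LZMA_RUN.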


static bool
lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// For now, the dictionary size is limited to 1.5 GiB. This may grow
	// in the future if needed, but it needs a little more work than just
	// changing this check.
	if (!IS_ENC_DICT_SIZE_VALID(lz_options->dict_size)
			|| lz_options->nice_len > lz_options->match_len_max)
		return true;

	mf->keep_size_before = lz_options->before_size + lz_options->dict_size;

	mf->keep_size_after = lz_options->after_size
			+ lz_options->match_len_max;

	// To avoid constant memmove()s, allocate some extra space. Since
	// memmove()s become more expensive when the size of the buffer
	// increases, we reserve more space when a large dictionary is
	// used to make the memmove() calls rarer.
	//
	// This works with dictionaries up to about 3 GiB. If a bigger
	// dictionary is wanted, some extra work is needed:
	//   - Several variables in lzma_mf have to be changed from uint32_t
	//     to size_t.
	//   - The memory usage calculation needs something too, e.g. use
	//     uint64_t for mf->size.
	uint32_t reserve = lz_options->dict_size / 2;
	if (reserve > (UINT32_C(1) << 30))
		reserve /= 2;

	reserve += (lz_options->before_size + lz_options->match_len_max
			+ lz_options->after_size) / 2 + (UINT32_C(1) << 19);
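
	// (Illustrative arithmetic, not from the original source: with a
	// 64 MiB dictionary, reserve starts at 32 MiB, stays under the
	// 1 GiB cap, and then grows by half of the before/after/match-len
	// slack plus 512 KiB, so mf->size below ends up a bit over 96 MiB.)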

	const uint32_t old_size = mf->size;
	mf->size = mf->keep_size_before + reserve + mf->keep_size_after;

	// Deallocate the old history buffer if it exists but has a different
	// size than what is needed now.
	if (mf->buffer != NULL && old_size != mf->size) {
		lzma_free(mf->buffer, allocator);
		mf->buffer = NULL;
	}

	// Match finder options
	mf->match_len_max = lz_options->match_len_max;
	mf->nice_len = lz_options->nice_len;

	// cyclic_size has to stay smaller than 2 Gi. Note that this doesn't
	// mean limiting dictionary size to less than 2 GiB. With a match
	// finder that uses multibyte resolution (hashes start at e.g. every
	// fourth byte), cyclic_size would stay below 2 Gi even when
	// dictionary size is greater than 2 GiB.
	//
	// It would be possible to allow cyclic_size >= 2 Gi, but then we
	// would need to be careful to use 64-bit types in various places
	// (size_t could do since we would need a bigger than 32-bit address
	// space anyway). It would also require either zeroing a multigigabyte
	// buffer at initialization (a waste of time and RAM) or allowing
	// normalization in lz_encoder_mf.c to access uninitialized memory
	// to keep the code simpler. The current way is simple and still
	// allows pretty big dictionaries, so I don't expect these limits
	// to change.
	mf->cyclic_size = lz_options->dict_size + 1;

	// Validate the match finder ID and setup the function pointers.
	switch (lz_options->match_finder) {
#ifdef HAVE_MF_HC3
	case LZMA_MF_HC3:
		mf->find = &lzma_mf_hc3_find;
		mf->skip = &lzma_mf_hc3_skip;
		break;
#endif
#ifdef HAVE_MF_HC4
	case LZMA_MF_HC4:
		mf->find = &lzma_mf_hc4_find;
		mf->skip = &lzma_mf_hc4_skip;
		break;
#endif
#ifdef HAVE_MF_BT2
	case LZMA_MF_BT2:
		mf->find = &lzma_mf_bt2_find;
		mf->skip = &lzma_mf_bt2_skip;
		break;
#endif
#ifdef HAVE_MF_BT3
	case LZMA_MF_BT3:
		mf->find = &lzma_mf_bt3_find;
		mf->skip = &lzma_mf_bt3_skip;
		break;
#endif
#ifdef HAVE_MF_BT4
	case LZMA_MF_BT4:
		mf->find = &lzma_mf_bt4_find;
		mf->skip = &lzma_mf_bt4_skip;
		break;
#endif

	default:
		return true;
	}

	// Calculate the sizes of mf->hash and mf->son.
	//
	// NOTE: Since 5.3.5beta the LZMA encoder ensures that nice_len
	// is big enough for the selected match finder. This makes it
	// easier for applications as nice_len = 2 will always be accepted
	// even though the effective value can be slightly bigger.
	const uint32_t hash_bytes
			= mf_get_hash_bytes(lz_options->match_finder);
	assert(hash_bytes <= mf->nice_len);

	const bool is_bt = (lz_options->match_finder & 0x10) != 0;

	uint32_t hs;
	if (hash_bytes == 2) {
		hs = 0xFFFF;
	} else {
		// Round dictionary size up to the next 2^n - 1 so it can
		// be used as a hash mask.
		hs = lz_options->dict_size - 1;
		hs |= hs >> 1;
		hs |= hs >> 2;
		hs |= hs >> 4;
		hs |= hs >> 8;
		hs >>= 1;
		hs |= 0xFFFF;

		if (hs > (UINT32_C(1) << 24)) {
			if (hash_bytes == 3)
				hs = (UINT32_C(1) << 24) - 1;
			else
				hs >>= 1;
		}
	}
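
	// (Illustrative example, not from the original source: with a
	// 1 MiB dictionary and hash_bytes == 4, the bit-smearing leaves
	// 0xFFFFF unchanged, the shift halves it to 0x7FFFF, and OR-ing
	// in 0xFFFF changes nothing, so hash_mask becomes 0x7FFFF and
	// the main hash table gets 0x80000 slots.)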

	mf->hash_mask = hs;

	++hs;
	if (hash_bytes > 2)
		hs += HASH_2_SIZE;
	if (hash_bytes > 3)
		hs += HASH_3_SIZE;
/*
	No match finder uses this at the moment.
	if (mf->hash_bytes > 4)
		hs += HASH_4_SIZE;
*/

	const uint32_t old_hash_count = mf->hash_count;
	const uint32_t old_sons_count = mf->sons_count;
	mf->hash_count = hs;
	mf->sons_count = mf->cyclic_size;
	if (is_bt)
		mf->sons_count *= 2;

	// Deallocate the old hash array if it exists and has a different
	// size than what is needed now.
	if (old_hash_count != mf->hash_count
			|| old_sons_count != mf->sons_count) {
		lzma_free(mf->hash, allocator);
		mf->hash = NULL;

		lzma_free(mf->son, allocator);
		mf->son = NULL;
	}

	// Maximum number of match finder cycles
	mf->depth = lz_options->depth;
	if (mf->depth == 0) {
		if (is_bt)
			mf->depth = 16 + mf->nice_len / 2;
		else
			mf->depth = 4 + mf->nice_len / 4;
	}
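
	// (Illustrative: e.g. with nice_len = 64 the formulas above give
	// a depth of 16 + 32 = 48 cycles for the binary tree match finders
	// and 4 + 16 = 20 for the hash chains.)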

	return false;
}


static bool
lz_encoder_init(lzma_mf *mf, const lzma_allocator *allocator,
		const lzma_lz_options *lz_options)
{
	// Allocate the history buffer.
	if (mf->buffer == NULL) {
		// lzma_memcmplen() is used for the dictionary buffer
		// so we need to allocate a few extra bytes to prevent
		// it from reading past the end of the buffer.
		mf->buffer = lzma_alloc(mf->size + LZMA_MEMCMPLEN_EXTRA,
				allocator);
		if (mf->buffer == NULL)
			return true;

		// Keep Valgrind happy with lzma_memcmplen() and initialize
		// the extra bytes whose value may get read but which will
		// effectively get ignored.
		memzero(mf->buffer + mf->size, LZMA_MEMCMPLEN_EXTRA);
	}

	// Use cyclic_size as the initial mf->offset. This allows
	// avoiding a few branches in the match finders. The downside is
	// that the match finder needs to be normalized more often, which
	// may hurt performance with huge dictionaries.
	mf->offset = mf->cyclic_size;
	mf->read_pos = 0;
	mf->read_ahead = 0;
	mf->read_limit = 0;
	mf->write_pos = 0;
	mf->pending = 0;

#if UINT32_MAX >= SIZE_MAX / 4
	// Check for integer overflow. (Huge dictionaries are not
	// possible on 32-bit CPU.)
	if (mf->hash_count > SIZE_MAX / sizeof(uint32_t)
			|| mf->sons_count > SIZE_MAX / sizeof(uint32_t))
		return true;
#endif

	// Allocate and initialize the hash table. Since EMPTY_HASH_VALUE
	// is zero, we can use lzma_alloc_zero() or memzero() for mf->hash.
	//
	// We don't need to initialize mf->son, but not doing that may
	// make Valgrind complain in normalization (see normalize() in
	// lz_encoder_mf.c). Skipping the initialization is *very* good
	// when a big dictionary is used but only a small amount of data
	// gets actually compressed: most of mf->son won't get actually
	// allocated by the kernel, so we avoid wasting RAM and improve
	// initialization speed a lot.
	if (mf->hash == NULL) {
		mf->hash = lzma_alloc_zero(mf->hash_count * sizeof(uint32_t),
				allocator);
		mf->son = lzma_alloc(mf->sons_count * sizeof(uint32_t),
				allocator);

		if (mf->hash == NULL || mf->son == NULL) {
			lzma_free(mf->hash, allocator);
			mf->hash = NULL;

			lzma_free(mf->son, allocator);
			mf->son = NULL;

			return true;
		}
	} else {
/*
		for (uint32_t i = 0; i < mf->hash_count; ++i)
			mf->hash[i] = EMPTY_HASH_VALUE;
*/
		memzero(mf->hash, mf->hash_count * sizeof(uint32_t));
	}

	mf->cyclic_pos = 0;

	// Handle preset dictionary.
	if (lz_options->preset_dict != NULL
			&& lz_options->preset_dict_size > 0) {
		// If the preset dictionary is bigger than the actual
		// dictionary, use only the tail.
		mf->write_pos = my_min(lz_options->preset_dict_size, mf->size);
		memcpy(mf->buffer, lz_options->preset_dict
				+ lz_options->preset_dict_size - mf->write_pos,
				mf->write_pos);
		mf->action = LZMA_SYNC_FLUSH;
		mf->skip(mf, mf->write_pos);
	}
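
	// (Illustrative: if preset_dict_size were 8 MiB but mf->size only
	// 4 MiB, just the last 4 MiB of the preset dictionary would be
	// copied in, and mf->skip() would hash those bytes so that later
	// matches can point into them.)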

	mf->action = LZMA_RUN;
	return false;
}


extern uint64_t
lzma_lz_encoder_memusage(const lzma_lz_options *lz_options)
{
	// Old buffers must not exist when calling lz_encoder_prepare().
	lzma_mf mf = {
		.buffer = NULL,
		.hash = NULL,
		.son = NULL,
		.hash_count = 0,
		.sons_count = 0,
	};

	// Setup the size information into mf.
	if (lz_encoder_prepare(&mf, NULL, lz_options))
		return UINT64_MAX;

	// Calculate the memory usage.
	return ((uint64_t)(mf.hash_count) + mf.sons_count) * sizeof(uint32_t)
			+ mf.size + sizeof(lzma_coder);
}
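
// (Illustrative figures, not from the original source: for BT4 with a
// 64 MiB dictionary, sons_count is 2 * (dict_size + 1), about 2^27
// entries, and hash_count is about 2^24, so the uint32_t arrays take
// roughly 512 MiB + 64 MiB on top of the ~96 MiB history buffer,
// in line with the ~674 MiB that xz reports for such settings.)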


static void
lz_encoder_end(void *coder_ptr, const lzma_allocator *allocator)
{
	lzma_coder *coder = coder_ptr;

	lzma_next_end(&coder->next, allocator);

	lzma_free(coder->mf.son, allocator);
	lzma_free(coder->mf.hash, allocator);
	lzma_free(coder->mf.buffer, allocator);

	if (coder->lz.end != NULL)
		coder->lz.end(coder->lz.coder, allocator);
	else
		lzma_free(coder->lz.coder, allocator);

	lzma_free(coder, allocator);
	return;
}


static lzma_ret
lz_encoder_update(void *coder_ptr, const lzma_allocator *allocator,
		const lzma_filter *filters_null lzma_attribute((__unused__)),
		const lzma_filter *reversed_filters)
{
	lzma_coder *coder = coder_ptr;

	if (coder->lz.options_update == NULL)
		return LZMA_PROG_ERROR;

	return_if_error(coder->lz.options_update(
			coder->lz.coder, reversed_filters));

	return lzma_next_filter_update(
			&coder->next, allocator, reversed_filters + 1);
}


static lzma_ret
lz_encoder_set_out_limit(void *coder_ptr, uint64_t *uncomp_size,
		uint64_t out_limit)
{
	lzma_coder *coder = coder_ptr;

	// This is supported only if there are no other filters chained.
	if (coder->next.code == NULL && coder->lz.set_out_limit != NULL)
		return coder->lz.set_out_limit(
				coder->lz.coder, uncomp_size, out_limit);

	return LZMA_OPTIONS_ERROR;
}


extern lzma_ret
lzma_lz_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter_info *filters,
		lzma_ret (*lz_init)(lzma_lz_encoder *lz,
			const lzma_allocator *allocator,
			lzma_vli id, const void *options,
			lzma_lz_options *lz_options))
{
#if defined(HAVE_SMALL) && !defined(HAVE_FUNC_ATTRIBUTE_CONSTRUCTOR)
	// The CRC32 table must be initialized.
	lzma_crc32_init();
#endif

	// Allocate and initialize the base data structure.
	lzma_coder *coder = next->coder;
	if (coder == NULL) {
		coder = lzma_alloc(sizeof(lzma_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;
		next->code = &lz_encode;
		next->end = &lz_encoder_end;
		next->update = &lz_encoder_update;
		next->set_out_limit = &lz_encoder_set_out_limit;

		coder->lz.coder = NULL;
		coder->lz.code = NULL;
		coder->lz.end = NULL;
		coder->lz.options_update = NULL;
		coder->lz.set_out_limit = NULL;

		// mf.size is initialized to silence Valgrind
		// when used on optimized binaries (GCC may reorder
		// code in a way that Valgrind gets unhappy).
		coder->mf.buffer = NULL;
		coder->mf.size = 0;
		coder->mf.hash = NULL;
		coder->mf.son = NULL;
		coder->mf.hash_count = 0;
		coder->mf.sons_count = 0;

		coder->next = LZMA_NEXT_CODER_INIT;
	}

	// Initialize the LZ-based encoder.
	lzma_lz_options lz_options;
	return_if_error(lz_init(&coder->lz, allocator,
			filters[0].id, filters[0].options, &lz_options));

	// Setup the size information into coder->mf and deallocate
	// old buffers if they have the wrong size.
	if (lz_encoder_prepare(&coder->mf, allocator, &lz_options))
		return LZMA_OPTIONS_ERROR;

	// Allocate new buffers if needed, and do the rest of
	// the initialization.
	if (lz_encoder_init(&coder->mf, allocator, &lz_options))
		return LZMA_MEM_ERROR;

	// Initialize the next filter in the chain, if any.
	return lzma_next_filter_init(&coder->next, allocator, filters + 1);
}


extern LZMA_API(lzma_bool)
lzma_mf_is_supported(lzma_match_finder mf)
{
	switch (mf) {
#ifdef HAVE_MF_HC3
	case LZMA_MF_HC3:
		return true;
#endif
#ifdef HAVE_MF_HC4
	case LZMA_MF_HC4:
		return true;
#endif
#ifdef HAVE_MF_BT2
	case LZMA_MF_BT2:
		return true;
#endif
#ifdef HAVE_MF_BT3
	case LZMA_MF_BT3:
		return true;
#endif
#ifdef HAVE_MF_BT4
	case LZMA_MF_BT4:
		return true;
#endif
	default:
		return false;
	}
}
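
// (Hypothetical usage sketch, not part of this file: an application can
// probe the build configuration before choosing a match finder. The
// lzma_lzma_preset() call and the option fields come from the public
// lzma.h headers.
//
//     lzma_options_lzma opt;
//     if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
//         abort();
//     if (!lzma_mf_is_supported(LZMA_MF_BT4))
//         opt.mf = LZMA_MF_HC3;
// )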