// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       range_decoder.h
/// \brief      Range Decoder
///
//  Authors:    Igor Pavlov
//              Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_RANGE_DECODER_H
#define LZMA_RANGE_DECODER_H

#include "range_common.h"

// Choose the range decoder variants to use via a bitmask.
// If no bits are set, only the basic version is used.
// If more than one version is selected for the same feature,
// the last one on the list below is used.
//
// Bitwise-or of the following enable branchless C versions:
//   0x01   normal bittrees
//   0x02   fixed-sized reverse bittrees
//   0x04   variable-sized reverse bittrees (not faster)
//   0x08   matched literal (not faster)
//
// GCC & Clang compatible x86-64 inline assembly:
//   0x010   normal bittrees
//   0x020   fixed-sized reverse bittrees
//   0x040   variable-sized reverse bittrees
//   0x080   matched literal
//   0x100   direct bits
//
// The default can be overridden at build time by defining
// LZMA_RANGE_DECODER_CONFIG to the desired mask.
//
// 2024-02-22: Feedback from benchmarks:
// - Branchless C (0x003) can be better than basic on x86-64 but often it's
//   slightly worse on other archs. Since asm is much better on x86-64,
//   branchless C is not used at all.
// - With x86-64 asm, there are slight differences between GCC and Clang
//   and different processors. Overall 0x1F0 seems to be the best choice.
#ifndef LZMA_RANGE_DECODER_CONFIG
#	if defined(__x86_64__) && !defined(__ILP32__) \
			&& !defined(__NVCOMPILER) \
			&& (defined(__GNUC__) || defined(__clang__))
#		define LZMA_RANGE_DECODER_CONFIG 0x1F0
#	else
#		define LZMA_RANGE_DECODER_CONFIG 0
#	endif
#endif

// Negative RC_BIT_MODEL_TOTAL but with the lowest RC_MOVE_BITS bits flipped.
// This is useful for updating probability variables in branchless decoding:
//
//     uint32_t decoded_bit = ...;
//     probability tmp = RC_BIT_MODEL_OFFSET;
//     tmp &= decoded_bit - 1;
//     prob -= (prob + tmp) >> RC_MOVE_BITS;
#define RC_BIT_MODEL_OFFSET \
	((UINT32_C(1) << RC_MOVE_BITS) - 1 - RC_BIT_MODEL_TOTAL)
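
// (Illustrative worked check, not part of the original comments: with
// RC_BIT_MODEL_TOTAL = 2048, RC_MOVE_BITS = 5, and prob = 1024, a decoded
// bit of 0 keeps tmp = RC_BIT_MODEL_OFFSET and the branchless update above
// yields 1056, the same as rc_update_0()'s
// "prob += (RC_BIT_MODEL_TOTAL - prob) >> RC_MOVE_BITS". A decoded bit of 1
// clears tmp and the update reduces to "prob -= prob >> RC_MOVE_BITS".
// The sum has to be evaluated in 32-bit arithmetic; rc_c_bit() below does
// that by keeping the offset in a uint32_t mask.)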

typedef struct {
	uint32_t range;
	uint32_t code;
	uint32_t init_bytes_left;
} lzma_range_decoder;

/// Reads the first five bytes to initialize the range decoder.
static inline lzma_ret
rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	while (rc->init_bytes_left > 0) {
		if (*in_pos == in_size)
			return LZMA_OK;

		// The first byte is always 0x00. It could have been omitted
		// in LZMA2 but it wasn't, so one byte is wasted in every
		// LZMA2 chunk.
		if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)
			return LZMA_DATA_ERROR;

		rc->code = (rc->code << 8) | in[*in_pos];
		++*in_pos;
		--rc->init_bytes_left;
	}

	return LZMA_STREAM_END;
}
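
// (Illustrative sketch, not the actual call site: a decoder typically keeps
// feeding input to rc_read_init() until it no longer returns LZMA_OK, e.g.
//
//     const lzma_ret ret = rc_read_init(&coder->rc, in, in_pos, in_size);
//     if (ret != LZMA_STREAM_END)
//         return ret; // LZMA_OK = more input needed, or LZMA_DATA_ERROR
//
// where "coder" is a hypothetical decoder state struct.)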

/// Makes local copies of the range decoder and *in_pos variables. Doing this
/// improves speed significantly. The range decoder macros also expect the
/// variables 'in' and 'in_size' to be defined.
#define rc_to_local(range_decoder, in_pos, fast_mode_in_required) \
	lzma_range_decoder rc = range_decoder; \
	const uint8_t *rc_in_ptr = in + (in_pos); \
	const uint8_t *rc_in_end = in + in_size; \
	const uint8_t *rc_in_fast_end \
			= (rc_in_end - rc_in_ptr) <= (fast_mode_in_required) \
			? rc_in_ptr \
			: rc_in_end - (fast_mode_in_required); \
	(void)rc_in_fast_end; /* Silence a warning with HAVE_SMALL. */ \
	uint32_t rc_bound


/// Evaluates to true if there is enough input remaining to use fast mode.
#define rc_is_fast_allowed() (rc_in_ptr < rc_in_fast_end)


/// Stores the local copies back to the range decoder structure.
#define rc_from_local(range_decoder, in_pos) \
do { \
	range_decoder = rc; \
	in_pos = (size_t)(rc_in_ptr - in); \
} while (0)
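
// (Illustrative sketch of how these macros are meant to be paired, with
// hypothetical names; the real call sites are in the LZMA decoder:
//
//     rc_to_local(coder->rc, *in_pos, IN_REQUIRED_FOR_FAST_MODE);
//     // ... main decoding loop built from the rc_* macros below ...
// out:
//     rc_from_local(coder->rc, *in_pos);
// )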

/// Resets the range decoder structure.
#define rc_reset(range_decoder) \
do { \
	(range_decoder).range = UINT32_MAX; \
	(range_decoder).code = 0; \
	(range_decoder).init_bytes_left = 5; \
} while (0)


/// When decoding has been properly finished, rc.code is always zero unless
/// the input stream is corrupt. So checking this can catch some corrupt
/// files especially if they don't have any other integrity check.
#define rc_is_finished(range_decoder) \
	((range_decoder).code == 0)

// Read the next input byte if needed.
#define rc_normalize() \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)

/// If more input is needed but there is
/// no more input available, "goto out" is used to jump out of the main
/// decoder loop. The "_safe" macros are used in the Resumable decoder
/// mode in order to save the sequence to continue decoding from that
/// point later.
#define rc_normalize_safe(seq) \
do { \
	if (rc.range < RC_TOP_VALUE) { \
		if (rc_in_ptr == rc_in_end) { \
			coder->sequence = seq; \
			goto out; \
		} \
		rc.range <<= RC_SHIFT_BITS; \
		rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \
	} \
} while (0)
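
// (Illustrative note, not from the original comments: with the _safe macros
// the decoder is structured as a switch on coder->sequence with a case label
// per "seq" value, so that once more input arrives, execution resumes at the
// exact macro invocation that ran out of input and jumped to "out".)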

/// Start decoding a bit. This must be used together with rc_update_0()
/// and rc_update_1():
///
///     rc_if_0(prob) {
///         rc_update_0(prob);
///         // Do something
///     } else {
///         rc_update_1(prob);
///         // Do something else
///     }
///
#define rc_if_0(prob) \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)


#define rc_if_0_safe(prob, seq) \
	rc_normalize_safe(seq); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \
	if (rc.code < rc_bound)

/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 0.
///
/// The x86-64 assembly uses the commented method but it seems that,
/// at least on x86-64, the first version is slightly faster as C code.
#define rc_update_0(prob) \
do { \
	rc.range = rc_bound; \
	prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \
	/* prob -= ((prob) + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS; */ \
} while (0)


/// Update the range decoder state and the used probability variable to
/// match a decoded bit of 1.
#define rc_update_1(prob) \
do { \
	rc.range -= rc_bound; \
	rc.code -= rc_bound; \
	prob -= (prob) >> RC_MOVE_BITS; \
} while (0)

/// Decodes one bit and runs action0 or action1 depending on the decoded bit.
/// This macro is used as the last step in bittree reverse decoders since
/// those don't use "symbol" for anything else than indexing the probability
/// arrays.
#define rc_bit_last(prob, action0, action1) \
do { \
	rc_if_0(prob) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


#define rc_bit_last_safe(prob, action0, action1, seq) \
do { \
	rc_if_0_safe(prob, seq) { \
		rc_update_0(prob); \
		action0; \
	} else { \
		rc_update_1(prob); \
		action1; \
	} \
} while (0)


/// Decodes one bit, updates "symbol", and runs action0 or action1 depending
/// on the decoded bit.
#define rc_bit(prob, action0, action1) \
	rc_bit_last(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1);


#define rc_bit_safe(prob, action0, action1, seq) \
	rc_bit_last_safe(prob, \
		symbol <<= 1; action0, \
		symbol = (symbol << 1) + 1; action1, \
		seq);

// Unroll fixed-sized bittree decoding.
//
// A compile-time constant in final_add can be used to get rid of the high bit
// from symbol that is used for the array indexing (1U << bittree_bits).
// final_add may also be used to add an offset to the result (the LZMA length
// decoder does that).
//
// The reason to have final_add here is that in the asm code the addition
// can be done for free: in x86-64 there is the SBB instruction with -1 as
// the immediate value, and final_add is combined with that value.
#define rc_bittree_bit(prob) \
	rc_bit(prob, , )

#define rc_bittree3(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree6(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)

#define rc_bittree8(probs, final_add) \
do { \
	symbol = 1; \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	rc_bittree_bit(probs[symbol]); \
	symbol += (uint32_t)(final_add); \
} while (0)
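
// (Illustrative example, not from the original file: decoding a three-bit
// value with the high indexing bit removed can be written as
//
//     rc_bittree3(probs, -(UINT32_C(1) << 3));
//     // symbol is now in the range 0..7
//
// while the LZMA length decoder instead folds its length offset into
// final_add as described above.)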

// Fixed-sized reverse bittree
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_bit_last(probs[symbol + 1], , symbol += 1); \
	rc_bit_last(probs[symbol + 2], , symbol += 2); \
	rc_bit_last(probs[symbol + 4], , symbol += 4); \
	rc_bit_last(probs[symbol + 8], , symbol += 8); \
} while (0)

// Decode one bit from a variable-sized reverse bittree. The loop is done
// in the code that uses this macro. This could be changed if the assembly
// version benefited from having the loop done in assembly but it didn't
// seem so in early 2024.
//
// Also, if the loop was done here, the loop counter would likely be local
// to the macro so that it wouldn't modify yet another input variable.
// If a _safe version of a macro with a loop was done then a modifiable
// input variable couldn't be avoided though.
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_bit(probs[symbol], \
		, \
		dest += value_to_add_if_1);
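
// (Illustrative sketch of the caller-side loop, with hypothetical variable
// names; the real call sites are in the LZMA decoder:
//
//     uint32_t offset = 0;
//     symbol = 1;
//     do {
//         rc_bit_add_if_1(probs, dest, UINT32_C(1) << offset);
//         ++offset;
//     } while (--limit > 0);
// )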

// Matched literal
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_bit(probs[t_subcoder_index], \
				t_offset &= ~t_match_bit, \
				t_offset &= t_match_bit)

#define rc_matched_literal(probs_base_var, match_byte) \
do { \
	uint32_t t_match_byte = (match_byte); \
	uint32_t t_match_bit; \
	uint32_t t_subcoder_index; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
	decode_with_match_bit; \
} while (0)
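
// (Explanatory note, not from the original comments: t_offset stays at 0x100
// for as long as every decoded bit equals the corresponding bit of
// match_byte, so the probabilities are picked from the "match bit is 0" or
// "match bit is 1" subtree. On the first mismatch t_offset drops to 0 and
// stays 0, and the remaining bits use the plain literal probabilities
// indexed by symbol alone.)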

/// Decode a bit without using a probability.
//
// NOTE: GCC 13 and Clang/LLVM 16 can, at least on x86-64, optimize the bound
// calculation to use an arithmetic right shift so there's no need to provide
// the alternative code which, according to C99/C11/C23 6.3.1.3-p3 isn't
// perfectly portable: rc_bound = (uint32_t)((int32_t)rc.code >> 31);
#define rc_direct(dest, count_var) \
do { \
	dest = (dest << 1) + 1; \
	rc_normalize(); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	dest += rc_bound; \
	rc.code += rc.range & rc_bound; \
} while (--count_var > 0)


#define rc_direct_safe(dest, count_var, seq) \
do { \
	rc_normalize_safe(seq); \
	rc.range >>= 1; \
	rc.code -= rc.range; \
	rc_bound = UINT32_C(0) - (rc.code >> 31); \
	rc.code += rc.range & rc_bound; \
	dest = (dest << 1) + (rc_bound + 1); \
} while (--count_var > 0)
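
// (Explanatory note, not from the original comments: after
// "rc.code -= rc.range" the high bit of rc.code is set exactly when the
// subtraction went negative, i.e. when the decoded bit is 0. rc_bound then
// becomes 0xFFFFFFFF for a 0-bit and 0 for a 1-bit, so
// "rc.code += rc.range & rc_bound" restores rc.code only in the 0-bit case,
// and "dest += rc_bound" cancels the speculative "+ 1" that rc_direct()
// does at the top; rc_direct_safe() instead adds rc_bound + 1 at the end.)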

//////////////////
// Branchless C //
//////////////////

/// Decode a bit using a branchless method. This reduces the number of
/// mispredicted branches and thus can improve speed.
#define rc_c_bit(prob, action_bit, action_neg) \
do { \
	probability *p = &(prob); \
	rc_normalize(); \
	rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * *p; \
	uint32_t rc_mask = rc.code >= rc_bound; /* rc_mask = decoded bit */ \
	action_bit; /* action when rc_mask is 0 or 1 */ \
	/* rc_mask becomes 0 if bit is 0 and 0xFFFFFFFF if bit is 1: */ \
	rc_mask = 0U - rc_mask; \
	rc.range &= rc_mask; /* If bit 0: set rc.range = 0 */ \
	rc_bound ^= rc_mask; \
	rc_bound -= rc_mask; /* If bit 1: rc_bound = 0U - rc_bound */ \
	rc.range += rc_bound; \
	rc_bound &= rc_mask; \
	rc.code += rc_bound; \
	action_neg; /* action when rc_mask is 0 or 0xFFFFFFFF */ \
	rc_mask = ~rc_mask; /* If bit 0: all bits are set in rc_mask */ \
	rc_mask &= RC_BIT_MODEL_OFFSET; \
	*p -= (*p + rc_mask) >> RC_MOVE_BITS; \
} while (0)
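
// (Illustrative note, not from the original comments: for a given input the
// macro above leaves rc.range, rc.code, and *p in the same state as the
// branchy sequence
//
//     if (rc.code < rc_bound) {
//         rc.range = rc_bound;
//         *p += (RC_BIT_MODEL_TOTAL - *p) >> RC_MOVE_BITS;
//     } else {
//         rc.range -= rc_bound;
//         rc.code -= rc_bound;
//         *p -= *p >> RC_MOVE_BITS;
//     }
//
// but without a data-dependent branch.)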

// Testing on x86-64 gives an impression that only the normal bittrees and
// the fixed-sized reverse bittrees are worth the branchless C code.
// It should be tested on other archs for which there isn't assembly code
// in this file.

// Using addition in "(symbol << 1) + rc_mask" allows use of x86 LEA
// or RISC-V SH1ADD instructions. Compilers might infer it from
// "(symbol << 1) | rc_mask" too if they see that the mask is 0 or 1 but
// the use of addition doesn't require such analysis from compilers.
#if LZMA_RANGE_DECODER_CONFIG & 0x01
#undef rc_bittree_bit
#define rc_bittree_bit(prob) \
	rc_c_bit(prob, \
		symbol = (symbol << 1) + rc_mask, \
		)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x01

#if LZMA_RANGE_DECODER_CONFIG & 0x02
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs) \
do { \
	symbol = 0; \
	rc_c_bit(probs[symbol + 1], symbol += rc_mask, ); \
	rc_c_bit(probs[symbol + 2], symbol += rc_mask << 1, ); \
	rc_c_bit(probs[symbol + 4], symbol += rc_mask << 2, ); \
	rc_c_bit(probs[symbol + 8], symbol += rc_mask << 3, ); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x02

#if LZMA_RANGE_DECODER_CONFIG & 0x04
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \
	rc_c_bit(probs[symbol], \
		symbol = (symbol << 1) + rc_mask, \
		dest += (value_to_add_if_1) & rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x04

#if LZMA_RANGE_DECODER_CONFIG & 0x08
#undef decode_with_match_bit
#define decode_with_match_bit \
		t_match_byte <<= 1; \
		t_match_bit = t_match_byte & t_offset; \
		t_subcoder_index = t_offset + t_match_bit + symbol; \
		rc_c_bit(probs[t_subcoder_index], \
				symbol = (symbol << 1) + rc_mask, \
				t_offset &= ~t_match_bit ^ rc_mask)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x08

////////////
// x86-64 //
////////////

#if LZMA_RANGE_DECODER_CONFIG & 0x1F0

// rc_asm_y and rc_asm_n are used as arguments to macros to control which
// strings to include or omit.
#define rc_asm_y(str) str
#define rc_asm_n(str)

// There are a few possible variations for normalization.
// This is the smallest variant which is also used by LZMA SDK.
//
//   - This has a partial register write (the MOV from (%[in_ptr])).
//
//   - INC saves one byte in code size over ADD. False dependency on
//     partial flags from INC shouldn't become a problem on any processor
//     because the instructions after normalization don't read the flags
//     until SUB which sets all flags.
//
#define rc_asm_normalize \
	"cmp %[top_value], %[range]\n\t" \
	"jae 1f\n\t" \
	"shl %[shift_bits], %[code]\n\t" \
	"mov (%[in_ptr]), %b[code]\n\t" \
	"shl %[shift_bits], %[range]\n\t" \
	"inc %[in_ptr]\n" \
	"1:\n"
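
// (Illustrative note, not from the original comments: this is the assembly
// counterpart of the C rc_normalize() macro above. CMP/JAE implement the
// "rc.range < RC_TOP_VALUE" test, the two SHL instructions are the
// RC_SHIFT_BITS shifts, and the byte-sized MOV plus INC load *rc_in_ptr++
// into the low byte of the freshly shifted code.)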

// rc_asm_calc(prob) is roughly equivalent to the C version of rc_if_0(prob)...
//
//     rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     if (rc.code < rc_bound)
//
// ...but the bound is stored in "range":
//
//     t0 = range;
//     range = (range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);
//     t0 -= range;
//     t1 = code;
//     code -= range;
//
// The carry flag (CF) from the last subtraction holds the negation of
// the decoded bit (if CF==0 then the decoded bit is 1).
// The values in t0 and t1 are needed for rc_update_0(prob) and
// rc_update_1(prob). If the bit is 0, rc_update_0(prob)...
//
//     rc.range = rc_bound;
//
// ...has already been done but the "code -= range" has to be reverted using
// the old value stored in t1. (Also, prob needs to be updated.)
//
// If the bit is 1, rc_update_1(prob)...
//
//     rc.range -= rc_bound;
//     rc.code -= rc_bound;
//
// ...is already done for "code" but the value for "range" needs to be taken
// from t0. (Also, prob needs to be updated here as well.)
//
// The assignments from t0 and t1 can be done in a branchless manner with CMOV
// after the instructions from this macro. The CF from SUB tells which moves
// are needed.
#define rc_asm_calc(prob) \
		"mov %[range], %[t0]\n\t" \
		"shr %[bit_model_total_bits], %[range]\n\t" \
		"imul %[" prob "], %[range]\n\t" \
		"sub %[range], %[t0]\n\t" \
		"mov %[code], %[t1]\n\t" \
		"sub %[range], %[code]\n\t"

// Also, prob needs to be updated: The update math depends on the decoded bit.
// It can be expressed in a few slightly different ways but this is fairly
// convenient here:
//
//     prob -= (prob + (bit ? 0 : RC_BIT_MODEL_OFFSET)) >> RC_MOVE_BITS;
//
// To do it in a branchless way when the negation of the decoded bit is in CF,
// both "prob" and "prob + RC_BIT_MODEL_OFFSET" are needed. Then the desired
// value can be picked with CMOV. The addition can be done using LEA without
// affecting CF.
//
// (This prob update method is a tiny bit different from LZMA SDK 23.01.
// In the LZMA SDK a single register is reserved solely for a constant to
// be used with CMOV when updating prob. That is fine since there are enough
// free registers to do so. The method used here uses one fewer register,
// which is valuable with inline assembly.)
//
// * * *
//
// In bittree decoding, each (unrolled) loop iteration decodes one bit
// and needs one prob variable. To make it faster, the prob variable of
// the iteration N+1 is loaded during iteration N. There are two possible
// prob variables to choose from for N+1. Both are loaded from memory and
// the correct one is chosen with CMOV using the same CF as is used for
// other things described above.
//
// This preloading/prefetching requires an extra register. To avoid
// useless moves from "preloaded prob register" to "current prob register",
// the macros swap between the two registers for odd and even iterations.
//
// * * *
//
// Finally, the decoded bit has to be stored in "symbol". Since the negation
// of the bit is in CF, this can be done with SBB: symbol -= CF - 1. That is,
// if the decoded bit is 0 (CF==1) the operation is a no-op "symbol -= 0"
// and when the bit is 1 (CF==0) the operation is "symbol -= 0 - 1" which is
// the same as "symbol += 1".
//
// The instructions for all things are intertwined for a few reasons:
//   - freeing temporary registers for new use
//   - not modifying CF too early
//   - instruction scheduling
//
// The first and last iterations can cheat a little. For example,
// on the first iteration "symbol" is known to start from 1 so it
// doesn't need to be read; it can even be immediately initialized
// to 2 to prepare for the second iteration of the loop.
//
// * * *
//
// a = number of the current prob variable (0 or 1)
// b = number of the next prob variable (1 or 0)
// *_only = rc_asm_y or _n to include or exclude code marked with them
#define rc_asm_bittree(a, b, first_only, middle_only, last_only) \
	first_only( \
		"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \
		"mov $2, %[symbol]\n\t" \
		"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		/* Note the scaling of 4 instead of 2: */ \
		"movzwl (%[probs_base], %q[symbol], 4), %[prob" #b "]\n\t" \
	) \
	last_only( \
		"add %[symbol], %[symbol]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae %[t0], %[range]\n\t" \
		\
	first_only( \
		"movzwl 6(%[probs_base]), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl 2(%[probs_base], %q[symbol], 4), %[t0]\n\t" \
		"lea (%q[symbol], %q[symbol]), %[symbol]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovb %[t1], %[code]\n\t" \
		"mov %[symbol], %[t1]\n\t" \
		"cmovae %[prob" #a "], %[t0]\n\t" \
		\
	first_only( \
		"sbb $-1, %[symbol]\n\t" \
	) \
	middle_only( \
		"sbb $-1, %[symbol]\n\t" \
	) \
	last_only( \
		"sbb %[last_sbb], %[symbol]\n\t" \
	) \
		\
		"shr %[move_bits], %[t0]\n\t" \
		"sub %[t0], %[prob" #a "]\n\t" \
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov %w[prob" #a "], (%[probs_base], %q[t1], 1)\n\t"

// NOTE: The order of variables in __asm__ can affect speed and code size.
#define rc_asm_bittree_n(probs_base_var, final_add, asm_str) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob0; \
	uint32_t t_prob1; \
	\
	__asm__( \
		asm_str \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob0] "=&r"(t_prob0), \
		[prob1] "=&r"(t_prob1), \
		[symbol] "=&r"(symbol), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[last_sbb] "n"(-1 - (final_add)), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)

#if LZMA_RANGE_DECODER_CONFIG & 0x010
#undef rc_bittree3
#define rc_bittree3(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree6
#define rc_bittree6(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)

#undef rc_bittree8
#define rc_bittree8(probs_base_var, final_add) \
	rc_asm_bittree_n(probs_base_var, final_add, \
		rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \
	)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x010

// Fixed-sized reverse bittree
//
// This uses the indexing that constructs the final value in symbol directly.
//   add     = 1,  2,  4,  8
//   dcur    = -,  4,  8, 16
//   dnext0  = 4,  8, 16,  -
//   dnext1  = 6, 12, 24,  -
#define rc_asm_bittree_rev(a, b, add, dcur, dnext0, dnext1, \
		first_only, middle_only, last_only) \
	first_only( \
		"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \
		"xor %[symbol], %[symbol]\n\t" \
		"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl " #dnext0 "(%[probs_base], %q[symbol], 2), " \
			"%[prob" #b "]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob" #a) \
		\
		"cmovae %[t0], %[range]\n\t" \
		\
	first_only( \
		"movzwl 6(%[probs_base]), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
	middle_only( \
		"movzwl " #dnext1 "(%[probs_base], %q[symbol], 2), %[t0]\n\t" \
		"cmovae %[t0], %[prob" #b "]\n\t" \
	) \
		\
		"lea " #add "(%q[symbol]), %[t0]\n\t" \
		"cmovb %[t1], %[code]\n\t" \
	middle_only( \
		"mov %[symbol], %[t1]\n\t" \
	) \
	last_only( \
		"mov %[symbol], %[t1]\n\t" \
	) \
		"cmovae %[t0], %[symbol]\n\t" \
		"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \
		"cmovae %[prob" #a "], %[t0]\n\t" \
		\
		"shr %[move_bits], %[t0]\n\t" \
		"sub %[t0], %[prob" #a "]\n\t" \
	first_only( \
		"mov %w[prob" #a "], 2(%[probs_base])\n\t" \
	) \
	middle_only( \
		"mov %w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	) \
	last_only( \
		"mov %w[prob" #a "], " \
			#dcur "(%[probs_base], %q[t1], 2)\n\t" \
	)

#if LZMA_RANGE_DECODER_CONFIG & 0x020
#undef rc_bittree_rev4
#define rc_bittree_rev4(probs_base_var) \
	rc_asm_bittree_n(probs_base_var, 4, \
		rc_asm_bittree_rev(0, 1, 1, -, 4, 6, rc_asm_y, rc_asm_n, rc_asm_n) \
		rc_asm_bittree_rev(1, 0, 2, 4, 8, 12, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree_rev(0, 1, 4, 8, 16, 24, rc_asm_n, rc_asm_y, rc_asm_n) \
		rc_asm_bittree_rev(1, 0, 8, 16, -, -, rc_asm_n, rc_asm_n, rc_asm_y) \
	)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x020

#if LZMA_RANGE_DECODER_CONFIG & 0x040
#undef rc_bit_add_if_1
#define rc_bit_add_if_1(probs_base_var, dest_var, value_to_add_if_1) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t2 = (value_to_add_if_1); \
	uint32_t t_prob; \
	uint32_t t_index; \
	\
	__asm__( \
		"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		"mov %[symbol], %[index]\n\t" \
		\
		"add %[dest], %[t2]\n\t" \
		"add %[symbol], %[symbol]\n\t" \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae %[t0], %[range]\n\t" \
		"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb %[t1], %[code]\n\t" \
		"cmovae %[prob], %[t0]\n\t" \
		\
		"cmovae %[t2], %[dest]\n\t" \
		"sbb $-1, %[symbol]\n\t" \
		\
		"sar %[move_bits], %[t0]\n\t" \
		"sub %[t0], %[prob]\n\t" \
		"mov %w[prob], (%[probs_base], %q[index], 2)" \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob] "=&r"(t_prob), \
		[index] "=&r"(t_index), \
		[symbol] "+&r"(symbol), \
		[t2] "+&r"(t2), \
		[dest] "+&r"(dest_var), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x040

// Literal decoding uses a normal 8-bit bittree but a literal with a match
// byte is more complex in picking the probability variable from the correct
// subtree. This doesn't use preloading/prefetching of the next prob because
// there are four choices instead of two.
//
// FIXME? The first iteration starts with symbol = 1 so it could be optimized
// by a tiny amount.
#define rc_asm_matched_literal(nonlast_only) \
		"add %[offset], %[symbol]\n\t" \
		"and %[offset], %[match_bit]\n\t" \
		"add %[match_bit], %[symbol]\n\t" \
		\
		"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \
		\
		"add %[symbol], %[symbol]\n\t" \
		\
	nonlast_only( \
		"xor %[match_bit], %[offset]\n\t" \
		"add %[match_byte], %[match_byte]\n\t" \
	) \
		\
		rc_asm_normalize \
		rc_asm_calc("prob") \
		\
		"cmovae %[t0], %[range]\n\t" \
		"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \
		"cmovb %[t1], %[code]\n\t" \
		"mov %[symbol], %[t1]\n\t" \
		"cmovae %[prob], %[t0]\n\t" \
		\
	nonlast_only( \
		"cmovae %[match_bit], %[offset]\n\t" \
		"mov %[match_byte], %[match_bit]\n\t" \
	) \
		\
		"sbb $-1, %[symbol]\n\t" \
		\
		"shr %[move_bits], %[t0]\n\t" \
		/* Undo symbol += match_bit + offset: */ \
		"and $0x1FF, %[symbol]\n\t" \
		"sub %[t0], %[prob]\n\t" \
		\
		/* Scaling of 1 instead of 2 because symbol <<= 1. */ \
		"mov %w[prob], (%[probs_base], %q[t1], 1)\n\t"

#if LZMA_RANGE_DECODER_CONFIG & 0x080
#undef rc_matched_literal
#define rc_matched_literal(probs_base_var, match_byte_value) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	uint32_t t_prob; \
	uint32_t t_match_byte = (uint32_t)(match_byte_value) << 1; \
	uint32_t t_match_bit = t_match_byte; \
	uint32_t t_offset = 0x100; \
	symbol = 1; \
	\
	__asm__( \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_y) \
		rc_asm_matched_literal(rc_asm_n) \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[prob] "=&r"(t_prob), \
		[match_bit] "+&r"(t_match_bit), \
		[symbol] "+&r"(symbol), \
		[match_byte] "+&r"(t_match_byte), \
		[offset] "+&r"(t_offset), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[probs_base] "r"(probs_base_var), \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS), \
		[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \
		[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \
		[move_bits] "n"(RC_MOVE_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x080

// Doing the loop in asm instead of C seems to help a little.
#if LZMA_RANGE_DECODER_CONFIG & 0x100
#undef rc_direct
#define rc_direct(dest_var, count_var) \
do { \
	uint32_t t0; \
	uint32_t t1; \
	\
	__asm__( \
		"2:\n\t" \
		"add %[dest], %[dest]\n\t" \
		"lea 1(%q[dest]), %[t1]\n\t" \
		\
		rc_asm_normalize \
		\
		"shr $1, %[range]\n\t" \
		"mov %[code], %[t0]\n\t" \
		"sub %[range], %[code]\n\t" \
		"cmovns %[t1], %[dest]\n\t" \
		"cmovs %[t0], %[code]\n\t" \
		"dec %[count]\n\t" \
		"jnz 2b\n\t" \
		: \
		[range] "+&r"(rc.range), \
		[code] "+&r"(rc.code), \
		[t0] "=&r"(t0), \
		[t1] "=&r"(t1), \
		[dest] "+&r"(dest_var), \
		[count] "+&r"(count_var), \
		[in_ptr] "+&r"(rc_in_ptr) \
		: \
		[top_value] "n"(RC_TOP_VALUE), \
		[shift_bits] "n"(RC_SHIFT_BITS) \
		: \
		"cc", "memory"); \
} while (0)
#endif // LZMA_RANGE_DECODER_CONFIG & 0x100

#endif // x86_64

#endif