/*
 * jidctint-neon.c - accurate integer IDCT (Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited. All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#define JPEG_INTERNALS
#include "jconfigint.h"
#include "../../jinclude.h"
#include "../../jpeglib.h"
#include "../../jsimd.h"
#include "../../jdct.h"
#include "../../jsimddct.h"
#include "../jsimd.h"
#include "align.h"
#include "neon-compat.h"

#include <arm_neon.h>

#define CONST_BITS  13
#define PASS1_BITS  2

#define DESCALE_P1  (CONST_BITS - PASS1_BITS)
#define DESCALE_P2  (CONST_BITS + PASS1_BITS + 3)

/* The computation of the inverse DCT requires the use of constants known at
 * compile time. Scaled integer constants are used to avoid floating-point
 * arithmetic:
 *    0.298631336 =  2446 * 2^-13
 *    0.390180644 =  3196 * 2^-13
 *    0.541196100 =  4433 * 2^-13
 *    0.765366865 =  6270 * 2^-13
 *    0.899976223 =  7373 * 2^-13
 *    1.175875602 =  9633 * 2^-13
 *    1.501321110 = 12299 * 2^-13
 *    1.847759065 = 15137 * 2^-13
 *    1.961570560 = 16069 * 2^-13
 *    2.053119869 = 16819 * 2^-13
 *    2.562915447 = 20995 * 2^-13
 *    3.072711026 = 25172 * 2^-13
 */

#define F_0_298  2446
#define F_0_390  3196
#define F_0_541  4433
#define F_0_765  6270
#define F_0_899  7373
#define F_1_175  9633
#define F_1_501  12299
#define F_1_847  15137
#define F_1_961  16069
#define F_2_053  16819
#define F_2_562  20995
#define F_3_072  25172

#define F_1_175_MINUS_1_961  (F_1_175 - F_1_961)
#define F_1_175_MINUS_0_390  (F_1_175 - F_0_390)
#define F_0_541_MINUS_1_847  (F_0_541 - F_1_847)
#define F_3_072_MINUS_2_562  (F_3_072 - F_2_562)
#define F_0_298_MINUS_0_899  (F_0_298 - F_0_899)
#define F_1_501_MINUS_0_899  (F_1_501 - F_0_899)
#define F_2_053_MINUS_2_562  (F_2_053 - F_2_562)
#define F_0_541_PLUS_0_765   (F_0_541 + F_0_765)
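
/* Added note (illustrative example, not part of the original source): in this
 * fixed-point scheme, multiplying a value x by 0.541196100 is approximated as
 *
 *   (x * F_0_541) >> CONST_BITS  =  (x * 4433) / 8192  ~=  x * 0.541196100
 *
 * e.g. for x = 100:  100 * 4433 = 443300, and 443300 >> 13 = 54  ~=  54.1.
 * The right shifts are not applied per multiply; they are folded into the
 * DESCALE_P1/DESCALE_P2 shifts performed at the end of each pass, which also
 * account for the PASS1_BITS of extra intermediate precision.
 */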

ALIGN(16) static const int16_t jsimd_idct_islow_neon_consts[] = {
  F_0_899,             F_0_541,
  F_2_562,             F_0_298_MINUS_0_899,
  F_1_501_MINUS_0_899, F_2_053_MINUS_2_562,
  F_0_541_PLUS_0_765,  F_1_175,
  F_1_175_MINUS_0_390, F_0_541_MINUS_1_847,
  F_3_072_MINUS_2_562, F_1_175_MINUS_1_961,
  0, 0, 0, 0
};
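
/* Added note: the helper functions below load this table as three int16x4_t
 * vectors (consts.val[0..2]) and select individual constants with the
 * (vector, lane) arguments of vmull_lane_s16()/vmlal_lane_s16()/
 * vmlsl_lane_s16().  For example, consts.val[0] lane 1 is F_0_541 and
 * consts.val[1] lane 3 is F_1_175.  The trailing zeros pad the table to a
 * multiple of four elements.
 */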

/* Forward declaration of regular and sparse IDCT helper functions */

static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
                                                  int16x4_t row1,
                                                  int16x4_t row2,
                                                  int16x4_t row3,
                                                  int16x4_t row4,
                                                  int16x4_t row5,
                                                  int16x4_t row6,
                                                  int16x4_t row7,
                                                  int16x4_t quant_row0,
                                                  int16x4_t quant_row1,
                                                  int16x4_t quant_row2,
                                                  int16x4_t quant_row3,
                                                  int16x4_t quant_row4,
                                                  int16x4_t quant_row5,
                                                  int16x4_t quant_row6,
                                                  int16x4_t quant_row7,
                                                  int16_t *workspace_1,
                                                  int16_t *workspace_2);

static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
                                                 int16x4_t row1,
                                                 int16x4_t row2,
                                                 int16x4_t row3,
                                                 int16x4_t quant_row0,
                                                 int16x4_t quant_row1,
                                                 int16x4_t quant_row2,
                                                 int16x4_t quant_row3,
                                                 int16_t *workspace_1,
                                                 int16_t *workspace_2);

static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
                                                  JSAMPARRAY output_buf,
                                                  JDIMENSION output_col,
                                                  unsigned buf_offset);

static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
                                                 JSAMPARRAY output_buf,
                                                 JDIMENSION output_col,
                                                 unsigned buf_offset);

/* Perform dequantization and inverse DCT on one block of coefficients. For
 * reference, the C implementation (jpeg_idct_slow()) can be found in
 * jidctint.c.
 *
 * Optimization techniques used for fast data access:
 *
 * In each pass, the inverse DCT is computed for the left and right 4x8 halves
 * of the DCT block. This avoids spilling due to register pressure, and the
 * increased granularity allows for an optimized calculation depending on the
 * values of the DCT coefficients. Between passes, intermediate data is stored
 * in 4x8 workspace buffers.
 *
 * Transposing the 8x8 DCT block after each pass can be achieved by transposing
 * each of the four 4x4 quadrants and swapping quadrants 1 and 2 (refer to the
 * diagram below.) Swapping quadrants is cheap, since the second pass can just
 * swap the workspace buffer pointers.
 *
 *      +-------+-------+                 +-------+-------+
 *      |       |       |                 |       |       |
 *      |   0   |   1   |                 |   0   |   2   |
 *      |       |       |    transpose    |       |       |
 *      +-------+-------+     ------>     +-------+-------+
 *      |       |       |                 |       |       |
 *      |   2   |   3   |                 |   1   |   3   |
 *      |       |       |                 |       |       |
 *      +-------+-------+                 +-------+-------+
 *
 * Optimization techniques used to accelerate the inverse DCT calculation:
 *
 * In a DCT coefficient block, the coefficients are increasingly likely to be 0
 * as you move diagonally from top left to bottom right. If whole rows of
 * coefficients are 0, then the inverse DCT calculation can be simplified. On
 * the first pass of the inverse DCT, we test for three special cases before
 * defaulting to a full "regular" inverse DCT:
 *
 * 1) Coefficients in rows 4-7 are all zero. In this case, we perform a
 *    "sparse" simplified inverse DCT on rows 0-3.
 * 2) AC coefficients (rows 1-7) are all zero. In this case, the inverse DCT
 *    result is equal to the dequantized DC coefficients.
 * 3) AC and DC coefficients are all zero. In this case, the inverse DCT
 *    result is all zero. For the left 4x8 half, this is handled identically
 *    to Case 2 above. For the right 4x8 half, we do no work and signal that
 *    the "sparse" algorithm is required for the second pass.
 *
 * In the second pass, only a single special case is tested: whether the AC and
 * DC coefficients were all zero in the right 4x8 block during the first pass
 * (refer to Case 3 above.) If this is the case, then a "sparse" variant of
 * the second pass is performed for both the left and right halves of the DCT
 * block. (The transposition after the first pass means that the right 4x8
 * block during the first pass becomes rows 4-7 during the second pass.)
 */
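
/* Added note (illustration, not from the original source): the 4x4 transpose
 * falls out of how VST4 interleaves its four input vectors.  Roughly:
 *
 *   int16x4x4_t q = { { r0, r1, r2, r3 } };
 *   vst4_s16(buf, q);
 *   // buf now holds r0[0], r1[0], r2[0], r3[0],  r0[1], r1[1], r2[1], r3[1],
 *   //               r0[2], r1[2], r2[2], r3[2],  r0[3], r1[3], r2[3], r3[3]
 *
 * i.e. each group of four consecutive elements in memory is a column of the
 * original 4x4 block, so reading the workspace row-by-row in the second pass
 * yields the transposed quadrant.
 */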

void jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
                           JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE *quantptr = dct_table;

  int16_t workspace_l[8 * DCTSIZE / 2];
  int16_t workspace_r[8 * DCTSIZE / 2];

  /* Compute IDCT first pass on left 4x8 coefficient block. */

  /* Load DCT coefficients in left 4x8 block. */
  int16x4_t row0 = vld1_s16(coef_block + 0 * DCTSIZE);
  int16x4_t row1 = vld1_s16(coef_block + 1 * DCTSIZE);
  int16x4_t row2 = vld1_s16(coef_block + 2 * DCTSIZE);
  int16x4_t row3 = vld1_s16(coef_block + 3 * DCTSIZE);
  int16x4_t row4 = vld1_s16(coef_block + 4 * DCTSIZE);
  int16x4_t row5 = vld1_s16(coef_block + 5 * DCTSIZE);
  int16x4_t row6 = vld1_s16(coef_block + 6 * DCTSIZE);
  int16x4_t row7 = vld1_s16(coef_block + 7 * DCTSIZE);

  /* Load quantization table for left 4x8 block. */
  int16x4_t quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE);
  int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
  int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
  int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
  int16x4_t quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE);
  int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
  int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
  int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);

  /* Construct bitmap to test if DCT coefficients in left 4x8 block are 0. */
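  /* Added note: OR-ing the four row vectors and extracting the combined 64
   * bits as a scalar gives a single test that is zero if and only if all 16
   * coefficients in rows 4-7 of this 4x8 half are zero.  The same trick is
   * reused below for the AC (rows 1-7) and AC+DC tests.
   */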
  int16x4_t bitmap = vorr_s16(row7, row6);
  bitmap = vorr_s16(bitmap, row5);
  bitmap = vorr_s16(bitmap, row4);
  int64_t bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

  if (bitmap_rows_4567 == 0) {
    bitmap = vorr_s16(bitmap, row3);
    bitmap = vorr_s16(bitmap, row2);
    bitmap = vorr_s16(bitmap, row1);
    int64_t left_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

    if (left_ac_bitmap == 0) {
      int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
      int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
      /* Store 4x4 blocks to workspace, transposing in the process. */
      vst4_s16(workspace_l, quadrant);
      vst4_s16(workspace_r, quadrant);
    } else {
      jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
                                    quant_row1, quant_row2, quant_row3,
                                    workspace_l, workspace_r);
    }
  } else {
    jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
                                   row6, row7, quant_row0, quant_row1,
                                   quant_row2, quant_row3, quant_row4,
                                   quant_row5, quant_row6, quant_row7,
                                   workspace_l, workspace_r);
  }

  /* Compute IDCT first pass on right 4x8 coefficient block. */

  /* Load DCT coefficients in right 4x8 block. */
  row0 = vld1_s16(coef_block + 0 * DCTSIZE + 4);
  row1 = vld1_s16(coef_block + 1 * DCTSIZE + 4);
  row2 = vld1_s16(coef_block + 2 * DCTSIZE + 4);
  row3 = vld1_s16(coef_block + 3 * DCTSIZE + 4);
  row4 = vld1_s16(coef_block + 4 * DCTSIZE + 4);
  row5 = vld1_s16(coef_block + 5 * DCTSIZE + 4);
  row6 = vld1_s16(coef_block + 6 * DCTSIZE + 4);
  row7 = vld1_s16(coef_block + 7 * DCTSIZE + 4);

  /* Load quantization table for right 4x8 block. */
  quant_row0 = vld1_s16(quantptr + 0 * DCTSIZE + 4);
  quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
  quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
  quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
  quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE + 4);
  quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
  quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
  quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);

  /* Construct bitmap to test if DCT coefficients in right 4x8 block are 0. */
  bitmap = vorr_s16(row7, row6);
  bitmap = vorr_s16(bitmap, row5);
  bitmap = vorr_s16(bitmap, row4);
  bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);
  bitmap = vorr_s16(bitmap, row3);
  bitmap = vorr_s16(bitmap, row2);
  bitmap = vorr_s16(bitmap, row1);
  int64_t right_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

  /* If this remains non-zero, a "regular" second pass will be performed. */
  int64_t right_ac_dc_bitmap = 1;

  if (right_ac_bitmap == 0) {
    bitmap = vorr_s16(bitmap, row0);
    right_ac_dc_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0);

    if (right_ac_dc_bitmap != 0) {
      int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS);
      int16x4x4_t quadrant = { { dcval, dcval, dcval, dcval } };
      /* Store 4x4 blocks to workspace, transposing in the process. */
      vst4_s16(workspace_l + 4 * DCTSIZE / 2, quadrant);
      vst4_s16(workspace_r + 4 * DCTSIZE / 2, quadrant);
    }
  } else {
    if (bitmap_rows_4567 == 0) {
      jsimd_idct_islow_pass1_sparse(row0, row1, row2, row3, quant_row0,
                                    quant_row1, quant_row2, quant_row3,
                                    workspace_l + 4 * DCTSIZE / 2,
                                    workspace_r + 4 * DCTSIZE / 2);
    } else {
      jsimd_idct_islow_pass1_regular(row0, row1, row2, row3, row4, row5,
                                     row6, row7, quant_row0, quant_row1,
                                     quant_row2, quant_row3, quant_row4,
                                     quant_row5, quant_row6, quant_row7,
                                     workspace_l + 4 * DCTSIZE / 2,
                                     workspace_r + 4 * DCTSIZE / 2);
    }
  }

  /* Second pass: compute IDCT on rows in workspace. */

  /* If all coefficients in right 4x8 block are 0, use "sparse" second pass. */
  if (right_ac_dc_bitmap == 0) {
    jsimd_idct_islow_pass2_sparse(workspace_l, output_buf, output_col, 0);
    jsimd_idct_islow_pass2_sparse(workspace_r, output_buf, output_col, 4);
  } else {
    jsimd_idct_islow_pass2_regular(workspace_l, output_buf, output_col, 0);
    jsimd_idct_islow_pass2_regular(workspace_r, output_buf, output_col, 4);
  }
}

/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients. (To process the full 8x8 DCT block, this
 * function-- or some other optimized variant-- needs to be called for both
 * the left and right 4x8 blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of AC coefficients is all 0.
 *
 * The original C implementation of the accurate IDCT (jpeg_idct_slow()) can be
 * found in jidctint.c. Algorithmic changes made here are documented inline.
 */
static INLINE void jsimd_idct_islow_pass1_regular(int16x4_t row0,
                                                  int16x4_t row1,
                                                  int16x4_t row2,
                                                  int16x4_t row3,
                                                  int16x4_t row4,
                                                  int16x4_t row5,
                                                  int16x4_t row6,
                                                  int16x4_t row7,
                                                  int16x4_t quant_row0,
                                                  int16x4_t quant_row1,
                                                  int16x4_t quant_row2,
                                                  int16x4_t quant_row3,
                                                  int16x4_t quant_row4,
                                                  int16x4_t quant_row5,
                                                  int16x4_t quant_row6,
                                                  int16x4_t quant_row7,
                                                  int16_t *workspace_1,
                                                  int16_t *workspace_2)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);
  int16x4_t z3_s16 = vmul_s16(row6, quant_row6);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  z2_s16 = vmul_s16(row0, quant_row0);
  z3_s16 = vmul_s16(row4, quant_row4);

  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  int16x4_t tmp0_s16 = vmul_s16(row7, quant_row7);
  int16x4_t tmp1_s16 = vmul_s16(row5, quant_row5);
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation:
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */
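  /* Added note: substituting z5 = (z3 + z4) * 1.175875602 into the original
   * expressions shows the two forms are algebraically identical; the merged
   * constants (stored in the table above) let each output be formed with one
   * vmull_lane_s16() plus one vmlal_lane_s16(), with no separate z5 term.
   */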
  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation:
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */
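  /* Added note: this is the same merging idea as for z3/z4 above -- the
   * shared z1 = -0.899976223 * (tmp0 + tmp3) and z2 = -2.562915447 *
   * (tmp1 + tmp2) terms are folded into the per-term constants, which is why
   * difference constants such as F_0_298_MINUS_0_899 appear in the table.
   */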
  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale and narrow to 16-bit. */
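  /* Added note: vrshrn_n_s32(x, DESCALE_P1) is a rounding right shift by
   * CONST_BITS - PASS1_BITS followed by narrowing to 16 bits, i.e. the same
   * operation as DESCALE(x, CONST_BITS - PASS1_BITS) in jidctint.c.
   */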
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second
   * pass. (VST4 transposes the blocks. We need to operate on rows in the
   * next pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}

/* Perform dequantization and the first pass of the accurate inverse DCT on a
 * 4x8 block of coefficients.
 *
 * This "sparse" version assumes that the AC coefficients in rows 4-7 are all
 * 0. This simplifies the IDCT calculation, accelerating overall performance.
 */
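/* Added note: with rows 4-7 zero, the even part loses its row 4 and row 6
 * terms and the odd part loses its row 5 and row 7 terms, so roughly half of
 * the multiply-accumulate work of the "regular" first pass is skipped.
 */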
static INLINE void jsimd_idct_islow_pass1_sparse(int16x4_t row0,
                                                 int16x4_t row1,
                                                 int16x4_t row2,
                                                 int16x4_t row3,
                                                 int16x4_t quant_row0,
                                                 int16x4_t quant_row1,
                                                 int16x4_t quant_row2,
                                                 int16x4_t quant_row3,
                                                 int16_t *workspace_1,
                                                 int16_t *workspace_2)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0) */
  int16x4_t z2_s16 = vmul_s16(row2, quant_row2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  z2_s16 = vmul_s16(row0, quant_row0);
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0) */
  int16x4_t tmp2_s16 = vmul_s16(row3, quant_row3);
  int16x4_t tmp3_s16 = vmul_s16(row1, quant_row1);

  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x4x4_t rows_0123 = { {
    vrshrn_n_s32(vaddq_s32(tmp10, tmp3), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vaddq_s32(tmp13, tmp0), DESCALE_P1)
  } };
  int16x4x4_t rows_4567 = { {
    vrshrn_n_s32(vsubq_s32(tmp13, tmp0), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp12, tmp1), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp11, tmp2), DESCALE_P1),
    vrshrn_n_s32(vsubq_s32(tmp10, tmp3), DESCALE_P1)
  } };

  /* Store 4x4 blocks to the intermediate workspace, ready for the second
   * pass. (VST4 transposes the blocks. We need to operate on rows in the
   * next pass.)
   */
  vst4_s16(workspace_1, rows_0123);
  vst4_s16(workspace_2, rows_4567);
}

/* Perform the second pass of the accurate inverse DCT on a 4x8 block of
 * coefficients. (To process the full 8x8 DCT block, this function-- or some
 * other optimized variant-- needs to be called for both the right and left
 * 4x8 blocks.)
 *
 * This "regular" version assumes that no optimization can be made to the IDCT
 * calculation, since no useful set of coefficient values are all 0 after the
 * first pass.
 *
 * Again, the original C implementation of the accurate IDCT (jpeg_idct_slow())
 * can be found in jidctint.c. Algorithmic changes made here are documented
 * inline.
 */
static INLINE void jsimd_idct_islow_pass2_regular(int16_t *workspace,
                                                  JSAMPARRAY output_buf,
                                                  JDIMENSION output_col,
                                                  unsigned buf_offset)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);
  int16x4_t z3_s16 = vld1_s16(workspace + 6 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, z3_s16, consts.val[2], 1);
  tmp3 = vmlal_lane_s16(tmp3, z3_s16, consts.val[0], 1);

  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  z3_s16 = vld1_s16(workspace + 4 * DCTSIZE / 2);

  int32x4_t tmp0 = vshll_n_s16(vadd_s16(z2_s16, z3_s16), CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(vsub_s16(z2_s16, z3_s16), CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part */
  int16x4_t tmp0_s16 = vld1_s16(workspace + 7 * DCTSIZE / 2);
  int16x4_t tmp1_s16 = vld1_s16(workspace + 5 * DCTSIZE / 2);
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  z3_s16 = vadd_s16(tmp0_s16, tmp2_s16);
  int16x4_t z4_s16 = vadd_s16(tmp1_s16, tmp3_s16);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z5 = (z3 + z4) * 1.175875602;
   *   z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
   *   z3 += z5;  z4 += z5;
   *
   * This implementation:
   *   z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
   *   z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
   */
  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  /* Implementation as per jpeg_idct_islow() in jidctint.c:
   *   z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
   *   tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
   *   tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
   *   z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
   *   tmp0 += z1 + z3;  tmp1 += z2 + z4;
   *   tmp2 += z2 + z3;  tmp3 += z1 + z4;
   *
   * This implementation:
   *   tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
   *   tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
   *   tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
   *   tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
   *   tmp0 += z3;  tmp1 += z4;
   *   tmp2 += z3;  tmp3 += z4;
   */
  tmp0 = vmull_lane_s16(tmp0_s16, consts.val[0], 3);
  tmp1 = vmull_lane_s16(tmp1_s16, consts.val[1], 1);
  tmp2 = vmull_lane_s16(tmp2_s16, consts.val[2], 2);
  tmp3 = vmull_lane_s16(tmp3_s16, consts.val[1], 0);

  tmp0 = vmlsl_lane_s16(tmp0, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(tmp1, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlsl_lane_s16(tmp2, tmp1_s16, consts.val[0], 2);
  tmp3 = vmlsl_lane_s16(tmp3, tmp0_s16, consts.val[0], 0);

  tmp0 = vaddq_s32(tmp0, z3);
  tmp1 = vaddq_s32(tmp1, z4);
  tmp2 = vaddq_s32(tmp2, z3);
  tmp3 = vaddq_s32(tmp3, z4);

  /* Final output stage: descale and narrow to 16-bit. */
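  /* Added note: vaddhn_s32()/vsubhn_s32() keep only the high 16 bits of each
   * 32-bit sum/difference, which is an implicit arithmetic shift right by 16.
   * The remaining DESCALE_P2 - 16 bits are then removed (with rounding and
   * saturation) by the vqrshrn_n_s16() calls below, for a total shift of
   * DESCALE_P2.
   */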
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));

  /* Descale and narrow to 8-bit. */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);

  /* Clamp to range [0-255]. */
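  /* Added note: the saturating narrow above already limits each sample to
   * [-128, 127]; adding CENTERJSAMPLE (128 for 8-bit samples) as an unsigned
   * byte then re-centers the samples into the required [0, 255] range.
   */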
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory. (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
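  /* Added note: after vzip_u8(), each 16-bit lane holds a pair of
   * horizontally adjacent output pixels for one image row.  vst4_lane_u16()
   * then writes lane i of all four vectors contiguously, emitting the eight
   * pixels of output row i in one store.
   */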
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;

  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}

/* Perform the second pass of the accurate inverse DCT on a 4x8 block of
 * coefficients.
 *
 * This "sparse" version assumes that the coefficient values (after the first
 * pass) in rows 4-7 are all 0. This simplifies the IDCT calculation,
 * accelerating overall performance.
 */
static INLINE void jsimd_idct_islow_pass2_sparse(int16_t *workspace,
                                                 JSAMPARRAY output_buf,
                                                 JDIMENSION output_col,
                                                 unsigned buf_offset)
{
  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_islow_neon_consts);
#else
  const int16x4_t consts1 = vld1_s16(jsimd_idct_islow_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_islow_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_islow_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  /* Even part (z3 is all 0) */
  int16x4_t z2_s16 = vld1_s16(workspace + 2 * DCTSIZE / 2);

  int32x4_t tmp2 = vmull_lane_s16(z2_s16, consts.val[0], 1);
  int32x4_t tmp3 = vmull_lane_s16(z2_s16, consts.val[1], 2);

  z2_s16 = vld1_s16(workspace + 0 * DCTSIZE / 2);
  int32x4_t tmp0 = vshll_n_s16(z2_s16, CONST_BITS);
  int32x4_t tmp1 = vshll_n_s16(z2_s16, CONST_BITS);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp3);
  int32x4_t tmp13 = vsubq_s32(tmp0, tmp3);
  int32x4_t tmp11 = vaddq_s32(tmp1, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp1, tmp2);

  /* Odd part (tmp0 and tmp1 are both all 0) */
  int16x4_t tmp2_s16 = vld1_s16(workspace + 3 * DCTSIZE / 2);
  int16x4_t tmp3_s16 = vld1_s16(workspace + 1 * DCTSIZE / 2);

  int16x4_t z3_s16 = tmp2_s16;
  int16x4_t z4_s16 = tmp3_s16;

  int32x4_t z3 = vmull_lane_s16(z3_s16, consts.val[2], 3);
  z3 = vmlal_lane_s16(z3, z4_s16, consts.val[1], 3);
  int32x4_t z4 = vmull_lane_s16(z3_s16, consts.val[1], 3);
  z4 = vmlal_lane_s16(z4, z4_s16, consts.val[2], 0);

  tmp0 = vmlsl_lane_s16(z3, tmp3_s16, consts.val[0], 0);
  tmp1 = vmlsl_lane_s16(z4, tmp2_s16, consts.val[0], 2);
  tmp2 = vmlal_lane_s16(z3, tmp2_s16, consts.val[2], 2);
  tmp3 = vmlal_lane_s16(z4, tmp3_s16, consts.val[1], 0);

  /* Final output stage: descale and narrow to 16-bit. */
  int16x8_t cols_02_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp3),
                                       vaddhn_s32(tmp12, tmp1));
  int16x8_t cols_13_s16 = vcombine_s16(vaddhn_s32(tmp11, tmp2),
                                       vaddhn_s32(tmp13, tmp0));
  int16x8_t cols_46_s16 = vcombine_s16(vsubhn_s32(tmp13, tmp0),
                                       vsubhn_s32(tmp11, tmp2));
  int16x8_t cols_57_s16 = vcombine_s16(vsubhn_s32(tmp12, tmp1),
                                       vsubhn_s32(tmp10, tmp3));

  /* Descale and narrow to 8-bit. */
  int8x8_t cols_02_s8 = vqrshrn_n_s16(cols_02_s16, DESCALE_P2 - 16);
  int8x8_t cols_13_s8 = vqrshrn_n_s16(cols_13_s16, DESCALE_P2 - 16);
  int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16);
  int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16);

  /* Clamp to range [0-255]. */
  uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8),
                                 vdup_n_u8(CENTERJSAMPLE));
  uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8),
                                 vdup_n_u8(CENTERJSAMPLE));

  /* Transpose 4x8 block and store to memory. (Zipping adjacent columns
   * together allows us to store 16-bit elements.)
   */
  uint8x8x2_t cols_01_23 = vzip_u8(cols_02_u8, cols_13_u8);
  uint8x8x2_t cols_45_67 = vzip_u8(cols_46_u8, cols_57_u8);
  uint16x4x4_t cols_01_23_45_67 = { {
    vreinterpret_u16_u8(cols_01_23.val[0]),
    vreinterpret_u16_u8(cols_01_23.val[1]),
    vreinterpret_u16_u8(cols_45_67.val[0]),
    vreinterpret_u16_u8(cols_45_67.val[1])
  } };

  JSAMPROW outptr0 = output_buf[buf_offset + 0] + output_col;
  JSAMPROW outptr1 = output_buf[buf_offset + 1] + output_col;
  JSAMPROW outptr2 = output_buf[buf_offset + 2] + output_col;
  JSAMPROW outptr3 = output_buf[buf_offset + 3] + output_col;

  /* VST4 of 16-bit elements completes the transpose. */
  vst4_lane_u16((uint16_t *)outptr0, cols_01_23_45_67, 0);
  vst4_lane_u16((uint16_t *)outptr1, cols_01_23_45_67, 1);
  vst4_lane_u16((uint16_t *)outptr2, cols_01_23_45_67, 2);
  vst4_lane_u16((uint16_t *)outptr3, cols_01_23_45_67, 3);
}