/*
 * jidctred-neon.c - reduced-size IDCT (Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited.  All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#define JPEG_INTERNALS
#include "../../jinclude.h"
#include "../../jpeglib.h"
#include "../../jsimd.h"
#include "../../jdct.h"
#include "../../jsimddct.h"
#include "../jsimd.h"
#include "align.h"
#include "neon-compat.h"

#include <arm_neon.h>

#define CONST_BITS  13
#define PASS1_BITS  2

#define F_0_211  1730
#define F_0_509  4176
#define F_0_601  4926
#define F_0_720  5906
#define F_0_765  6270
#define F_0_850  6967
#define F_0_899  7373
#define F_1_061  8697
#define F_1_272  10426
#define F_1_451  11893
#define F_1_847  15137
#define F_2_172  17799
#define F_2_562  20995
#define F_3_624  29692

/* jsimd_idct_2x2_neon() is an inverse DCT function that produces reduced-size
 * 2x2 output from an 8x8 DCT block.  It uses the same calculations and
 * produces exactly the same output as IJG's original jpeg_idct_2x2() function
 * from jpeg-6b, which can be found in jidctred.c.
 *
 * Scaled integer constants are used to avoid floating-point arithmetic:
 *    0.720959822 =  5906 * 2^-13
 *    0.850430095 =  6967 * 2^-13
 *    1.272758580 = 10426 * 2^-13
 *    3.624509785 = 29692 * 2^-13
 *
 * See jidctred.c for further details of the 2x2 IDCT algorithm.  Where
 * possible, the variable names and comments here in jsimd_idct_2x2_neon()
 * match up with those in jpeg_idct_2x2().
 */
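
/* For example, F_0_850 encodes 0.850430095 in 13-bit fixed point:
 * 6967 * 2^-13 = 0.8504639, the nearest representable value.  Multiplying a
 * coefficient by one of these constants and then right-shifting by CONST_BITS
 * (with rounding, via vrshrn/vrsra) approximates the real-valued
 * multiplication used in the reference algorithm.
 */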

ALIGN(16) static const int16_t jsimd_idct_2x2_neon_consts[] = {
  -F_0_720, F_0_850, -F_1_272, F_3_624
};

void jsimd_idct_2x2_neon(void *dct_table, JCOEFPTR coef_block,
                         JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE *quantptr = dct_table;

  /* Load DCT coefficients. */
  int16x8_t row0 = vld1q_s16(coef_block + 0 * DCTSIZE);
  int16x8_t row1 = vld1q_s16(coef_block + 1 * DCTSIZE);
  int16x8_t row3 = vld1q_s16(coef_block + 3 * DCTSIZE);
  int16x8_t row5 = vld1q_s16(coef_block + 5 * DCTSIZE);
  int16x8_t row7 = vld1q_s16(coef_block + 7 * DCTSIZE);

  /* Load quantization table values. */
  int16x8_t quant_row0 = vld1q_s16(quantptr + 0 * DCTSIZE);
  int16x8_t quant_row1 = vld1q_s16(quantptr + 1 * DCTSIZE);
  int16x8_t quant_row3 = vld1q_s16(quantptr + 3 * DCTSIZE);
  int16x8_t quant_row5 = vld1q_s16(quantptr + 5 * DCTSIZE);
  int16x8_t quant_row7 = vld1q_s16(quantptr + 7 * DCTSIZE);

  /* Dequantize DCT coefficients. */
  row0 = vmulq_s16(row0, quant_row0);
  row1 = vmulq_s16(row1, quant_row1);
  row3 = vmulq_s16(row3, quant_row3);
  row5 = vmulq_s16(row5, quant_row5);
  row7 = vmulq_s16(row7, quant_row7);

  /* Load IDCT conversion constants. */
  const int16x4_t consts = vld1_s16(jsimd_idct_2x2_neon_consts);

  /* Pass 1: process columns from input, put results in vectors row0 and
   * row1.
   */

  /* Even part */
  int32x4_t tmp10_l = vshll_n_s16(vget_low_s16(row0), CONST_BITS + 2);
  int32x4_t tmp10_h = vshll_n_s16(vget_high_s16(row0), CONST_BITS + 2);
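  /* Note: the DC term is pre-shifted by CONST_BITS + 2 so that, after the
   * descale by CONST_BITS in the final output stage of this pass, the results
   * carry the same fixed-point scaling as the pass-1 workspace values in the
   * scalar jpeg_idct_2x2().
   */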
  /* Odd part */
  int32x4_t tmp0_l = vmull_lane_s16(vget_low_s16(row1), consts, 3);
  tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(row3), consts, 2);
  tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(row5), consts, 1);
  tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(row7), consts, 0);
  int32x4_t tmp0_h = vmull_lane_s16(vget_high_s16(row1), consts, 3);
  tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(row3), consts, 2);
  tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(row5), consts, 1);
  tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(row7), consts, 0);

  /* Final output stage: descale and narrow to 16-bit. */
  row0 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp10_l, tmp0_l), CONST_BITS),
                      vrshrn_n_s32(vaddq_s32(tmp10_h, tmp0_h), CONST_BITS));
  row1 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp10_l, tmp0_l), CONST_BITS),
                      vrshrn_n_s32(vsubq_s32(tmp10_h, tmp0_h), CONST_BITS));

  /* Transpose two rows, ready for second pass. */
  int16x8x2_t cols_0246_1357 = vtrnq_s16(row0, row1);
  int16x8_t cols_0246 = cols_0246_1357.val[0];
  int16x8_t cols_1357 = cols_0246_1357.val[1];

  /* Duplicate columns such that each is accessible in its own vector. */
  int32x4x2_t cols_1155_3377 = vtrnq_s32(vreinterpretq_s32_s16(cols_1357),
                                         vreinterpretq_s32_s16(cols_1357));
  int16x8_t cols_1155 = vreinterpretq_s16_s32(cols_1155_3377.val[0]);
  int16x8_t cols_3377 = vreinterpretq_s16_s32(cols_1155_3377.val[1]);

  /* Pass 2: process two rows, store to output array. */

  /* Even part: we're only interested in col0; the top half of tmp10 is
   * "don't care."
   */
  int32x4_t tmp10 = vshll_n_s16(vget_low_s16(cols_0246), CONST_BITS + 2);

  /* Odd part: we're only interested in the bottom half of tmp0. */
  int32x4_t tmp0 = vmull_lane_s16(vget_low_s16(cols_1155), consts, 3);
  tmp0 = vmlal_lane_s16(tmp0, vget_low_s16(cols_3377), consts, 2);
  tmp0 = vmlal_lane_s16(tmp0, vget_high_s16(cols_1155), consts, 1);
  tmp0 = vmlal_lane_s16(tmp0, vget_high_s16(cols_3377), consts, 0);

  /* Final output stage: descale and clamp to range [0-255]. */
  int16x8_t output_s16 = vcombine_s16(vaddhn_s32(tmp10, tmp0),
                                      vsubhn_s32(tmp10, tmp0));
  output_s16 = vrsraq_n_s16(vdupq_n_s16(CENTERJSAMPLE), output_s16,
                            CONST_BITS + PASS1_BITS + 3 + 2 - 16);
  /* Narrow to 8-bit and convert to unsigned. */
  uint8x8_t output_u8 = vqmovun_s16(output_s16);
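  /* After the final output stage above, lanes 0 and 1 of output_u8 hold
   * column 0 of output rows 0 and 1, and lanes 4 and 5 hold column 1 of the
   * same rows; the remaining lanes are "don't care."
   */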
  /* Store 2x2 block to memory. */
  vst1_lane_u8(output_buf[0] + output_col, output_u8, 0);
  vst1_lane_u8(output_buf[1] + output_col, output_u8, 1);
  vst1_lane_u8(output_buf[0] + output_col + 1, output_u8, 4);
  vst1_lane_u8(output_buf[1] + output_col + 1, output_u8, 5);
}

/* jsimd_idct_4x4_neon() is an inverse DCT function that produces reduced-size
 * 4x4 output from an 8x8 DCT block.  It uses the same calculations and
 * produces exactly the same output as IJG's original jpeg_idct_4x4() function
 * from jpeg-6b, which can be found in jidctred.c.
 *
 * Scaled integer constants are used to avoid floating-point arithmetic:
 *    0.211164243 =  1730 * 2^-13
 *    0.509795579 =  4176 * 2^-13
 *    0.601344887 =  4926 * 2^-13
 *    0.765366865 =  6270 * 2^-13
 *    0.899976223 =  7373 * 2^-13
 *    1.061594337 =  8697 * 2^-13
 *    1.451774981 = 11893 * 2^-13
 *    1.847759065 = 15137 * 2^-13
 *    2.172734803 = 17799 * 2^-13
 *    2.562915447 = 20995 * 2^-13
 *
 * See jidctred.c for further details of the 4x4 IDCT algorithm.  Where
 * possible, the variable names and comments here in jsimd_idct_4x4_neon()
 * match up with those in jpeg_idct_4x4().
 */

ALIGN(16) static const int16_t jsimd_idct_4x4_neon_consts[] = {
  F_1_847, -F_0_765, -F_0_211, F_1_451,
  -F_2_172, F_1_061, -F_0_509, -F_0_601,
  F_0_899, F_2_562, 0, 0
};

void jsimd_idct_4x4_neon(void *dct_table, JCOEFPTR coef_block,
                         JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE *quantptr = dct_table;

  /* Load DCT coefficients. */
  int16x8_t row0 = vld1q_s16(coef_block + 0 * DCTSIZE);
  int16x8_t row1 = vld1q_s16(coef_block + 1 * DCTSIZE);
  int16x8_t row2 = vld1q_s16(coef_block + 2 * DCTSIZE);
  int16x8_t row3 = vld1q_s16(coef_block + 3 * DCTSIZE);
  int16x8_t row5 = vld1q_s16(coef_block + 5 * DCTSIZE);
  int16x8_t row6 = vld1q_s16(coef_block + 6 * DCTSIZE);
  int16x8_t row7 = vld1q_s16(coef_block + 7 * DCTSIZE);
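  /* Coefficient row 4 is not loaded: as in the scalar jpeg_idct_4x4(), that
   * term does not contribute to the reduced 4x4 output and can be skipped
   * entirely.
   */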

  /* Load quantization table values for DC coefficients. */
  int16x8_t quant_row0 = vld1q_s16(quantptr + 0 * DCTSIZE);
  /* Dequantize DC coefficients. */
  row0 = vmulq_s16(row0, quant_row0);

  /* Construct bitmap to test if all AC coefficients are 0. */
  int16x8_t bitmap = vorrq_s16(row1, row2);
  bitmap = vorrq_s16(bitmap, row3);
  bitmap = vorrq_s16(bitmap, row5);
  bitmap = vorrq_s16(bitmap, row6);
  bitmap = vorrq_s16(bitmap, row7);
  int64_t left_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 0);
  int64_t right_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 1);
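  /* Each 64-bit half of the bitmap covers four columns, so a zero half means
   * that all AC coefficients in those four columns are zero and those columns
   * can take the cheaper DC-only path below.
   */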

  /* Load constants for IDCT computation. */
#ifdef HAVE_VLD1_S16_X3
  const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_4x4_neon_consts);
#else
  /* GCC does not currently support the intrinsic vld1_<type>_x3(). */
  const int16x4_t consts1 = vld1_s16(jsimd_idct_4x4_neon_consts);
  const int16x4_t consts2 = vld1_s16(jsimd_idct_4x4_neon_consts + 4);
  const int16x4_t consts3 = vld1_s16(jsimd_idct_4x4_neon_consts + 8);
  const int16x4x3_t consts = { { consts1, consts2, consts3 } };
#endif

  if (left_ac_bitmap == 0 && right_ac_bitmap == 0) {
    /* All AC coefficients are zero.
     * Compute DC values and duplicate into row vectors 0, 1, 2, and 3.
     */
    int16x8_t dcval = vshlq_n_s16(row0, PASS1_BITS);
    row0 = dcval;
    row1 = dcval;
    row2 = dcval;
    row3 = dcval;
  } else if (left_ac_bitmap == 0) {
    /* AC coefficients are zero for columns 0, 1, 2, and 3.
     * Compute DC values for these columns.
     */
    int16x4_t dcval = vshl_n_s16(vget_low_s16(row0), PASS1_BITS);

    /* Commence regular IDCT computation for columns 4, 5, 6, and 7. */

    /* Load quantization table. */
    int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
    int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
    int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
    int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
    int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
    int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);

    /* Even part */
    int32x4_t tmp0 = vshll_n_s16(vget_high_s16(row0), CONST_BITS + 1);

    int16x4_t z2 = vmul_s16(vget_high_s16(row2), quant_row2);
    int16x4_t z3 = vmul_s16(vget_high_s16(row6), quant_row6);

    int32x4_t tmp2 = vmull_lane_s16(z2, consts.val[0], 0);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[0], 1);

    int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
    int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

    /* Odd part */
    int16x4_t z1 = vmul_s16(vget_high_s16(row7), quant_row7);
    z2 = vmul_s16(vget_high_s16(row5), quant_row5);
    z3 = vmul_s16(vget_high_s16(row3), quant_row3);
    int16x4_t z4 = vmul_s16(vget_high_s16(row1), quant_row1);

    tmp0 = vmull_lane_s16(z1, consts.val[0], 2);
    tmp0 = vmlal_lane_s16(tmp0, z2, consts.val[0], 3);
    tmp0 = vmlal_lane_s16(tmp0, z3, consts.val[1], 0);
    tmp0 = vmlal_lane_s16(tmp0, z4, consts.val[1], 1);

    tmp2 = vmull_lane_s16(z1, consts.val[1], 2);
    tmp2 = vmlal_lane_s16(tmp2, z2, consts.val[1], 3);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[2], 0);
    tmp2 = vmlal_lane_s16(tmp2, z4, consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(dcval, vrshrn_n_s32(vaddq_s32(tmp10, tmp2),
                                            CONST_BITS - PASS1_BITS + 1));
    row3 = vcombine_s16(dcval, vrshrn_n_s32(vsubq_s32(tmp10, tmp2),
                                            CONST_BITS - PASS1_BITS + 1));
    row1 = vcombine_s16(dcval, vrshrn_n_s32(vaddq_s32(tmp12, tmp0),
                                            CONST_BITS - PASS1_BITS + 1));
    row2 = vcombine_s16(dcval, vrshrn_n_s32(vsubq_s32(tmp12, tmp0),
                                            CONST_BITS - PASS1_BITS + 1));
  } else if (right_ac_bitmap == 0) {
    /* AC coefficients are zero for columns 4, 5, 6, and 7.
     * Compute DC values for these columns.
     */
    int16x4_t dcval = vshl_n_s16(vget_high_s16(row0), PASS1_BITS);

    /* Commence regular IDCT computation for columns 0, 1, 2, and 3. */

    /* Load quantization table. */
    int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
    int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
    int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
    int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
    int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
    int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);

    /* Even part */
    int32x4_t tmp0 = vshll_n_s16(vget_low_s16(row0), CONST_BITS + 1);

    int16x4_t z2 = vmul_s16(vget_low_s16(row2), quant_row2);
    int16x4_t z3 = vmul_s16(vget_low_s16(row6), quant_row6);

    int32x4_t tmp2 = vmull_lane_s16(z2, consts.val[0], 0);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[0], 1);

    int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
    int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

    /* Odd part */
    int16x4_t z1 = vmul_s16(vget_low_s16(row7), quant_row7);
    z2 = vmul_s16(vget_low_s16(row5), quant_row5);
    z3 = vmul_s16(vget_low_s16(row3), quant_row3);
    int16x4_t z4 = vmul_s16(vget_low_s16(row1), quant_row1);

    tmp0 = vmull_lane_s16(z1, consts.val[0], 2);
    tmp0 = vmlal_lane_s16(tmp0, z2, consts.val[0], 3);
    tmp0 = vmlal_lane_s16(tmp0, z3, consts.val[1], 0);
    tmp0 = vmlal_lane_s16(tmp0, z4, consts.val[1], 1);

    tmp2 = vmull_lane_s16(z1, consts.val[1], 2);
    tmp2 = vmlal_lane_s16(tmp2, z2, consts.val[1], 3);
    tmp2 = vmlal_lane_s16(tmp2, z3, consts.val[2], 0);
    tmp2 = vmlal_lane_s16(tmp2, z4, consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp10, tmp2),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row3 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp10, tmp2),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row1 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp12, tmp0),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
    row2 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp12, tmp0),
                                     CONST_BITS - PASS1_BITS + 1), dcval);
  } else {
    /* All AC coefficients are non-zero; full IDCT calculation required. */
    int16x8_t quant_row1 = vld1q_s16(quantptr + 1 * DCTSIZE);
    int16x8_t quant_row2 = vld1q_s16(quantptr + 2 * DCTSIZE);
    int16x8_t quant_row3 = vld1q_s16(quantptr + 3 * DCTSIZE);
    int16x8_t quant_row5 = vld1q_s16(quantptr + 5 * DCTSIZE);
    int16x8_t quant_row6 = vld1q_s16(quantptr + 6 * DCTSIZE);
    int16x8_t quant_row7 = vld1q_s16(quantptr + 7 * DCTSIZE);

    /* Even part */
    int32x4_t tmp0_l = vshll_n_s16(vget_low_s16(row0), CONST_BITS + 1);
    int32x4_t tmp0_h = vshll_n_s16(vget_high_s16(row0), CONST_BITS + 1);

    int16x8_t z2 = vmulq_s16(row2, quant_row2);
    int16x8_t z3 = vmulq_s16(row6, quant_row6);

    int32x4_t tmp2_l = vmull_lane_s16(vget_low_s16(z2), consts.val[0], 0);
    int32x4_t tmp2_h = vmull_lane_s16(vget_high_s16(z2), consts.val[0], 0);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z3), consts.val[0], 1);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z3), consts.val[0], 1);

    int32x4_t tmp10_l = vaddq_s32(tmp0_l, tmp2_l);
    int32x4_t tmp10_h = vaddq_s32(tmp0_h, tmp2_h);
    int32x4_t tmp12_l = vsubq_s32(tmp0_l, tmp2_l);
    int32x4_t tmp12_h = vsubq_s32(tmp0_h, tmp2_h);

    /* Odd part */
    int16x8_t z1 = vmulq_s16(row7, quant_row7);
    z2 = vmulq_s16(row5, quant_row5);
    z3 = vmulq_s16(row3, quant_row3);
    int16x8_t z4 = vmulq_s16(row1, quant_row1);

    tmp0_l = vmull_lane_s16(vget_low_s16(z1), consts.val[0], 2);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z2), consts.val[0], 3);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z3), consts.val[1], 0);
    tmp0_l = vmlal_lane_s16(tmp0_l, vget_low_s16(z4), consts.val[1], 1);
    tmp0_h = vmull_lane_s16(vget_high_s16(z1), consts.val[0], 2);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z2), consts.val[0], 3);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z3), consts.val[1], 0);
    tmp0_h = vmlal_lane_s16(tmp0_h, vget_high_s16(z4), consts.val[1], 1);

    tmp2_l = vmull_lane_s16(vget_low_s16(z1), consts.val[1], 2);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z2), consts.val[1], 3);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z3), consts.val[2], 0);
    tmp2_l = vmlal_lane_s16(tmp2_l, vget_low_s16(z4), consts.val[2], 1);
    tmp2_h = vmull_lane_s16(vget_high_s16(z1), consts.val[1], 2);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z2), consts.val[1], 3);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z3), consts.val[2], 0);
    tmp2_h = vmlal_lane_s16(tmp2_h, vget_high_s16(z4), consts.val[2], 1);

    /* Final output stage: descale and narrow to 16-bit. */
    row0 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp10_l, tmp2_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vaddq_s32(tmp10_h, tmp2_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row3 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp10_l, tmp2_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vsubq_s32(tmp10_h, tmp2_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row1 = vcombine_s16(vrshrn_n_s32(vaddq_s32(tmp12_l, tmp0_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vaddq_s32(tmp12_h, tmp0_h),
                                     CONST_BITS - PASS1_BITS + 1));
    row2 = vcombine_s16(vrshrn_n_s32(vsubq_s32(tmp12_l, tmp0_l),
                                     CONST_BITS - PASS1_BITS + 1),
                        vrshrn_n_s32(vsubq_s32(tmp12_h, tmp0_h),
                                     CONST_BITS - PASS1_BITS + 1));
  }

  /* Transpose 8x4 block to perform IDCT on rows in second pass. */
  int16x8x2_t row_01 = vtrnq_s16(row0, row1);
  int16x8x2_t row_23 = vtrnq_s16(row2, row3);

  int32x4x2_t cols_0426 = vtrnq_s32(vreinterpretq_s32_s16(row_01.val[0]),
                                    vreinterpretq_s32_s16(row_23.val[0]));
  int32x4x2_t cols_1537 = vtrnq_s32(vreinterpretq_s32_s16(row_01.val[1]),
                                    vreinterpretq_s32_s16(row_23.val[1]));

  int16x4_t col0 = vreinterpret_s16_s32(vget_low_s32(cols_0426.val[0]));
  int16x4_t col1 = vreinterpret_s16_s32(vget_low_s32(cols_1537.val[0]));
  int16x4_t col2 = vreinterpret_s16_s32(vget_low_s32(cols_0426.val[1]));
  int16x4_t col3 = vreinterpret_s16_s32(vget_low_s32(cols_1537.val[1]));
  int16x4_t col5 = vreinterpret_s16_s32(vget_high_s32(cols_1537.val[0]));
  int16x4_t col6 = vreinterpret_s16_s32(vget_high_s32(cols_0426.val[1]));
  int16x4_t col7 = vreinterpret_s16_s32(vget_high_s32(cols_1537.val[1]));
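  /* col4 is never extracted: as in the scalar jpeg_idct_4x4(), the second
   * pass does not use element 4 of the pass-1 results.
   */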

  /* Commence second pass of IDCT. */

  /* Even part */
  int32x4_t tmp0 = vshll_n_s16(col0, CONST_BITS + 1);
  int32x4_t tmp2 = vmull_lane_s16(col2, consts.val[0], 0);
  tmp2 = vmlal_lane_s16(tmp2, col6, consts.val[0], 1);

  int32x4_t tmp10 = vaddq_s32(tmp0, tmp2);
  int32x4_t tmp12 = vsubq_s32(tmp0, tmp2);

  /* Odd part */
  tmp0 = vmull_lane_s16(col7, consts.val[0], 2);
  tmp0 = vmlal_lane_s16(tmp0, col5, consts.val[0], 3);
  tmp0 = vmlal_lane_s16(tmp0, col3, consts.val[1], 0);
  tmp0 = vmlal_lane_s16(tmp0, col1, consts.val[1], 1);

  tmp2 = vmull_lane_s16(col7, consts.val[1], 2);
  tmp2 = vmlal_lane_s16(tmp2, col5, consts.val[1], 3);
  tmp2 = vmlal_lane_s16(tmp2, col3, consts.val[2], 0);
  tmp2 = vmlal_lane_s16(tmp2, col1, consts.val[2], 1);

  /* Final output stage: descale and clamp to range [0-255]. */
  int16x8_t output_cols_02 = vcombine_s16(vaddhn_s32(tmp10, tmp2),
                                          vsubhn_s32(tmp12, tmp0));
  int16x8_t output_cols_13 = vcombine_s16(vaddhn_s32(tmp12, tmp0),
                                          vsubhn_s32(tmp10, tmp2));
  output_cols_02 = vrsraq_n_s16(vdupq_n_s16(CENTERJSAMPLE), output_cols_02,
                                CONST_BITS + PASS1_BITS + 3 + 1 - 16);
  output_cols_13 = vrsraq_n_s16(vdupq_n_s16(CENTERJSAMPLE), output_cols_13,
                                CONST_BITS + PASS1_BITS + 3 + 1 - 16);
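  /* The vaddhn/vsubhn narrowing above already discards the low 16 bits, so
   * the rounding shift here only removes the remaining
   * CONST_BITS + PASS1_BITS + 3 + 1 - 16 bits.  Accumulating onto
   * CENTERJSAMPLE applies the same +128 level shift as the scalar code.
   */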
  /* Narrow to 8-bit and convert to unsigned while zipping 8-bit elements.
   * An interleaving store completes the transpose.
   */
  uint8x8x2_t output_0123 = vzip_u8(vqmovun_s16(output_cols_02),
                                    vqmovun_s16(output_cols_13));
  uint16x4x2_t output_01_23 = { {
    vreinterpret_u16_u8(output_0123.val[0]),
    vreinterpret_u16_u8(output_0123.val[1])
  } };

  /* Store 4x4 block to memory. */
  JSAMPROW outptr0 = output_buf[0] + output_col;
  JSAMPROW outptr1 = output_buf[1] + output_col;
  JSAMPROW outptr2 = output_buf[2] + output_col;
  JSAMPROW outptr3 = output_buf[3] + output_col;
  vst2_lane_u16((uint16_t *)outptr0, output_01_23, 0);
  vst2_lane_u16((uint16_t *)outptr1, output_01_23, 1);
  vst2_lane_u16((uint16_t *)outptr2, output_01_23, 2);
  vst2_lane_u16((uint16_t *)outptr3, output_01_23, 3);
}