jfdctfst-neon.c 7.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214
  1. /*
  2. * jfdctfst-neon.c - fast integer FDCT (Arm Neon)
  3. *
  4. * Copyright (C) 2020, Arm Limited. All Rights Reserved.
  5. *
  6. * This software is provided 'as-is', without any express or implied
  7. * warranty. In no event will the authors be held liable for any damages
  8. * arising from the use of this software.
  9. *
  10. * Permission is granted to anyone to use this software for any purpose,
  11. * including commercial applications, and to alter it and redistribute it
  12. * freely, subject to the following restrictions:
  13. *
  14. * 1. The origin of this software must not be misrepresented; you must not
  15. * claim that you wrote the original software. If you use this software
  16. * in a product, an acknowledgment in the product documentation would be
  17. * appreciated but is not required.
  18. * 2. Altered source versions must be plainly marked as such, and must not be
  19. * misrepresented as being the original software.
  20. * 3. This notice may not be removed or altered from any source distribution.
  21. */
  22. #define JPEG_INTERNALS
  23. #include "../../jinclude.h"
  24. #include "../../jpeglib.h"
  25. #include "../../jsimd.h"
  26. #include "../../jdct.h"
  27. #include "../../jsimddct.h"
  28. #include "../jsimd.h"
  29. #include "align.h"
  30. #include <arm_neon.h>
  31. /* jsimd_fdct_ifast_neon() performs a fast, not so accurate forward DCT
  32. * (Discrete Cosine Transform) on one block of samples. It uses the same
  33. * calculations and produces exactly the same output as IJG's original
  34. * jpeg_fdct_ifast() function, which can be found in jfdctfst.c.
  35. *
  36. * Scaled integer constants are used to avoid floating-point arithmetic:
  37. * 0.382683433 = 12544 * 2^-15
  38. * 0.541196100 = 17792 * 2^-15
  39. * 0.707106781 = 23168 * 2^-15
  40. * 0.306562965 = 9984 * 2^-15
  41. *
  42. * See jfdctfst.c for further details of the DCT algorithm. Where possible,
  43. * the variable names and comments here in jsimd_fdct_ifast_neon() match up
  44. * with those in jpeg_fdct_ifast().
  45. */
/* 15-bit scaled fixed-point constants, consumed via vqdmulhq_lane_s16(),
 * which computes (a * b) >> 15 (saturating doubling multiply, high half).
 * Each value is the corresponding 8-bit FIX() constant from jfdctfst.c
 * rescaled by 2^7 (e.g. 139 * 128 = 17792), so this routine reproduces
 * jpeg_fdct_ifast()'s output bit-exactly rather than using the best
 * 15-bit rounding of the real-valued coefficient.
 */
#define F_0_382 12544
#define F_0_541 17792
#define F_0_707 23168
#define F_0_306 9984

/* Constant table loaded once into a 64-bit Neon register; lane indices
 * 0..3 correspond to F_0_382, F_0_541, F_0_707, F_0_306 respectively.
 */
ALIGN(16) static const int16_t jsimd_fdct_ifast_neon_consts[] = {
  F_0_382, F_0_541, F_0_707, F_0_306
};
/* Perform the fast, in-place forward DCT on one 8x8 block of coefficients.
 *
 * data: pointer to a DCTSIZE2 (64) element block of DCTELEM (int16_t)
 *       samples in row-major order; overwritten with the DCT coefficients.
 *
 * The algorithm and arithmetic mirror jpeg_fdct_ifast() in jfdctfst.c:
 * a 1-D 8-point DCT is applied to the rows (pass 1) and then, after a
 * transpose, to the columns (pass 2).  All eight 1-D transforms of a pass
 * run in parallel, one element per vector lane.
 */
void jsimd_fdct_ifast_neon(DCTELEM *data)
{
  /* Load an 8x8 block of samples into Neon registers.  De-interleaving loads
   * are used, followed by vuzp to transpose the block such that we have a
   * column of samples per vector - allowing all rows to be processed at once.
   */
  int16x8x4_t data1 = vld4q_s16(data);
  int16x8x4_t data2 = vld4q_s16(data + 4 * DCTSIZE);

  /* vld4 interleaved by 4, so vuzp recombines matching columns:
   * val[0] of each pair holds columns 0-3, val[1] columns 4-7.
   */
  int16x8x2_t cols_04 = vuzpq_s16(data1.val[0], data2.val[0]);
  int16x8x2_t cols_15 = vuzpq_s16(data1.val[1], data2.val[1]);
  int16x8x2_t cols_26 = vuzpq_s16(data1.val[2], data2.val[2]);
  int16x8x2_t cols_37 = vuzpq_s16(data1.val[3], data2.val[3]);

  int16x8_t col0 = cols_04.val[0];
  int16x8_t col1 = cols_15.val[0];
  int16x8_t col2 = cols_26.val[0];
  int16x8_t col3 = cols_37.val[0];
  int16x8_t col4 = cols_04.val[1];
  int16x8_t col5 = cols_15.val[1];
  int16x8_t col6 = cols_26.val[1];
  int16x8_t col7 = cols_37.val[1];

  /* Pass 1: process rows. */

  /* Load DCT conversion constants (lanes 0-3 = F_0_382, F_0_541, F_0_707,
   * F_0_306; see jsimd_fdct_ifast_neon_consts.)
   */
  const int16x4_t consts = vld1_s16(jsimd_fdct_ifast_neon_consts);

  /* Butterfly stage: sums and differences of mirrored sample pairs. */
  int16x8_t tmp0 = vaddq_s16(col0, col7);
  int16x8_t tmp7 = vsubq_s16(col0, col7);
  int16x8_t tmp1 = vaddq_s16(col1, col6);
  int16x8_t tmp6 = vsubq_s16(col1, col6);
  int16x8_t tmp2 = vaddq_s16(col2, col5);
  int16x8_t tmp5 = vsubq_s16(col2, col5);
  int16x8_t tmp3 = vaddq_s16(col3, col4);
  int16x8_t tmp4 = vsubq_s16(col3, col4);

  /* Even part */
  int16x8_t tmp10 = vaddq_s16(tmp0, tmp3);   /* phase 2 */
  int16x8_t tmp13 = vsubq_s16(tmp0, tmp3);
  int16x8_t tmp11 = vaddq_s16(tmp1, tmp2);
  int16x8_t tmp12 = vsubq_s16(tmp1, tmp2);

  col0 = vaddq_s16(tmp10, tmp11);            /* phase 3 */
  col4 = vsubq_s16(tmp10, tmp11);

  /* z1 = (tmp12 + tmp13) * 0.707106781; lane 2 = F_0_707. */
  int16x8_t z1 = vqdmulhq_lane_s16(vaddq_s16(tmp12, tmp13), consts, 2);
  col2 = vaddq_s16(tmp13, z1);               /* phase 5 */
  col6 = vsubq_s16(tmp13, z1);

  /* Odd part */
  tmp10 = vaddq_s16(tmp4, tmp5);             /* phase 2 */
  tmp11 = vaddq_s16(tmp5, tmp6);
  tmp12 = vaddq_s16(tmp6, tmp7);

  /* Rotator: z5 = (tmp10 - tmp12) * 0.382683433 (lane 0 = F_0_382). */
  int16x8_t z5 = vqdmulhq_lane_s16(vsubq_s16(tmp10, tmp12), consts, 0);
  /* z2 = tmp10 * 0.541196100 + z5 (lane 1 = F_0_541). */
  int16x8_t z2 = vqdmulhq_lane_s16(tmp10, consts, 1);
  z2 = vaddq_s16(z2, z5);
  /* z4 = tmp12 * 1.306562965 + z5, computed as tmp12 * 0.306562965 +
   * tmp12 + z5 (lane 3 = F_0_306) to stay within 16-bit range.
   */
  int16x8_t z4 = vqdmulhq_lane_s16(tmp12, consts, 3);
  z5 = vaddq_s16(tmp12, z5);
  z4 = vaddq_s16(z4, z5);
  /* z3 = tmp11 * 0.707106781 (lane 2 = F_0_707). */
  int16x8_t z3 = vqdmulhq_lane_s16(tmp11, consts, 2);

  int16x8_t z11 = vaddq_s16(tmp7, z3);       /* phase 5 */
  int16x8_t z13 = vsubq_s16(tmp7, z3);

  col5 = vaddq_s16(z13, z2);                 /* phase 6 */
  col3 = vsubq_s16(z13, z2);
  col1 = vaddq_s16(z11, z4);
  col7 = vsubq_s16(z11, z4);

  /* Transpose to work on columns in pass 2: a standard 8x8 transpose
   * network built from 16-bit trn, 32-bit trn, and 32-bit zip steps.
   */
  int16x8x2_t cols_01 = vtrnq_s16(col0, col1);
  int16x8x2_t cols_23 = vtrnq_s16(col2, col3);
  int16x8x2_t cols_45 = vtrnq_s16(col4, col5);
  int16x8x2_t cols_67 = vtrnq_s16(col6, col7);

  int32x4x2_t cols_0145_l = vtrnq_s32(vreinterpretq_s32_s16(cols_01.val[0]),
                                      vreinterpretq_s32_s16(cols_45.val[0]));
  int32x4x2_t cols_0145_h = vtrnq_s32(vreinterpretq_s32_s16(cols_01.val[1]),
                                      vreinterpretq_s32_s16(cols_45.val[1]));
  int32x4x2_t cols_2367_l = vtrnq_s32(vreinterpretq_s32_s16(cols_23.val[0]),
                                      vreinterpretq_s32_s16(cols_67.val[0]));
  int32x4x2_t cols_2367_h = vtrnq_s32(vreinterpretq_s32_s16(cols_23.val[1]),
                                      vreinterpretq_s32_s16(cols_67.val[1]));

  int32x4x2_t rows_04 = vzipq_s32(cols_0145_l.val[0], cols_2367_l.val[0]);
  int32x4x2_t rows_15 = vzipq_s32(cols_0145_h.val[0], cols_2367_h.val[0]);
  int32x4x2_t rows_26 = vzipq_s32(cols_0145_l.val[1], cols_2367_l.val[1]);
  int32x4x2_t rows_37 = vzipq_s32(cols_0145_h.val[1], cols_2367_h.val[1]);

  int16x8_t row0 = vreinterpretq_s16_s32(rows_04.val[0]);
  int16x8_t row1 = vreinterpretq_s16_s32(rows_15.val[0]);
  int16x8_t row2 = vreinterpretq_s16_s32(rows_26.val[0]);
  int16x8_t row3 = vreinterpretq_s16_s32(rows_37.val[0]);
  int16x8_t row4 = vreinterpretq_s16_s32(rows_04.val[1]);
  int16x8_t row5 = vreinterpretq_s16_s32(rows_15.val[1]);
  int16x8_t row6 = vreinterpretq_s16_s32(rows_26.val[1]);
  int16x8_t row7 = vreinterpretq_s16_s32(rows_37.val[1]);

  /* Pass 2: process columns.  Identical arithmetic to pass 1, reusing the
   * tmp/z temporaries; see the pass 1 comments for the per-step meaning.
   */
  tmp0 = vaddq_s16(row0, row7);
  tmp7 = vsubq_s16(row0, row7);
  tmp1 = vaddq_s16(row1, row6);
  tmp6 = vsubq_s16(row1, row6);
  tmp2 = vaddq_s16(row2, row5);
  tmp5 = vsubq_s16(row2, row5);
  tmp3 = vaddq_s16(row3, row4);
  tmp4 = vsubq_s16(row3, row4);

  /* Even part */
  tmp10 = vaddq_s16(tmp0, tmp3);             /* phase 2 */
  tmp13 = vsubq_s16(tmp0, tmp3);
  tmp11 = vaddq_s16(tmp1, tmp2);
  tmp12 = vsubq_s16(tmp1, tmp2);

  row0 = vaddq_s16(tmp10, tmp11);            /* phase 3 */
  row4 = vsubq_s16(tmp10, tmp11);

  z1 = vqdmulhq_lane_s16(vaddq_s16(tmp12, tmp13), consts, 2);
  row2 = vaddq_s16(tmp13, z1);               /* phase 5 */
  row6 = vsubq_s16(tmp13, z1);

  /* Odd part */
  tmp10 = vaddq_s16(tmp4, tmp5);             /* phase 2 */
  tmp11 = vaddq_s16(tmp5, tmp6);
  tmp12 = vaddq_s16(tmp6, tmp7);

  z5 = vqdmulhq_lane_s16(vsubq_s16(tmp10, tmp12), consts, 0);
  z2 = vqdmulhq_lane_s16(tmp10, consts, 1);
  z2 = vaddq_s16(z2, z5);
  z4 = vqdmulhq_lane_s16(tmp12, consts, 3);
  z5 = vaddq_s16(tmp12, z5);
  z4 = vaddq_s16(z4, z5);
  z3 = vqdmulhq_lane_s16(tmp11, consts, 2);

  z11 = vaddq_s16(tmp7, z3);                 /* phase 5 */
  z13 = vsubq_s16(tmp7, z3);

  row5 = vaddq_s16(z13, z2);                 /* phase 6 */
  row3 = vsubq_s16(z13, z2);
  row1 = vaddq_s16(z11, z4);
  row7 = vsubq_s16(z11, z4);

  /* Store the transformed coefficients back to the block, one row per
   * vector (the pass-2 transpose already restored row-major order).
   */
  vst1q_s16(data + 0 * DCTSIZE, row0);
  vst1q_s16(data + 1 * DCTSIZE, row1);
  vst1q_s16(data + 2 * DCTSIZE, row2);
  vst1q_s16(data + 3 * DCTSIZE, row3);
  vst1q_s16(data + 4 * DCTSIZE, row4);
  vst1q_s16(data + 5 * DCTSIZE, row5);
  vst1q_s16(data + 6 * DCTSIZE, row6);
  vst1q_s16(data + 7 * DCTSIZE, row7);
}