// enc_sse41.c
  1. // Copyright 2015 Google Inc. All Rights Reserved.
  2. //
  3. // Use of this source code is governed by a BSD-style license
  4. // that can be found in the COPYING file in the root of the source
  5. // tree. An additional intellectual property rights grant can be found
  6. // in the file PATENTS. All contributing project authors may
  7. // be found in the AUTHORS file in the root of the source tree.
  8. // -----------------------------------------------------------------------------
  9. //
  10. // SSE4 version of some encoding functions.
  11. //
  12. // Author: Skal (pascal.massimino@gmail.com)
  13. #include "./dsp.h"
  14. #if defined(WEBP_USE_SSE41)
  15. #include <smmintrin.h>
  16. #include <stdlib.h> // for abs()
  17. #include "./common_sse2.h"
  18. #include "../enc/vp8i_enc.h"
  19. //------------------------------------------------------------------------------
  20. // Compute susceptibility based on DCT-coeff histograms.
// Builds a histogram of forward-DCT coefficient magnitudes for the 4x4
// sub-blocks [start_block, end_block) of the (ref - pred) residual.
// Each coefficient is binned as min(|coeff| >> 3, MAX_COEFF_THRESH) and
// the tallies are handed to VP8SetHistogramData().
static void CollectHistogram_SSE41(const uint8_t* ref, const uint8_t* pred,
                                   int start_block, int end_block,
                                   VP8Histogram* const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;
    // Forward transform of the j-th sub-block's residual (VP8DspScan[]
    // maps the block index to its byte offset within the macroblock).
    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
    // Convert coefficients to bin (within out[]), 8 words per register.
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // v = abs(out) >> 3
      const __m128i abs0 = _mm_abs_epi16(out0);
      const __m128i abs1 = _mm_abs_epi16(out1);
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }
    // Accumulate the 16 binned coefficients into the distribution.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}
  55. //------------------------------------------------------------------------------
  56. // Texture distortion
  57. //
  58. // We try to match the spectral content (weighted) between source and
  59. // reconstructed samples.
  60. // Hadamard transform
  61. // Returns the weighted sum of the absolute value of transformed coefficients.
  62. // w[] contains a row-major 4 by 4 symmetric matrix.
// Walsh-Hadamard transform of two 4x4 blocks (inA and inB) computed in
// parallel, one block in each 64-bit half of the registers. Returns
// sum_k w[k] * (|T(inA)[k]| - |T(inB)[k]|), i.e. the difference of the
// two weighted coefficient-magnitude sums. w[] is a row-major 4x4
// symmetric matrix, which is what lets the vertical pass run first
// without a preliminary transpose.
static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB,
                            const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;
  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);
    // In SSE4.1, with gcc 4.8 at least (maybe other versions),
    // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last lump
    // of inA and inB, _mm_loadl_epi64 is still used not to have an out of
    // bound read.
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);
    // Combine inA and inB (we'll do two transforms in parallel):
    // the low 4 bytes of each row come from inA, the next 4 from inB.
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    // Widen the 8 combined bytes of each row to 16-bit lanes.
    tmp_0 = _mm_cvtepu8_epi16(inAB_0);
    tmp_1 = _mm_cvtepu8_epi16(inAB_1);
    tmp_2 = _mm_cvtepu8_epi16(inAB_2);
    tmp_3 = _mm_cvtepu8_epi16(inAB_3);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }
  // Vertical pass first to avoid a transpose (vertical and horizontal passes
  // are commutative because w/kWeightY is symmetric) and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }
  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // Separate the transforms of inA and inB: after the transpose, the
    // low 64 bits of b0..b3 hold inA's coefficients, the high 64 inB's.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);
    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);
    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);
    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b2);
  }
  // Final horizontal reduction of the four 32-bit partial sums.
  return sum[0] + sum[1] + sum[2] + sum[3];
}
  150. static int Disto4x4_SSE41(const uint8_t* const a, const uint8_t* const b,
  151. const uint16_t* const w) {
  152. const int diff_sum = TTransform_SSE41(a, b, w);
  153. return abs(diff_sum) >> 5;
  154. }
  155. static int Disto16x16_SSE41(const uint8_t* const a, const uint8_t* const b,
  156. const uint16_t* const w) {
  157. int D = 0;
  158. int x, y;
  159. for (y = 0; y < 16 * BPS; y += 4 * BPS) {
  160. for (x = 0; x < 16; x += 4) {
  161. D += Disto4x4_SSE41(a + x + y, b + x + y, w);
  162. }
  163. }
  164. return D;
  165. }
  166. //------------------------------------------------------------------------------
  167. // Quantization
  168. //
  169. // Generates a pshufb constant for shuffling 16b words.
// Output 16-bit word i is taken from input word given by the i-th argument
// (A..H). A negative argument produces control bytes with the MSB set,
// which makes pshufb zero that output lane.
#define PSHUFB_CST(A,B,C,D,E,F,G,H) \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)
// Quantizes the 16 coefficients of in[]:
//  - out[] receives the quantized levels in zigzag order,
//  - in[] is overwritten with the dequantized (level * Q) coefficients,
//  - 'sharpen', when non-NULL, is added to |coeff| before quantization.
// Returns 1 if any quantized level is non-zero, 0 otherwise.
static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                                             const uint16_t* const sharpen,
                                             const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ); high/low 16-bit halves multiplied separately,
    // then interleaved into four registers of 32-bit products.
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);
    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);
    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // put sign back (levels were computed on |coeff|)
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q  (dequantized coefficients, written back for the caller)
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);
  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // zigzag the output before storing it. The re-ordering is:
  //    0 1 2 3 4 5 6 7 | 8  9 10 11 12 13 14 15
  // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15
  // There's only two misplaced entries ([8] and [7]) that are crossing the
  // reg's boundaries.
  // We use pshufb instead of pshuflo/pshufhi.
  {
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);  // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);  // extract #8
    // Merge in the cross-register entries (the zeroed lanes receive them).
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}
  264. #undef PSHUFB_CST
  265. static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
  266. const VP8Matrix* const mtx) {
  267. return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen_[0], mtx);
  268. }
  269. static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16],
  270. const VP8Matrix* const mtx) {
  271. return DoQuantizeBlock_SSE41(in, out, NULL, mtx);
  272. }
  273. static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32],
  274. const VP8Matrix* const mtx) {
  275. int nz;
  276. const uint16_t* const sharpen = &mtx->sharpen_[0];
  277. nz = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
  278. nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
  279. return nz;
  280. }
  281. //------------------------------------------------------------------------------
  282. // Entry point
extern void VP8EncDspInitSSE41(void);

// Installs the SSE4.1 implementations into the encoder's dsp function
// pointers. Called once at init; the TSAN annotation tolerates the benign
// race on these global pointers.
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  VP8CollectHistogram = CollectHistogram_SSE41;
  VP8EncQuantizeBlock = QuantizeBlock_SSE41;
  VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;
  VP8TDisto4x4 = Disto4x4_SSE41;
  VP8TDisto16x16 = Disto16x16_SSE41;
}
  292. #else // !WEBP_USE_SSE41
  293. WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)
  294. #endif // WEBP_USE_SSE41