// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: mans@mansr.com (Mans Rullgard)
// Based on SSE code by: somnath@google.com (Somnath Banerjee)

#include "./dsp.h"

#if defined(WEBP_USE_NEON)

#include <assert.h>
#include <arm_neon.h>
#include <string.h>
#include "./neon.h"
#include "./yuv.h"

#ifdef FANCY_UPSAMPLING

//-----------------------------------------------------------------------------
// U/V upsampling

// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
#define UPSAMPLE_16PIXELS(r1, r2, out) do { \
  const uint8x8_t a = vld1_u8(r1 + 0); \
  const uint8x8_t b = vld1_u8(r1 + 1); \
  const uint8x8_t c = vld1_u8(r2 + 0); \
  const uint8x8_t d = vld1_u8(r2 + 1); \
  /* a + b + c + d */ \
  const uint16x8_t ad = vaddl_u8(a, d); \
  const uint16x8_t bc = vaddl_u8(b, c); \
  const uint16x8_t abcd = vaddq_u16(ad, bc); \
  /* 3a + b + c + 3d */ \
  const uint16x8_t al = vaddq_u16(abcd, vshlq_n_u16(ad, 1)); \
  /* a + 3b + 3c + d */ \
  const uint16x8_t bl = vaddq_u16(abcd, vshlq_n_u16(bc, 1)); \
  \
  const uint8x8_t diag2 = vshrn_n_u16(al, 3); \
  const uint8x8_t diag1 = vshrn_n_u16(bl, 3); \
  \
  const uint8x8_t A = vrhadd_u8(a, diag1); \
  const uint8x8_t B = vrhadd_u8(b, diag2); \
  const uint8x8_t C = vrhadd_u8(c, diag2); \
  const uint8x8_t D = vrhadd_u8(d, diag1); \
  \
  uint8x8x2_t A_B, C_D; \
  INIT_VECTOR2(A_B, A, B); \
  INIT_VECTOR2(C_D, C, D); \
  vst2_u8(out + 0, A_B); \
  vst2_u8(out + 32, C_D); \
} while (0)
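
// A rough scalar sketch (not compiled in) of what UPSAMPLE_16PIXELS produces
// for one 2x2 output group, ignoring the small rounding difference that the
// truncating '>> 3' above introduces. The function name is illustrative only.
#if 0
static void Upsample2x2Sketch(uint8_t a, uint8_t b,   // top-row neighbors
                              uint8_t c, uint8_t d,   // bottom-row neighbors
                              uint8_t out[4]) {
  out[0] = (uint8_t)((9 * a + 3 * b + 3 * c + 1 * d + 8) >> 4);  // ~A
  out[1] = (uint8_t)((3 * a + 9 * b + 1 * c + 3 * d + 8) >> 4);  // ~B
  out[2] = (uint8_t)((3 * a + 1 * b + 9 * c + 3 * d + 8) >> 4);  // ~C
  out[3] = (uint8_t)((1 * a + 3 * b + 3 * c + 9 * d + 8) >> 4);  // ~D
}
#endif  // 0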

// Turn the macro into a function to reduce code size where speed is not
// critical.
static void Upsample16Pixels_NEON(const uint8_t* r1, const uint8_t* r2,
                                  uint8_t* out) {
  UPSAMPLE_16PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
  uint8_t r1[9], r2[9]; \
  memcpy(r1, (tb), (num_pixels)); \
  memcpy(r2, (bb), (num_pixels)); \
  /* replicate last byte */ \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
  Upsample16Pixels_NEON(r1, r2, out); \
}
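
// Note: each UPSAMPLE_16PIXELS call reads 8 + 1 source samples per row, so the
// copy above pads a 9-byte local buffer by replicating the last sample;
// 'num_pixels' is expected to be in the 1..8 range when this macro is used.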

//-----------------------------------------------------------------------------
// YUV->RGB conversion

// note: the large constant 33050 is represented as 32768 + 282
static const int16_t kCoeffs1[4] = { 19077, 26149, 6419, 13320 };
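// The four entries are, in order, the Y, V(red), U(green) and V(green)
// conversion factors scaled by 1 << 14 (roughly 1.164, 1.596, 0.392, 0.813).
// The remaining U(blue) factor, ~2.017, scales to 33050, which does not fit in
// an int16_t lane: since vqdmulhq_n_s16() computes about (x * c) >> 15, a
// coefficient of 32768 would contribute exactly 'x', so CONVERT8 below
// multiplies by 282 only and adds U0 directly to make up the 32768 part.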

#define v255 vdup_n_u8(255)

#define STORE_Rgb(out, r, g, b) do { \
  uint8x8x3_t r_g_b; \
  INIT_VECTOR3(r_g_b, r, g, b); \
  vst3_u8(out, r_g_b); \
} while (0)

#define STORE_Bgr(out, r, g, b) do { \
  uint8x8x3_t b_g_r; \
  INIT_VECTOR3(b_g_r, b, g, r); \
  vst3_u8(out, b_g_r); \
} while (0)

#define STORE_Rgba(out, r, g, b) do { \
  uint8x8x4_t r_g_b_v255; \
  INIT_VECTOR4(r_g_b_v255, r, g, b, v255); \
  vst4_u8(out, r_g_b_v255); \
} while (0)

#define STORE_Bgra(out, r, g, b) do { \
  uint8x8x4_t b_g_r_v255; \
  INIT_VECTOR4(b_g_r_v255, b, g, r, v255); \
  vst4_u8(out, b_g_r_v255); \
} while (0)

#define STORE_Argb(out, r, g, b) do { \
  uint8x8x4_t v255_r_g_b; \
  INIT_VECTOR4(v255_r_g_b, v255, r, g, b); \
  vst4_u8(out, v255_r_g_b); \
} while (0)

#if !defined(WEBP_SWAP_16BIT_CSP)
#define ZIP_U8(lo, hi) vzip_u8((lo), (hi))
#else
#define ZIP_U8(lo, hi) vzip_u8((hi), (lo))
#endif

#define STORE_Rgba4444(out, r, g, b) do { \
  const uint8x8_t rg = vsri_n_u8(r, g, 4);     /* shift g, insert r */ \
  const uint8x8_t ba = vsri_n_u8(b, v255, 4);  /* shift a, insert b */ \
  const uint8x8x2_t rgba4444 = ZIP_U8(rg, ba); \
  vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1])); \
} while (0)

#define STORE_Rgb565(out, r, g, b) do { \
  const uint8x8_t rg = vsri_n_u8(r, g, 5);   /* shift g and insert r */ \
  const uint8x8_t g1 = vshl_n_u8(g, 3);      /* pre-shift g: 3bits */ \
  const uint8x8_t gb = vsri_n_u8(g1, b, 3);  /* shift b and insert g */ \
  const uint8x8x2_t rgb565 = ZIP_U8(rg, gb); \
  vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1])); \
} while (0)
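
// Packing note: vsri_n_u8(x, y, n) keeps the top n bits of x and inserts
// y >> n into the low bits, so vsri_n_u8(r, g, 4) yields the byte r7..r4|g7..g4
// and vsri_n_u8(r, g, 5) yields r7..r3|g7..g5. ZIP_U8 then interleaves the two
// bytes of each 16-bit pixel in the order selected by WEBP_SWAP_16BIT_CSP.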

#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) do { \
  int i; \
  for (i = 0; i < N; i += 8) { \
    const int off = ((cur_x) + i) * XSTEP; \
    const uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
    const uint8x8_t u = vld1_u8((src_uv) + i + 0); \
    const uint8x8_t v = vld1_u8((src_uv) + i + 16); \
    const int16x8_t Y0 = vreinterpretq_s16_u16(vshll_n_u8(y, 7)); \
    const int16x8_t U0 = vreinterpretq_s16_u16(vshll_n_u8(u, 7)); \
    const int16x8_t V0 = vreinterpretq_s16_u16(vshll_n_u8(v, 7)); \
    const int16x8_t Y1 = vqdmulhq_lane_s16(Y0, coeff1, 0); \
    const int16x8_t R0 = vqdmulhq_lane_s16(V0, coeff1, 1); \
    const int16x8_t G0 = vqdmulhq_lane_s16(U0, coeff1, 2); \
    const int16x8_t G1 = vqdmulhq_lane_s16(V0, coeff1, 3); \
    const int16x8_t B0 = vqdmulhq_n_s16(U0, 282); \
    const int16x8_t R1 = vqaddq_s16(Y1, R_Rounder); \
    const int16x8_t G2 = vqaddq_s16(Y1, G_Rounder); \
    const int16x8_t B1 = vqaddq_s16(Y1, B_Rounder); \
    const int16x8_t R2 = vqaddq_s16(R0, R1); \
    const int16x8_t G3 = vqaddq_s16(G0, G1); \
    const int16x8_t B2 = vqaddq_s16(B0, B1); \
    const int16x8_t G4 = vqsubq_s16(G2, G3); \
    const int16x8_t B3 = vqaddq_s16(B2, U0); \
    const uint8x8_t R = vqshrun_n_s16(R2, YUV_FIX2); \
    const uint8x8_t G = vqshrun_n_s16(G4, YUV_FIX2); \
    const uint8x8_t B = vqshrun_n_s16(B3, YUV_FIX2); \
    STORE_ ## FMT(out + off, R, G, B); \
  } \
} while (0)
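
// Roughly, what CONVERT8 computes per lane, with QDMULH(x, c) standing for the
// saturating (x * c) >> 15 used above and Yv/Uv/Vv the 8-bit inputs pre-shifted
// left by 7 bits:
//   R = clamp8((QDMULH(Yv, 19077) + QDMULH(Vv, 26149) + R_Rounder) >> YUV_FIX2)
//   G = clamp8((QDMULH(Yv, 19077) - QDMULH(Uv, 6419)
//               - QDMULH(Vv, 13320) + G_Rounder) >> YUV_FIX2)
//   B = clamp8((QDMULH(Yv, 19077) + QDMULH(Uv, 282) + Uv + B_Rounder) >> YUV_FIX2)
// The rounders (defined in the caller) fold the usual -16/-128 YUV offsets,
// plus rounding, into a single add per channel.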

#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
  int i; \
  for (i = 0; i < N; i++) { \
    const int off = ((cur_x) + i) * XSTEP; \
    const int y = src_y[(cur_x) + i]; \
    const int u = (src_uv)[i]; \
    const int v = (src_uv)[i + 16]; \
    FUNC(y, u, v, rgb + off); \
  } \
}

#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
  if (bottom_y != NULL) { \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
  } \
}

#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
  if (bottom_y != NULL) { \
    CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
  } \
}

#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
                      const uint8_t* top_u, const uint8_t* top_v, \
                      const uint8_t* cur_u, const uint8_t* cur_v, \
                      uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
  int block; \
  /* 16-byte aligned array to cache reconstructed u and v */ \
  uint8_t uv_buf[2 * 32 + 15]; \
  uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
  const int uv_len = (len + 1) >> 1; \
  /* 9 pixels must be readable for each block */ \
  const int num_blocks = (uv_len - 1) >> 3; \
  const int leftover = uv_len - num_blocks * 8; \
  const int last_pos = 1 + 16 * num_blocks; \
  \
  const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
  const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
  \
  const int16x4_t coeff1 = vld1_s16(kCoeffs1); \
  const int16x8_t R_Rounder = vdupq_n_s16(-14234); \
  const int16x8_t G_Rounder = vdupq_n_s16(8708); \
  const int16x8_t B_Rounder = vdupq_n_s16(-17685); \
  \
  /* Treat the first pixel in the regular way */ \
  assert(top_y != NULL); \
  { \
    const int u0 = (top_u[0] + u_diag) >> 1; \
    const int v0 = (top_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
  } \
  if (bottom_y != NULL) { \
    const int u0 = (cur_u[0] + u_diag) >> 1; \
    const int v0 = (cur_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
  } \
  \
  for (block = 0; block < num_blocks; ++block) { \
    UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
    UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
    CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
                  top_dst, bottom_dst, 16 * block + 1, 16); \
    top_u += 8; \
    cur_u += 8; \
    top_v += 8; \
    cur_v += 8; \
  } \
  \
  UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
  UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
  CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
                top_dst, bottom_dst, last_pos, len - last_pos); \
}
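
// Layout of the r_uv scratch buffer filled above for each 16-pixel block:
//   r_uv[ 0..15]  upsampled U for the top output row
//   r_uv[16..31]  upsampled V for the top output row
//   r_uv[32..47]  upsampled U for the bottom output row
//   r_uv[48..63]  upsampled V for the bottom output row
// which is why CONVERT8 reads V at (src_uv) + 16 and CONVERT2RGB_8/_1 pass
// (uv) + 32 for the bottom row. XSTEP is the per-pixel byte stride of the
// destination format: 4 for the RGBA-style layouts, 3 for RGB/BGR, 2 for the
// 16-bit formats instantiated below.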

// NEON variants of the fancy upsampler.
NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePair_NEON, Rgba, 4)
NEON_UPSAMPLE_FUNC(UpsampleBgraLinePair_NEON, Bgra, 4)
#if !defined(WEBP_REDUCE_CSP)
NEON_UPSAMPLE_FUNC(UpsampleRgbLinePair_NEON, Rgb, 3)
NEON_UPSAMPLE_FUNC(UpsampleBgrLinePair_NEON, Bgr, 3)
NEON_UPSAMPLE_FUNC(UpsampleArgbLinePair_NEON, Argb, 4)
NEON_UPSAMPLE_FUNC(UpsampleRgba4444LinePair_NEON, Rgba4444, 2)
NEON_UPSAMPLE_FUNC(UpsampleRgb565LinePair_NEON, Rgb565, 2)
#endif  // WEBP_REDUCE_CSP

//------------------------------------------------------------------------------
// Entry point

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersNEON(void) {
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_NEON;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_NEON;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_NEON;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_NEON;
#if !defined(WEBP_REDUCE_CSP)
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_NEON;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_NEON;
  WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_NEON;
  WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_NEON;
  WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_NEON;
  WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_NEON;
  WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_NEON;
#endif  // WEBP_REDUCE_CSP
}

#endif  // FANCY_UPSAMPLING

#endif  // WEBP_USE_NEON

#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_NEON))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersNEON)
#endif