/*
 * jccolext-neon.c - colorspace conversion (64-bit Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */
/* This file is included by jccolor-neon.c */


/* RGB -> YCbCr conversion is defined by the following equations:
 *    Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
 *    Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B  + 128
 *    Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B  + 128
 *
 * Avoid floating point arithmetic by using shifted integer constants:
 *    0.29899597 = 19595 * 2^-16
 *    0.58700561 = 38470 * 2^-16
 *    0.11399841 =  7471 * 2^-16
 *    0.16874695 = 11059 * 2^-16
 *    0.33125305 = 21709 * 2^-16
 *    0.50000000 = 32768 * 2^-16
 *    0.41868592 = 27439 * 2^-16
 *    0.08131409 =  5329 * 2^-16
 * These constants are defined in jccolor-neon.c
 *
 * We add the fixed-point equivalent of 0.5 to Cb and Cr, which effectively
 * rounds up or down the result via integer truncation.
 */
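
/* A quick sanity check on the constants: the three Y coefficients sum to
 * exactly 2^16 (19595 + 38470 + 7471 = 65536), so a full-scale gray pixel
 * (R = G = B = 255) accumulates 65536 * 255 and the rounding right shift by
 * 16 returns exactly 255. This also bounds the Y accumulator below 2^24,
 * well within a 32-bit lane.
 */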

void jsimd_rgb_ycc_convert_neon(JDIMENSION image_width, JSAMPARRAY input_buf,
                                JSAMPIMAGE output_buf, JDIMENSION output_row,
                                int num_rows)
{
  /* Pointer to RGB(X/A) input data */
  JSAMPROW inptr;
  /* Pointers to Y, Cb, and Cr output data */
  JSAMPROW outptr0, outptr1, outptr2;
  /* Allocate temporary buffer for final (image_width % 16) pixels in row. */
  ALIGN(16) uint8_t tmp_buf[16 * RGB_PIXELSIZE];

  /* Set up conversion constants. */
  const uint16x8_t consts = vld1q_u16(jsimd_rgb_ycc_neon_consts);
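  /* The constant below packs two terms into one vector: the +128 chroma
   * offset in Q16 fixed point (128 << 16) plus 32767 = 2^15 - 1, just under
   * the fixed-point 0.5, so the truncating narrowing shifts (vshrn) used to
   * descale Cb and Cr still round. Y has no offset and uses the rounding
   * shift (vrshrn) instead.
   */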
  const uint32x4_t scaled_128_5 = vdupq_n_u32((128 << 16) + 32767);

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;

    int cols_remaining = image_width;
    for (; cols_remaining >= 16; cols_remaining -= 16) {
#if RGB_PIXELSIZE == 4
      uint8x16x4_t input_pixels = vld4q_u8(inptr);
#else
      uint8x16x3_t input_pixels = vld3q_u8(inptr);
#endif
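      /* The deinterleaving loads (vld3q_u8/vld4q_u8) split the packed pixel
       * stream so that val[RGB_RED], val[RGB_GREEN], and val[RGB_BLUE] each
       * hold 16 samples of a single channel.
       */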
      uint16x8_t r_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_BLUE]));
      uint16x8_t r_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_BLUE]));

      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_ll = vmull_laneq_u16(vget_low_u16(r_l), consts, 0);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(g_l), consts, 1);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(b_l), consts, 2);
      uint32x4_t y_lh = vmull_laneq_u16(vget_high_u16(r_l), consts, 0);
      y_lh = vmlal_laneq_u16(y_lh, vget_high_u16(g_l), consts, 1);
      y_lh = vmlal_laneq_u16(y_lh, vget_high_u16(b_l), consts, 2);
      uint32x4_t y_hl = vmull_laneq_u16(vget_low_u16(r_h), consts, 0);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(g_h), consts, 1);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(b_h), consts, 2);
      uint32x4_t y_hh = vmull_laneq_u16(vget_high_u16(r_h), consts, 0);
      y_hh = vmlal_laneq_u16(y_hh, vget_high_u16(g_h), consts, 1);
      y_hh = vmlal_laneq_u16(y_hh, vget_high_u16(b_h), consts, 2);
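      /* The _ll/_lh/_hl/_hh suffixes track which quarter of the 16 pixels
       * each 32-bit accumulator covers: low/high half of the 8-bit vector,
       * then low/high half of the widened 16-bit vectors.
       */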
      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128 */
      uint32x4_t cb_ll = scaled_128_5;
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(r_l), consts, 3);
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(g_l), consts, 4);
      cb_ll = vmlal_laneq_u16(cb_ll, vget_low_u16(b_l), consts, 5);
      uint32x4_t cb_lh = scaled_128_5;
      cb_lh = vmlsl_laneq_u16(cb_lh, vget_high_u16(r_l), consts, 3);
      cb_lh = vmlsl_laneq_u16(cb_lh, vget_high_u16(g_l), consts, 4);
      cb_lh = vmlal_laneq_u16(cb_lh, vget_high_u16(b_l), consts, 5);
      uint32x4_t cb_hl = scaled_128_5;
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(r_h), consts, 3);
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(g_h), consts, 4);
      cb_hl = vmlal_laneq_u16(cb_hl, vget_low_u16(b_h), consts, 5);
      uint32x4_t cb_hh = scaled_128_5;
      cb_hh = vmlsl_laneq_u16(cb_hh, vget_high_u16(r_h), consts, 3);
      cb_hh = vmlsl_laneq_u16(cb_hh, vget_high_u16(g_h), consts, 4);
      cb_hh = vmlal_laneq_u16(cb_hh, vget_high_u16(b_h), consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + 128 */
      uint32x4_t cr_ll = scaled_128_5;
      cr_ll = vmlal_laneq_u16(cr_ll, vget_low_u16(r_l), consts, 5);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(g_l), consts, 6);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(b_l), consts, 7);
      uint32x4_t cr_lh = scaled_128_5;
      cr_lh = vmlal_laneq_u16(cr_lh, vget_high_u16(r_l), consts, 5);
      cr_lh = vmlsl_laneq_u16(cr_lh, vget_high_u16(g_l), consts, 6);
      cr_lh = vmlsl_laneq_u16(cr_lh, vget_high_u16(b_l), consts, 7);
      uint32x4_t cr_hl = scaled_128_5;
      cr_hl = vmlal_laneq_u16(cr_hl, vget_low_u16(r_h), consts, 5);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(g_h), consts, 6);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(b_h), consts, 7);
      uint32x4_t cr_hh = scaled_128_5;
      cr_hh = vmlal_laneq_u16(cr_hh, vget_high_u16(r_h), consts, 5);
      cr_hh = vmlsl_laneq_u16(cr_hh, vget_high_u16(g_h), consts, 6);
      cr_hh = vmlsl_laneq_u16(cr_hh, vget_high_u16(b_h), consts, 7);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16),
                                    vrshrn_n_u32(y_lh, 16));
      uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16),
                                    vrshrn_n_u32(y_hh, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16),
                                     vshrn_n_u32(cb_lh, 16));
      uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16),
                                     vshrn_n_u32(cb_hh, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_l = vcombine_u16(vshrn_n_u32(cr_ll, 16),
                                     vshrn_n_u32(cr_lh, 16));
      uint16x8_t cr_h = vcombine_u16(vshrn_n_u32(cr_hl, 16),
                                     vshrn_n_u32(cr_hh, 16));

      /* Narrow Y, Cb, and Cr values to 8-bit and store to memory. Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1q_u8(outptr0, vcombine_u8(vmovn_u16(y_l), vmovn_u16(y_h)));
      vst1q_u8(outptr1, vcombine_u8(vmovn_u16(cb_l), vmovn_u16(cb_h)));
      vst1q_u8(outptr2, vcombine_u8(vmovn_u16(cr_l), vmovn_u16(cr_h)));

      /* Increment pointers. */
      inptr += (16 * RGB_PIXELSIZE);
      outptr0 += 16;
      outptr1 += 16;
      outptr2 += 16;
    }

    if (cols_remaining > 8) {
      /* To prevent buffer overread by the vector load instructions, the last
       * (image_width % 16) columns of data are first memcopied to a temporary
       * buffer large enough to accommodate the vector load.
       */
      memcpy(tmp_buf, inptr, cols_remaining * RGB_PIXELSIZE);
      inptr = tmp_buf;
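      /* Lanes beyond cols_remaining hold stale bytes; the values computed
       * from them land in output padding that the ALIGN_SIZE overwrite rule
       * below already permits.
       */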
#if RGB_PIXELSIZE == 4
      uint8x16x4_t input_pixels = vld4q_u8(inptr);
#else
      uint8x16x3_t input_pixels = vld3q_u8(inptr);
#endif
      uint16x8_t r_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_BLUE]));
      uint16x8_t r_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_BLUE]));

      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_ll = vmull_laneq_u16(vget_low_u16(r_l), consts, 0);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(g_l), consts, 1);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(b_l), consts, 2);
      uint32x4_t y_lh = vmull_laneq_u16(vget_high_u16(r_l), consts, 0);
      y_lh = vmlal_laneq_u16(y_lh, vget_high_u16(g_l), consts, 1);
      y_lh = vmlal_laneq_u16(y_lh, vget_high_u16(b_l), consts, 2);
      uint32x4_t y_hl = vmull_laneq_u16(vget_low_u16(r_h), consts, 0);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(g_h), consts, 1);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(b_h), consts, 2);
      uint32x4_t y_hh = vmull_laneq_u16(vget_high_u16(r_h), consts, 0);
      y_hh = vmlal_laneq_u16(y_hh, vget_high_u16(g_h), consts, 1);
      y_hh = vmlal_laneq_u16(y_hh, vget_high_u16(b_h), consts, 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128 */
      uint32x4_t cb_ll = scaled_128_5;
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(r_l), consts, 3);
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(g_l), consts, 4);
      cb_ll = vmlal_laneq_u16(cb_ll, vget_low_u16(b_l), consts, 5);
      uint32x4_t cb_lh = scaled_128_5;
      cb_lh = vmlsl_laneq_u16(cb_lh, vget_high_u16(r_l), consts, 3);
      cb_lh = vmlsl_laneq_u16(cb_lh, vget_high_u16(g_l), consts, 4);
      cb_lh = vmlal_laneq_u16(cb_lh, vget_high_u16(b_l), consts, 5);
      uint32x4_t cb_hl = scaled_128_5;
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(r_h), consts, 3);
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(g_h), consts, 4);
      cb_hl = vmlal_laneq_u16(cb_hl, vget_low_u16(b_h), consts, 5);
      uint32x4_t cb_hh = scaled_128_5;
      cb_hh = vmlsl_laneq_u16(cb_hh, vget_high_u16(r_h), consts, 3);
      cb_hh = vmlsl_laneq_u16(cb_hh, vget_high_u16(g_h), consts, 4);
      cb_hh = vmlal_laneq_u16(cb_hh, vget_high_u16(b_h), consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + 128 */
      uint32x4_t cr_ll = scaled_128_5;
      cr_ll = vmlal_laneq_u16(cr_ll, vget_low_u16(r_l), consts, 5);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(g_l), consts, 6);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(b_l), consts, 7);
      uint32x4_t cr_lh = scaled_128_5;
      cr_lh = vmlal_laneq_u16(cr_lh, vget_high_u16(r_l), consts, 5);
      cr_lh = vmlsl_laneq_u16(cr_lh, vget_high_u16(g_l), consts, 6);
      cr_lh = vmlsl_laneq_u16(cr_lh, vget_high_u16(b_l), consts, 7);
      uint32x4_t cr_hl = scaled_128_5;
      cr_hl = vmlal_laneq_u16(cr_hl, vget_low_u16(r_h), consts, 5);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(g_h), consts, 6);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(b_h), consts, 7);
      uint32x4_t cr_hh = scaled_128_5;
      cr_hh = vmlal_laneq_u16(cr_hh, vget_high_u16(r_h), consts, 5);
      cr_hh = vmlsl_laneq_u16(cr_hh, vget_high_u16(g_h), consts, 6);
      cr_hh = vmlsl_laneq_u16(cr_hh, vget_high_u16(b_h), consts, 7);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16),
                                    vrshrn_n_u32(y_lh, 16));
      uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16),
                                    vrshrn_n_u32(y_hh, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16),
                                     vshrn_n_u32(cb_lh, 16));
      uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16),
                                     vshrn_n_u32(cb_hh, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_l = vcombine_u16(vshrn_n_u32(cr_ll, 16),
                                     vshrn_n_u32(cr_lh, 16));
      uint16x8_t cr_h = vcombine_u16(vshrn_n_u32(cr_hl, 16),
                                     vshrn_n_u32(cr_hh, 16));

      /* Narrow Y, Cb, and Cr values to 8-bit and store to memory. Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1q_u8(outptr0, vcombine_u8(vmovn_u16(y_l), vmovn_u16(y_h)));
      vst1q_u8(outptr1, vcombine_u8(vmovn_u16(cb_l), vmovn_u16(cb_h)));
      vst1q_u8(outptr2, vcombine_u8(vmovn_u16(cr_l), vmovn_u16(cr_h)));
    } else if (cols_remaining > 0) {
      /* To prevent buffer overread by the vector load instructions, the last
       * (image_width % 16) columns of data are first memcopied to a temporary
       * buffer large enough to accommodate the vector load.
       */
      memcpy(tmp_buf, inptr, cols_remaining * RGB_PIXELSIZE);
      inptr = tmp_buf;
#if RGB_PIXELSIZE == 4
      uint8x8x4_t input_pixels = vld4_u8(inptr);
#else
      uint8x8x3_t input_pixels = vld3_u8(inptr);
#endif
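      /* At most eight pixels remain, so 64-bit deinterleaving loads
       * (vld3_u8/vld4_u8) and a single widened vector per channel are
       * enough for this tail.
       */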
      uint16x8_t r = vmovl_u8(input_pixels.val[RGB_RED]);
      uint16x8_t g = vmovl_u8(input_pixels.val[RGB_GREEN]);
      uint16x8_t b = vmovl_u8(input_pixels.val[RGB_BLUE]);

      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_l = vmull_laneq_u16(vget_low_u16(r), consts, 0);
      y_l = vmlal_laneq_u16(y_l, vget_low_u16(g), consts, 1);
      y_l = vmlal_laneq_u16(y_l, vget_low_u16(b), consts, 2);
      uint32x4_t y_h = vmull_laneq_u16(vget_high_u16(r), consts, 0);
      y_h = vmlal_laneq_u16(y_h, vget_high_u16(g), consts, 1);
      y_h = vmlal_laneq_u16(y_h, vget_high_u16(b), consts, 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128 */
      uint32x4_t cb_l = scaled_128_5;
      cb_l = vmlsl_laneq_u16(cb_l, vget_low_u16(r), consts, 3);
      cb_l = vmlsl_laneq_u16(cb_l, vget_low_u16(g), consts, 4);
      cb_l = vmlal_laneq_u16(cb_l, vget_low_u16(b), consts, 5);
      uint32x4_t cb_h = scaled_128_5;
      cb_h = vmlsl_laneq_u16(cb_h, vget_high_u16(r), consts, 3);
      cb_h = vmlsl_laneq_u16(cb_h, vget_high_u16(g), consts, 4);
      cb_h = vmlal_laneq_u16(cb_h, vget_high_u16(b), consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + 128 */
      uint32x4_t cr_l = scaled_128_5;
      cr_l = vmlal_laneq_u16(cr_l, vget_low_u16(r), consts, 5);
      cr_l = vmlsl_laneq_u16(cr_l, vget_low_u16(g), consts, 6);
      cr_l = vmlsl_laneq_u16(cr_l, vget_low_u16(b), consts, 7);
      uint32x4_t cr_h = scaled_128_5;
      cr_h = vmlal_laneq_u16(cr_h, vget_high_u16(r), consts, 5);
      cr_h = vmlsl_laneq_u16(cr_h, vget_high_u16(g), consts, 6);
      cr_h = vmlsl_laneq_u16(cr_h, vget_high_u16(b), consts, 7);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_u16 = vcombine_u16(vrshrn_n_u32(y_l, 16),
                                      vrshrn_n_u32(y_h, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_u16 = vcombine_u16(vshrn_n_u32(cb_l, 16),
                                       vshrn_n_u32(cb_h, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_u16 = vcombine_u16(vshrn_n_u32(cr_l, 16),
                                       vshrn_n_u32(cr_h, 16));

      /* Narrow Y, Cb, and Cr values to 8-bit and store to memory. Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1_u8(outptr0, vmovn_u16(y_u16));
      vst1_u8(outptr1, vmovn_u16(cb_u16));
      vst1_u8(outptr2, vmovn_u16(cr_u16));
    }
  }
}
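
/* Usage context (a sketch based on how the jccolext/jccolor include pattern
 * works, not a verbatim excerpt): jccolor-neon.c defines the RGB_* layout
 * macros and renames the function for each extended RGB pixel format before
 * including this file, along the lines of:
 *
 *   #define RGB_RED  EXT_BGR_RED
 *   #define RGB_GREEN  EXT_BGR_GREEN
 *   #define RGB_BLUE  EXT_BGR_BLUE
 *   #define RGB_PIXELSIZE  EXT_BGR_PIXELSIZE
 *   #define jsimd_rgb_ycc_convert_neon  jsimd_extbgr_ycc_convert_neon
 *   #include "jccolext-neon.c"
 *
 * so the single body above compiles into one converter per supported channel
 * order.
 */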