/*
 * jdsample-neon.c - upsampling (Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited.  All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */
#define JPEG_INTERNALS
#include "../../jinclude.h"
#include "../../jpeglib.h"
#include "../../jsimd.h"
#include "../../jdct.h"
#include "../../jsimddct.h"
#include "../jsimd.h"

#include <arm_neon.h>
  31. /* The diagram below shows a row of samples produced by h2v1 downsampling.
  32. *
  33. * s0 s1 s2
  34. * +---------+---------+---------+
  35. * | | | |
  36. * | p0 p1 | p2 p3 | p4 p5 |
  37. * | | | |
  38. * +---------+---------+---------+
  39. *
  40. * Samples s0-s2 were created by averaging the original pixel component values
  41. * centered at positions p0-p5 above. To approximate those original pixel
  42. * component values, we proportionally blend the adjacent samples in each row.
  43. *
  44. * An upsampled pixel component value is computed by blending the sample
  45. * containing the pixel center with the nearest neighboring sample, in the
  46. * ratio 3:1. For example:
  47. * p1(upsampled) = 3/4 * s0 + 1/4 * s1
  48. * p2(upsampled) = 3/4 * s1 + 1/4 * s0
  49. * When computing the first and last pixel component values in the row, there
  50. * is no adjacent sample to blend, so:
  51. * p0(upsampled) = s0
  52. * p5(upsampled) = s2
  53. */
  54. void jsimd_h2v1_fancy_upsample_neon(int max_v_samp_factor,
  55. JDIMENSION downsampled_width,
  56. JSAMPARRAY input_data,
  57. JSAMPARRAY *output_data_ptr)
  58. {
  59. JSAMPARRAY output_data = *output_data_ptr;
  60. JSAMPROW inptr, outptr;
  61. int inrow;
  62. unsigned colctr;
  63. /* Set up constants. */
  64. const uint16x8_t one_u16 = vdupq_n_u16(1);
  65. const uint8x8_t three_u8 = vdup_n_u8(3);
  66. for (inrow = 0; inrow < max_v_samp_factor; inrow++) {
  67. inptr = input_data[inrow];
  68. outptr = output_data[inrow];
  69. /* First pixel component value in this row of the original image */
  70. *outptr = (JSAMPLE)GETJSAMPLE(*inptr);
  71. /* 3/4 * containing sample + 1/4 * nearest neighboring sample
  72. * For p1: containing sample = s0, nearest neighboring sample = s1
  73. * For p2: containing sample = s1, nearest neighboring sample = s0
  74. */
  75. uint8x16_t s0 = vld1q_u8(inptr);
  76. uint8x16_t s1 = vld1q_u8(inptr + 1);
  77. /* Multiplication makes vectors twice as wide. '_l' and '_h' suffixes
  78. * denote low half and high half respectively.
  79. */
  80. uint16x8_t s1_add_3s0_l =
  81. vmlal_u8(vmovl_u8(vget_low_u8(s1)), vget_low_u8(s0), three_u8);
  82. uint16x8_t s1_add_3s0_h =
  83. vmlal_u8(vmovl_u8(vget_high_u8(s1)), vget_high_u8(s0), three_u8);
  84. uint16x8_t s0_add_3s1_l =
  85. vmlal_u8(vmovl_u8(vget_low_u8(s0)), vget_low_u8(s1), three_u8);
  86. uint16x8_t s0_add_3s1_h =
  87. vmlal_u8(vmovl_u8(vget_high_u8(s0)), vget_high_u8(s1), three_u8);
  88. /* Add ordered dithering bias to odd pixel values. */
  89. s0_add_3s1_l = vaddq_u16(s0_add_3s1_l, one_u16);
  90. s0_add_3s1_h = vaddq_u16(s0_add_3s1_h, one_u16);
  91. /* The offset is initially 1, because the first pixel component has already
  92. * been stored. However, in subsequent iterations of the SIMD loop, this
  93. * offset is (2 * colctr - 1) to stay within the bounds of the sample
  94. * buffers without having to resort to a slow scalar tail case for the last
  95. * (downsampled_width % 16) samples. See "Creation of 2-D sample arrays"
  96. * in jmemmgr.c for more details.
  97. */
  98. unsigned outptr_offset = 1;
  99. uint8x16x2_t output_pixels;
  100. /* We use software pipelining to maximise performance. The code indented
  101. * an extra two spaces begins the next iteration of the loop.
  102. */
  103. for (colctr = 16; colctr < downsampled_width; colctr += 16) {
  104. s0 = vld1q_u8(inptr + colctr - 1);
  105. s1 = vld1q_u8(inptr + colctr);
  106. /* Right-shift by 2 (divide by 4), narrow to 8-bit, and combine. */
  107. output_pixels.val[0] = vcombine_u8(vrshrn_n_u16(s1_add_3s0_l, 2),
  108. vrshrn_n_u16(s1_add_3s0_h, 2));
  109. output_pixels.val[1] = vcombine_u8(vshrn_n_u16(s0_add_3s1_l, 2),
  110. vshrn_n_u16(s0_add_3s1_h, 2));
  111. /* Multiplication makes vectors twice as wide. '_l' and '_h' suffixes
  112. * denote low half and high half respectively.
  113. */
  114. s1_add_3s0_l =
  115. vmlal_u8(vmovl_u8(vget_low_u8(s1)), vget_low_u8(s0), three_u8);
  116. s1_add_3s0_h =
  117. vmlal_u8(vmovl_u8(vget_high_u8(s1)), vget_high_u8(s0), three_u8);
  118. s0_add_3s1_l =
  119. vmlal_u8(vmovl_u8(vget_low_u8(s0)), vget_low_u8(s1), three_u8);
  120. s0_add_3s1_h =
  121. vmlal_u8(vmovl_u8(vget_high_u8(s0)), vget_high_u8(s1), three_u8);
  122. /* Add ordered dithering bias to odd pixel values. */
  123. s0_add_3s1_l = vaddq_u16(s0_add_3s1_l, one_u16);
  124. s0_add_3s1_h = vaddq_u16(s0_add_3s1_h, one_u16);
  125. /* Store pixel component values to memory. */
  126. vst2q_u8(outptr + outptr_offset, output_pixels);
  127. outptr_offset = 2 * colctr - 1;
  128. }
  129. /* Complete the last iteration of the loop. */
  130. /* Right-shift by 2 (divide by 4), narrow to 8-bit, and combine. */
  131. output_pixels.val[0] = vcombine_u8(vrshrn_n_u16(s1_add_3s0_l, 2),
  132. vrshrn_n_u16(s1_add_3s0_h, 2));
  133. output_pixels.val[1] = vcombine_u8(vshrn_n_u16(s0_add_3s1_l, 2),
  134. vshrn_n_u16(s0_add_3s1_h, 2));
  135. /* Store pixel component values to memory. */
  136. vst2q_u8(outptr + outptr_offset, output_pixels);
  137. /* Last pixel component value in this row of the original image */
  138. outptr[2 * downsampled_width - 1] =
  139. GETJSAMPLE(inptr[downsampled_width - 1]);
  140. }
  141. }
  142. /* The diagram below shows an array of samples produced by h2v2 downsampling.
  143. *
  144. * s0 s1 s2
  145. * +---------+---------+---------+
  146. * | p0 p1 | p2 p3 | p4 p5 |
  147. * sA | | | |
  148. * | p6 p7 | p8 p9 | p10 p11|
  149. * +---------+---------+---------+
  150. * | p12 p13| p14 p15| p16 p17|
  151. * sB | | | |
  152. * | p18 p19| p20 p21| p22 p23|
  153. * +---------+---------+---------+
  154. * | p24 p25| p26 p27| p28 p29|
  155. * sC | | | |
  156. * | p30 p31| p32 p33| p34 p35|
  157. * +---------+---------+---------+
  158. *
  159. * Samples s0A-s2C were created by averaging the original pixel component
  160. * values centered at positions p0-p35 above. To approximate one of those
  161. * original pixel component values, we proportionally blend the sample
  162. * containing the pixel center with the nearest neighboring samples in each
  163. * row, column, and diagonal.
  164. *
  165. * An upsampled pixel component value is computed by first blending the sample
  166. * containing the pixel center with the nearest neighboring samples in the
  167. * same column, in the ratio 3:1, and then blending each column sum with the
  168. * nearest neighboring column sum, in the ratio 3:1. For example:
  169. * p14(upsampled) = 3/4 * (3/4 * s1B + 1/4 * s1A) +
  170. * 1/4 * (3/4 * s0B + 1/4 * s0A)
  171. * = 9/16 * s1B + 3/16 * s1A + 3/16 * s0B + 1/16 * s0A
  172. * When computing the first and last pixel component values in the row, there
  173. * is no horizontally adjacent sample to blend, so:
  174. * p12(upsampled) = 3/4 * s0B + 1/4 * s0A
  175. * p23(upsampled) = 3/4 * s2B + 1/4 * s2C
  176. * When computing the first and last pixel component values in the column,
  177. * there is no vertically adjacent sample to blend, so:
  178. * p2(upsampled) = 3/4 * s1A + 1/4 * s0A
  179. * p33(upsampled) = 3/4 * s1C + 1/4 * s2C
  180. * When computing the corner pixel component values, there is no adjacent
  181. * sample to blend, so:
  182. * p0(upsampled) = s0A
  183. * p35(upsampled) = s2C
  184. */
  185. void jsimd_h2v2_fancy_upsample_neon(int max_v_samp_factor,
  186. JDIMENSION downsampled_width,
  187. JSAMPARRAY input_data,
  188. JSAMPARRAY *output_data_ptr)
  189. {
  190. JSAMPARRAY output_data = *output_data_ptr;
  191. JSAMPROW inptr0, inptr1, inptr2, outptr0, outptr1;
  192. int inrow, outrow;
  193. unsigned colctr;
  194. /* Set up constants. */
  195. const uint16x8_t seven_u16 = vdupq_n_u16(7);
  196. const uint8x8_t three_u8 = vdup_n_u8(3);
  197. const uint16x8_t three_u16 = vdupq_n_u16(3);
  198. inrow = outrow = 0;
  199. while (outrow < max_v_samp_factor) {
  200. inptr0 = input_data[inrow - 1];
  201. inptr1 = input_data[inrow];
  202. inptr2 = input_data[inrow + 1];
  203. /* Suffixes 0 and 1 denote the upper and lower rows of output pixels,
  204. * respectively.
  205. */
  206. outptr0 = output_data[outrow++];
  207. outptr1 = output_data[outrow++];
  208. /* First pixel component value in this row of the original image */
  209. int s0colsum0 = GETJSAMPLE(*inptr1) * 3 + GETJSAMPLE(*inptr0);
  210. *outptr0 = (JSAMPLE)((s0colsum0 * 4 + 8) >> 4);
  211. int s0colsum1 = GETJSAMPLE(*inptr1) * 3 + GETJSAMPLE(*inptr2);
  212. *outptr1 = (JSAMPLE)((s0colsum1 * 4 + 8) >> 4);
  213. /* Step 1: Blend samples vertically in columns s0 and s1.
  214. * Leave the divide by 4 until the end, when it can be done for both
  215. * dimensions at once, right-shifting by 4.
  216. */
  217. /* Load and compute s0colsum0 and s0colsum1. */
  218. uint8x16_t s0A = vld1q_u8(inptr0);
  219. uint8x16_t s0B = vld1q_u8(inptr1);
  220. uint8x16_t s0C = vld1q_u8(inptr2);
  221. /* Multiplication makes vectors twice as wide. '_l' and '_h' suffixes
  222. * denote low half and high half respectively.
  223. */
  224. uint16x8_t s0colsum0_l = vmlal_u8(vmovl_u8(vget_low_u8(s0A)),
  225. vget_low_u8(s0B), three_u8);
  226. uint16x8_t s0colsum0_h = vmlal_u8(vmovl_u8(vget_high_u8(s0A)),
  227. vget_high_u8(s0B), three_u8);
  228. uint16x8_t s0colsum1_l = vmlal_u8(vmovl_u8(vget_low_u8(s0C)),
  229. vget_low_u8(s0B), three_u8);
  230. uint16x8_t s0colsum1_h = vmlal_u8(vmovl_u8(vget_high_u8(s0C)),
  231. vget_high_u8(s0B), three_u8);
  232. /* Load and compute s1colsum0 and s1colsum1. */
  233. uint8x16_t s1A = vld1q_u8(inptr0 + 1);
  234. uint8x16_t s1B = vld1q_u8(inptr1 + 1);
  235. uint8x16_t s1C = vld1q_u8(inptr2 + 1);
  236. uint16x8_t s1colsum0_l = vmlal_u8(vmovl_u8(vget_low_u8(s1A)),
  237. vget_low_u8(s1B), three_u8);
  238. uint16x8_t s1colsum0_h = vmlal_u8(vmovl_u8(vget_high_u8(s1A)),
  239. vget_high_u8(s1B), three_u8);
  240. uint16x8_t s1colsum1_l = vmlal_u8(vmovl_u8(vget_low_u8(s1C)),
  241. vget_low_u8(s1B), three_u8);
  242. uint16x8_t s1colsum1_h = vmlal_u8(vmovl_u8(vget_high_u8(s1C)),
  243. vget_high_u8(s1B), three_u8);
  244. /* Step 2: Blend the already-blended columns. */
  245. uint16x8_t output0_p1_l = vmlaq_u16(s1colsum0_l, s0colsum0_l, three_u16);
  246. uint16x8_t output0_p1_h = vmlaq_u16(s1colsum0_h, s0colsum0_h, three_u16);
  247. uint16x8_t output0_p2_l = vmlaq_u16(s0colsum0_l, s1colsum0_l, three_u16);
  248. uint16x8_t output0_p2_h = vmlaq_u16(s0colsum0_h, s1colsum0_h, three_u16);
  249. uint16x8_t output1_p1_l = vmlaq_u16(s1colsum1_l, s0colsum1_l, three_u16);
  250. uint16x8_t output1_p1_h = vmlaq_u16(s1colsum1_h, s0colsum1_h, three_u16);
  251. uint16x8_t output1_p2_l = vmlaq_u16(s0colsum1_l, s1colsum1_l, three_u16);
  252. uint16x8_t output1_p2_h = vmlaq_u16(s0colsum1_h, s1colsum1_h, three_u16);
  253. /* Add ordered dithering bias to odd pixel values. */
  254. output0_p1_l = vaddq_u16(output0_p1_l, seven_u16);
  255. output0_p1_h = vaddq_u16(output0_p1_h, seven_u16);
  256. output1_p1_l = vaddq_u16(output1_p1_l, seven_u16);
  257. output1_p1_h = vaddq_u16(output1_p1_h, seven_u16);
  258. /* Right-shift by 4 (divide by 16), narrow to 8-bit, and combine. */
  259. uint8x16x2_t output_pixels0 = { {
  260. vcombine_u8(vshrn_n_u16(output0_p1_l, 4), vshrn_n_u16(output0_p1_h, 4)),
  261. vcombine_u8(vrshrn_n_u16(output0_p2_l, 4), vrshrn_n_u16(output0_p2_h, 4))
  262. } };
  263. uint8x16x2_t output_pixels1 = { {
  264. vcombine_u8(vshrn_n_u16(output1_p1_l, 4), vshrn_n_u16(output1_p1_h, 4)),
  265. vcombine_u8(vrshrn_n_u16(output1_p2_l, 4), vrshrn_n_u16(output1_p2_h, 4))
  266. } };
  267. /* Store pixel component values to memory.
  268. * The minimum size of the output buffer for each row is 64 bytes => no
  269. * need to worry about buffer overflow here. See "Creation of 2-D sample
  270. * arrays" in jmemmgr.c for more details.
  271. */
  272. vst2q_u8(outptr0 + 1, output_pixels0);
  273. vst2q_u8(outptr1 + 1, output_pixels1);
  274. /* The first pixel of the image shifted our loads and stores by one byte.
  275. * We have to re-align on a 32-byte boundary at some point before the end
  276. * of the row (we do it now on the 32/33 pixel boundary) to stay within the
  277. * bounds of the sample buffers without having to resort to a slow scalar
  278. * tail case for the last (downsampled_width % 16) samples. See "Creation
  279. * of 2-D sample arrays" in jmemmgr.c for more details.
  280. */
  281. for (colctr = 16; colctr < downsampled_width; colctr += 16) {
  282. /* Step 1: Blend samples vertically in columns s0 and s1. */
  283. /* Load and compute s0colsum0 and s0colsum1. */
  284. s0A = vld1q_u8(inptr0 + colctr - 1);
  285. s0B = vld1q_u8(inptr1 + colctr - 1);
  286. s0C = vld1q_u8(inptr2 + colctr - 1);
  287. s0colsum0_l = vmlal_u8(vmovl_u8(vget_low_u8(s0A)), vget_low_u8(s0B),
  288. three_u8);
  289. s0colsum0_h = vmlal_u8(vmovl_u8(vget_high_u8(s0A)), vget_high_u8(s0B),
  290. three_u8);
  291. s0colsum1_l = vmlal_u8(vmovl_u8(vget_low_u8(s0C)), vget_low_u8(s0B),
  292. three_u8);
  293. s0colsum1_h = vmlal_u8(vmovl_u8(vget_high_u8(s0C)), vget_high_u8(s0B),
  294. three_u8);
  295. /* Load and compute s1colsum0 and s1colsum1. */
  296. s1A = vld1q_u8(inptr0 + colctr);
  297. s1B = vld1q_u8(inptr1 + colctr);
  298. s1C = vld1q_u8(inptr2 + colctr);
  299. s1colsum0_l = vmlal_u8(vmovl_u8(vget_low_u8(s1A)), vget_low_u8(s1B),
  300. three_u8);
  301. s1colsum0_h = vmlal_u8(vmovl_u8(vget_high_u8(s1A)), vget_high_u8(s1B),
  302. three_u8);
  303. s1colsum1_l = vmlal_u8(vmovl_u8(vget_low_u8(s1C)), vget_low_u8(s1B),
  304. three_u8);
  305. s1colsum1_h = vmlal_u8(vmovl_u8(vget_high_u8(s1C)), vget_high_u8(s1B),
  306. three_u8);
  307. /* Step 2: Blend the already-blended columns. */
  308. output0_p1_l = vmlaq_u16(s1colsum0_l, s0colsum0_l, three_u16);
  309. output0_p1_h = vmlaq_u16(s1colsum0_h, s0colsum0_h, three_u16);
  310. output0_p2_l = vmlaq_u16(s0colsum0_l, s1colsum0_l, three_u16);
  311. output0_p2_h = vmlaq_u16(s0colsum0_h, s1colsum0_h, three_u16);
  312. output1_p1_l = vmlaq_u16(s1colsum1_l, s0colsum1_l, three_u16);
  313. output1_p1_h = vmlaq_u16(s1colsum1_h, s0colsum1_h, three_u16);
  314. output1_p2_l = vmlaq_u16(s0colsum1_l, s1colsum1_l, three_u16);
  315. output1_p2_h = vmlaq_u16(s0colsum1_h, s1colsum1_h, three_u16);
  316. /* Add ordered dithering bias to odd pixel values. */
  317. output0_p1_l = vaddq_u16(output0_p1_l, seven_u16);
  318. output0_p1_h = vaddq_u16(output0_p1_h, seven_u16);
  319. output1_p1_l = vaddq_u16(output1_p1_l, seven_u16);
  320. output1_p1_h = vaddq_u16(output1_p1_h, seven_u16);
  321. /* Right-shift by 4 (divide by 16), narrow to 8-bit, and combine. */
  322. output_pixels0.val[0] = vcombine_u8(vshrn_n_u16(output0_p1_l, 4),
  323. vshrn_n_u16(output0_p1_h, 4));
  324. output_pixels0.val[1] = vcombine_u8(vrshrn_n_u16(output0_p2_l, 4),
  325. vrshrn_n_u16(output0_p2_h, 4));
  326. output_pixels1.val[0] = vcombine_u8(vshrn_n_u16(output1_p1_l, 4),
  327. vshrn_n_u16(output1_p1_h, 4));
  328. output_pixels1.val[1] = vcombine_u8(vrshrn_n_u16(output1_p2_l, 4),
  329. vrshrn_n_u16(output1_p2_h, 4));
  330. /* Store pixel component values to memory. */
  331. vst2q_u8(outptr0 + 2 * colctr - 1, output_pixels0);
  332. vst2q_u8(outptr1 + 2 * colctr - 1, output_pixels1);
  333. }
  334. /* Last pixel component value in this row of the original image */
  335. int s1colsum0 = GETJSAMPLE(inptr1[downsampled_width - 1]) * 3 +
  336. GETJSAMPLE(inptr0[downsampled_width - 1]);
  337. outptr0[2 * downsampled_width - 1] = (JSAMPLE)((s1colsum0 * 4 + 7) >> 4);
  338. int s1colsum1 = GETJSAMPLE(inptr1[downsampled_width - 1]) * 3 +
  339. GETJSAMPLE(inptr2[downsampled_width - 1]);
  340. outptr1[2 * downsampled_width - 1] = (JSAMPLE)((s1colsum1 * 4 + 7) >> 4);
  341. inrow++;
  342. }
  343. }
  344. /* The diagram below shows a column of samples produced by h1v2 downsampling
  345. * (or by losslessly rotating or transposing an h2v1-downsampled image.)
  346. *
  347. * +---------+
  348. * | p0 |
  349. * sA | |
  350. * | p1 |
  351. * +---------+
  352. * | p2 |
  353. * sB | |
  354. * | p3 |
  355. * +---------+
  356. * | p4 |
  357. * sC | |
  358. * | p5 |
  359. * +---------+
  360. *
  361. * Samples sA-sC were created by averaging the original pixel component values
  362. * centered at positions p0-p5 above. To approximate those original pixel
  363. * component values, we proportionally blend the adjacent samples in each
  364. * column.
  365. *
  366. * An upsampled pixel component value is computed by blending the sample
  367. * containing the pixel center with the nearest neighboring sample, in the
  368. * ratio 3:1. For example:
  369. * p1(upsampled) = 3/4 * sA + 1/4 * sB
  370. * p2(upsampled) = 3/4 * sB + 1/4 * sA
  371. * When computing the first and last pixel component values in the column,
  372. * there is no adjacent sample to blend, so:
  373. * p0(upsampled) = sA
  374. * p5(upsampled) = sC
  375. */
  376. void jsimd_h1v2_fancy_upsample_neon(int max_v_samp_factor,
  377. JDIMENSION downsampled_width,
  378. JSAMPARRAY input_data,
  379. JSAMPARRAY *output_data_ptr)
  380. {
  381. JSAMPARRAY output_data = *output_data_ptr;
  382. JSAMPROW inptr0, inptr1, inptr2, outptr0, outptr1;
  383. int inrow, outrow;
  384. unsigned colctr;
  385. /* Set up constants. */
  386. const uint16x8_t one_u16 = vdupq_n_u16(1);
  387. const uint8x8_t three_u8 = vdup_n_u8(3);
  388. inrow = outrow = 0;
  389. while (outrow < max_v_samp_factor) {
  390. inptr0 = input_data[inrow - 1];
  391. inptr1 = input_data[inrow];
  392. inptr2 = input_data[inrow + 1];
  393. /* Suffixes 0 and 1 denote the upper and lower rows of output pixels,
  394. * respectively.
  395. */
  396. outptr0 = output_data[outrow++];
  397. outptr1 = output_data[outrow++];
  398. inrow++;
  399. /* The size of the input and output buffers is always a multiple of 32
  400. * bytes => no need to worry about buffer overflow when reading/writing
  401. * memory. See "Creation of 2-D sample arrays" in jmemmgr.c for more
  402. * details.
  403. */
  404. for (colctr = 0; colctr < downsampled_width; colctr += 16) {
  405. /* Load samples. */
  406. uint8x16_t sA = vld1q_u8(inptr0 + colctr);
  407. uint8x16_t sB = vld1q_u8(inptr1 + colctr);
  408. uint8x16_t sC = vld1q_u8(inptr2 + colctr);
  409. /* Blend samples vertically. */
  410. uint16x8_t colsum0_l = vmlal_u8(vmovl_u8(vget_low_u8(sA)),
  411. vget_low_u8(sB), three_u8);
  412. uint16x8_t colsum0_h = vmlal_u8(vmovl_u8(vget_high_u8(sA)),
  413. vget_high_u8(sB), three_u8);
  414. uint16x8_t colsum1_l = vmlal_u8(vmovl_u8(vget_low_u8(sC)),
  415. vget_low_u8(sB), three_u8);
  416. uint16x8_t colsum1_h = vmlal_u8(vmovl_u8(vget_high_u8(sC)),
  417. vget_high_u8(sB), three_u8);
  418. /* Add ordered dithering bias to pixel values in even output rows. */
  419. colsum0_l = vaddq_u16(colsum0_l, one_u16);
  420. colsum0_h = vaddq_u16(colsum0_h, one_u16);
  421. /* Right-shift by 2 (divide by 4), narrow to 8-bit, and combine. */
  422. uint8x16_t output_pixels0 = vcombine_u8(vshrn_n_u16(colsum0_l, 2),
  423. vshrn_n_u16(colsum0_h, 2));
  424. uint8x16_t output_pixels1 = vcombine_u8(vrshrn_n_u16(colsum1_l, 2),
  425. vrshrn_n_u16(colsum1_h, 2));
  426. /* Store pixel component values to memory. */
  427. vst1q_u8(outptr0 + colctr, output_pixels0);
  428. vst1q_u8(outptr1 + colctr, output_pixels1);
  429. }
  430. }
  431. }
  432. /* The diagram below shows a row of samples produced by h2v1 downsampling.
  433. *
  434. * s0 s1
  435. * +---------+---------+
  436. * | | |
  437. * | p0 p1 | p2 p3 |
  438. * | | |
  439. * +---------+---------+
  440. *
  441. * Samples s0 and s1 were created by averaging the original pixel component
  442. * values centered at positions p0-p3 above. To approximate those original
  443. * pixel component values, we duplicate the samples horizontally:
  444. * p0(upsampled) = p1(upsampled) = s0
  445. * p2(upsampled) = p3(upsampled) = s1
  446. */
  447. void jsimd_h2v1_upsample_neon(int max_v_samp_factor, JDIMENSION output_width,
  448. JSAMPARRAY input_data,
  449. JSAMPARRAY *output_data_ptr)
  450. {
  451. JSAMPARRAY output_data = *output_data_ptr;
  452. JSAMPROW inptr, outptr;
  453. int inrow;
  454. unsigned colctr;
  455. for (inrow = 0; inrow < max_v_samp_factor; inrow++) {
  456. inptr = input_data[inrow];
  457. outptr = output_data[inrow];
  458. for (colctr = 0; 2 * colctr < output_width; colctr += 16) {
  459. uint8x16_t samples = vld1q_u8(inptr + colctr);
  460. /* Duplicate the samples. The store operation below interleaves them so
  461. * that adjacent pixel component values take on the same sample value,
  462. * per above.
  463. */
  464. uint8x16x2_t output_pixels = { { samples, samples } };
  465. /* Store pixel component values to memory.
  466. * Due to the way sample buffers are allocated, we don't need to worry
  467. * about tail cases when output_width is not a multiple of 32. See
  468. * "Creation of 2-D sample arrays" in jmemmgr.c for details.
  469. */
  470. vst2q_u8(outptr + 2 * colctr, output_pixels);
  471. }
  472. }
  473. }
  474. /* The diagram below shows an array of samples produced by h2v2 downsampling.
  475. *
  476. * s0 s1
  477. * +---------+---------+
  478. * | p0 p1 | p2 p3 |
  479. * sA | | |
  480. * | p4 p5 | p6 p7 |
  481. * +---------+---------+
  482. * | p8 p9 | p10 p11|
  483. * sB | | |
  484. * | p12 p13| p14 p15|
  485. * +---------+---------+
  486. *
  487. * Samples s0A-s1B were created by averaging the original pixel component
  488. * values centered at positions p0-p15 above. To approximate those original
  489. * pixel component values, we duplicate the samples both horizontally and
  490. * vertically:
  491. * p0(upsampled) = p1(upsampled) = p4(upsampled) = p5(upsampled) = s0A
  492. * p2(upsampled) = p3(upsampled) = p6(upsampled) = p7(upsampled) = s1A
  493. * p8(upsampled) = p9(upsampled) = p12(upsampled) = p13(upsampled) = s0B
  494. * p10(upsampled) = p11(upsampled) = p14(upsampled) = p15(upsampled) = s1B
  495. */
  496. void jsimd_h2v2_upsample_neon(int max_v_samp_factor, JDIMENSION output_width,
  497. JSAMPARRAY input_data,
  498. JSAMPARRAY *output_data_ptr)
  499. {
  500. JSAMPARRAY output_data = *output_data_ptr;
  501. JSAMPROW inptr, outptr0, outptr1;
  502. int inrow, outrow;
  503. unsigned colctr;
  504. for (inrow = 0, outrow = 0; outrow < max_v_samp_factor; inrow++) {
  505. inptr = input_data[inrow];
  506. outptr0 = output_data[outrow++];
  507. outptr1 = output_data[outrow++];
  508. for (colctr = 0; 2 * colctr < output_width; colctr += 16) {
  509. uint8x16_t samples = vld1q_u8(inptr + colctr);
  510. /* Duplicate the samples. The store operation below interleaves them so
  511. * that adjacent pixel component values take on the same sample value,
  512. * per above.
  513. */
  514. uint8x16x2_t output_pixels = { { samples, samples } };
  515. /* Store pixel component values for both output rows to memory.
  516. * Due to the way sample buffers are allocated, we don't need to worry
  517. * about tail cases when output_width is not a multiple of 32. See
  518. * "Creation of 2-D sample arrays" in jmemmgr.c for details.
  519. */
  520. vst2q_u8(outptr0 + 2 * colctr, output_pixels);
  521. vst2q_u8(outptr1 + 2 * colctr, output_pixels);
  522. }
  523. }
  524. }