/*
 * Armv7 Neon optimizations for libjpeg-turbo
 *
 * Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
 * All Rights Reserved.
 * Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
 * Copyright (C) 2014, Siarhei Siamashka.  All Rights Reserved.
 * Copyright (C) 2014, Linaro Limited.  All Rights Reserved.
 * Copyright (C) 2015, D. R. Commander.  All Rights Reserved.
 * Copyright (C) 2015-2016, 2018, Matthieu Darbois.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software.  If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits  /* mark stack as non-executable */
#endif

#if defined(__clang__) && defined(__arm__)
#define LITE_ASM 1
#endif

.text
#ifndef LITE_ASM
.fpu neon
.arch armv7a
.object_arch armv4
#endif
.arm
.syntax unified


/*****************************************************************************/

/* Supplementary macro for setting function attributes */
.macro asm_function fname
#ifdef __APPLE__
    .private_extern _\fname
    .globl _\fname
_\fname:
#else
#ifndef LITE_ASM
    .func \fname
#endif
    .global \fname
#ifdef __ELF__
    .hidden \fname
    .type \fname, %function
#endif
\fname:
#endif
.endm


#define CENTERJSAMPLE  128

/*****************************************************************************/

/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * GLOBAL(void)
 * jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
 *                       JSAMPARRAY output_buf, JDIMENSION output_col)
 */
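
/*
 * For orientation: the vmul.s16 sequence at the top of the function performs
 * the dequantization step, which in scalar C is roughly the following
 * (an illustrative sketch only, not taken from jidctint.c):
 *
 *   for (i = 0; i < DCTSIZE2; i++)
 *     workspace[i] = (DCTELEM)(coef_block[i] * ((DCTELEM *)dct_table)[i]);
 *
 * The two 1-D IDCT passes and the final descale/range-limit are then applied
 * to the dequantized block, as in jpeg_idct_islow().
 */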
#define FIX_0_298631336  (2446)
#define FIX_0_390180644  (3196)
#define FIX_0_541196100  (4433)
#define FIX_0_765366865  (6270)
#define FIX_0_899976223  (7373)
#define FIX_1_175875602  (9633)
#define FIX_1_501321110  (12299)
#define FIX_1_847759065  (15137)
#define FIX_1_961570560  (16069)
#define FIX_2_053119869  (16819)
#define FIX_2_562915447  (20995)
#define FIX_3_072711026  (25172)

#define FIX_1_175875602_MINUS_1_961570560  (FIX_1_175875602 - FIX_1_961570560)
#define FIX_1_175875602_MINUS_0_390180644  (FIX_1_175875602 - FIX_0_390180644)
#define FIX_0_541196100_MINUS_1_847759065  (FIX_0_541196100 - FIX_1_847759065)
#define FIX_3_072711026_MINUS_2_562915447  (FIX_3_072711026 - FIX_2_562915447)
#define FIX_0_298631336_MINUS_0_899976223  (FIX_0_298631336 - FIX_0_899976223)
#define FIX_1_501321110_MINUS_0_899976223  (FIX_1_501321110 - FIX_0_899976223)
#define FIX_2_053119869_MINUS_2_562915447  (FIX_2_053119869 - FIX_2_562915447)
#define FIX_0_541196100_PLUS_0_765366865   (FIX_0_541196100 + FIX_0_765366865)

/*
 * Reference SIMD-friendly 1-D ISLOW iDCT C implementation.
 * Uses some ideas from the comments in 'simd/jiss2int-64.asm'
 */
#define REF_1D_IDCT(xrow0, xrow1, xrow2, xrow3, xrow4, xrow5, xrow6, xrow7) { \
  DCTELEM row0, row1, row2, row3, row4, row5, row6, row7; \
  JLONG q1, q2, q3, q4, q5, q6, q7; \
  JLONG tmp11_plus_tmp2, tmp11_minus_tmp2; \
  \
  /* 1-D iDCT input data */ \
  row0 = xrow0; \
  row1 = xrow1; \
  row2 = xrow2; \
  row3 = xrow3; \
  row4 = xrow4; \
  row5 = xrow5; \
  row6 = xrow6; \
  row7 = xrow7; \
  \
  q5 = row7 + row3; \
  q4 = row5 + row1; \
  q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \
       MULTIPLY(q4, FIX_1_175875602); \
  q7 = MULTIPLY(q5, FIX_1_175875602) + \
       MULTIPLY(q4, FIX_1_175875602_MINUS_0_390180644); \
  q2 = MULTIPLY(row2, FIX_0_541196100) + \
       MULTIPLY(row6, FIX_0_541196100_MINUS_1_847759065); \
  q4 = q6; \
  q3 = ((JLONG)row0 - (JLONG)row4) << 13; \
  q6 += MULTIPLY(row5, -FIX_2_562915447) + \
        MULTIPLY(row3, FIX_3_072711026_MINUS_2_562915447); \
  /* now we can use q1 (reloadable constants have been used up) */ \
  q1 = q3 + q2; \
  q4 += MULTIPLY(row7, FIX_0_298631336_MINUS_0_899976223) + \
        MULTIPLY(row1, -FIX_0_899976223); \
  q5 = q7; \
  q1 = q1 + q6; \
  q7 += MULTIPLY(row7, -FIX_0_899976223) + \
        MULTIPLY(row1, FIX_1_501321110_MINUS_0_899976223); \
  \
  /* (tmp11 + tmp2) has been calculated (out_row1 before descale) */ \
  tmp11_plus_tmp2 = q1; \
  row1 = 0; \
  \
  q1 = q1 - q6; \
  q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \
        MULTIPLY(row3, -FIX_2_562915447); \
  q1 = q1 - q6; \
  q6 = MULTIPLY(row2, FIX_0_541196100_PLUS_0_765366865) + \
       MULTIPLY(row6, FIX_0_541196100); \
  q3 = q3 - q2; \
  \
  /* (tmp11 - tmp2) has been calculated (out_row6 before descale) */ \
  tmp11_minus_tmp2 = q1; \
  \
  q1 = ((JLONG)row0 + (JLONG)row4) << 13; \
  q2 = q1 + q6; \
  q1 = q1 - q6; \
  \
  /* pick up the results */ \
  tmp0  = q4; \
  tmp1  = q5; \
  tmp2  = (tmp11_plus_tmp2 - tmp11_minus_tmp2) / 2; \
  tmp3  = q7; \
  tmp10 = q2; \
  tmp11 = (tmp11_plus_tmp2 + tmp11_minus_tmp2) / 2; \
  tmp12 = q3; \
  tmp13 = q1; \
}

#define XFIX_0_899976223                    d0[0]
#define XFIX_0_541196100                    d0[1]
#define XFIX_2_562915447                    d0[2]
#define XFIX_0_298631336_MINUS_0_899976223  d0[3]
#define XFIX_1_501321110_MINUS_0_899976223  d1[0]
#define XFIX_2_053119869_MINUS_2_562915447  d1[1]
#define XFIX_0_541196100_PLUS_0_765366865   d1[2]
#define XFIX_1_175875602                    d1[3]
#define XFIX_1_175875602_MINUS_0_390180644  d2[0]
#define XFIX_0_541196100_MINUS_1_847759065  d2[1]
#define XFIX_3_072711026_MINUS_2_562915447  d2[2]
#define XFIX_1_175875602_MINUS_1_961570560  d2[3]

.balign 16
jsimd_idct_islow_neon_consts:
  .short FIX_0_899976223                    /* d0[0] */
  .short FIX_0_541196100                    /* d0[1] */
  .short FIX_2_562915447                    /* d0[2] */
  .short FIX_0_298631336_MINUS_0_899976223  /* d0[3] */
  .short FIX_1_501321110_MINUS_0_899976223  /* d1[0] */
  .short FIX_2_053119869_MINUS_2_562915447  /* d1[1] */
  .short FIX_0_541196100_PLUS_0_765366865   /* d1[2] */
  .short FIX_1_175875602                    /* d1[3] */
  /* reloadable constants */
  .short FIX_1_175875602_MINUS_0_390180644  /* d2[0] */
  .short FIX_0_541196100_MINUS_1_847759065  /* d2[1] */
  .short FIX_3_072711026_MINUS_2_562915447  /* d2[2] */
  .short FIX_1_175875602_MINUS_1_961570560  /* d2[3] */
asm_function jsimd_idct_islow_neon
    DCT_TABLE       .req r0
    COEF_BLOCK      .req r1
    OUTPUT_BUF      .req r2
    OUTPUT_COL      .req r3
    TMP1            .req r0
    TMP2            .req r1
    TMP3            .req r2
    TMP4            .req ip

    ROW0L           .req d16
    ROW0R           .req d17
    ROW1L           .req d18
    ROW1R           .req d19
    ROW2L           .req d20
    ROW2R           .req d21
    ROW3L           .req d22
    ROW3R           .req d23
    ROW4L           .req d24
    ROW4R           .req d25
    ROW5L           .req d26
    ROW5R           .req d27
    ROW6L           .req d28
    ROW6R           .req d29
    ROW7L           .req d30
    ROW7R           .req d31

    /* Load and dequantize coefficients into Neon registers
     * with the following allocation:
     *       0 1 2 3 | 4 5 6 7
     *      ---------+--------
     *   0 | d16     | d17    ( q8  )
     *   1 | d18     | d19    ( q9  )
     *   2 | d20     | d21    ( q10 )
     *   3 | d22     | d23    ( q11 )
     *   4 | d24     | d25    ( q12 )
     *   5 | d26     | d27    ( q13 )
     *   6 | d28     | d29    ( q14 )
     *   7 | d30     | d31    ( q15 )
     */
    adr             ip, jsimd_idct_islow_neon_consts
    vld1.16         {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
    vld1.16         {d0, d1, d2, d3}, [DCT_TABLE, :128]!
    vld1.16         {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
    vmul.s16        q8, q8, q0
    vld1.16         {d4, d5, d6, d7}, [DCT_TABLE, :128]!
    vmul.s16        q9, q9, q1
    vld1.16         {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
    vmul.s16        q10, q10, q2
    vld1.16         {d0, d1, d2, d3}, [DCT_TABLE, :128]!
    vmul.s16        q11, q11, q3
    vld1.16         {d28, d29, d30, d31}, [COEF_BLOCK, :128]
    vmul.s16        q12, q12, q0
    vld1.16         {d4, d5, d6, d7}, [DCT_TABLE, :128]!
    vmul.s16        q14, q14, q2
    vmul.s16        q13, q13, q1
    vld1.16         {d0, d1, d2, d3}, [ip, :128]  /* load constants */
    add             ip, ip, #16
    vmul.s16        q15, q15, q3
    vpush           {d8 - d15}                    /* save Neon registers */

    /* 1-D IDCT, pass 1, left 4x8 half */
    vadd.s16        d4, ROW7L, ROW3L
    vadd.s16        d5, ROW5L, ROW1L
    vmull.s16       q6, d4, XFIX_1_175875602_MINUS_1_961570560
    vmlal.s16       q6, d5, XFIX_1_175875602
    vmull.s16       q7, d4, XFIX_1_175875602
    /* Check for the zero coefficients in the right 4x8 half */
    push            {r4, r5}
    vmlal.s16       q7, d5, XFIX_1_175875602_MINUS_0_390180644
    vsubl.s16       q3, ROW0L, ROW4L
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 1 * 8))]
    vmull.s16       q2, ROW2L, XFIX_0_541196100
    vmlal.s16       q2, ROW6L, XFIX_0_541196100_MINUS_1_847759065
    orr             r0, r4, r5
    vmov            q4, q6
    vmlsl.s16       q6, ROW5L, XFIX_2_562915447
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 2 * 8))]
    vmlal.s16       q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
    vshl.s32        q3, q3, #13
    orr             r0, r0, r4
    vmlsl.s16       q4, ROW1L, XFIX_0_899976223
    orr             r0, r0, r5
    vadd.s32        q1, q3, q2
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 3 * 8))]
    vmov            q5, q7
    vadd.s32        q1, q1, q6
    orr             r0, r0, r4
    vmlsl.s16       q7, ROW7L, XFIX_0_899976223
    orr             r0, r0, r5
    vmlal.s16       q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
    vrshrn.s32      ROW1L, q1, #11
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 4 * 8))]
    vsub.s32        q1, q1, q6
    vmlal.s16       q5, ROW5L, XFIX_2_053119869_MINUS_2_562915447
    orr             r0, r0, r4
    vmlsl.s16       q5, ROW3L, XFIX_2_562915447
    orr             r0, r0, r5
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 5 * 8))]
    vmlal.s16       q6, ROW6L, XFIX_0_541196100
    vsub.s32        q3, q3, q2
    orr             r0, r0, r4
    vrshrn.s32      ROW6L, q1, #11
    orr             r0, r0, r5
    vadd.s32        q1, q3, q5
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 6 * 8))]
    vsub.s32        q3, q3, q5
    vaddl.s16       q5, ROW0L, ROW4L
    orr             r0, r0, r4
    vrshrn.s32      ROW2L, q1, #11
    orr             r0, r0, r5
    vrshrn.s32      ROW5L, q3, #11
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 7 * 8))]
    vshl.s32        q5, q5, #13
    vmlal.s16       q4, ROW7L, XFIX_0_298631336_MINUS_0_899976223
    orr             r0, r0, r4
    vadd.s32        q2, q5, q6
    orrs            r0, r0, r5
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    ldrd            r4, [COEF_BLOCK, #(-96 + 2 * (4 + 0 * 8))]
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    orr             r0, r4, r5
    vsub.s32        q3, q1, q4
    pop             {r4, r5}
    vrshrn.s32      ROW7L, q2, #11
    vrshrn.s32      ROW3L, q5, #11
    vrshrn.s32      ROW0L, q6, #11
    vrshrn.s32      ROW4L, q3, #11
    beq             3f  /* Go to do some special handling for the sparse
                           right 4x8 half */
    /* 1-D IDCT, pass 1, right 4x8 half */
    vld1.s16        {d2}, [ip, :64]  /* reload constants */
    vadd.s16        d10, ROW7R, ROW3R
    vadd.s16        d8, ROW5R, ROW1R
    /* Transpose left 4x8 half */
    vtrn.16         ROW6L, ROW7L
    vmull.s16       q6, d10, XFIX_1_175875602_MINUS_1_961570560
    vmlal.s16       q6, d8, XFIX_1_175875602
    vtrn.16         ROW2L, ROW3L
    vmull.s16       q7, d10, XFIX_1_175875602
    vmlal.s16       q7, d8, XFIX_1_175875602_MINUS_0_390180644
    vtrn.16         ROW0L, ROW1L
    vsubl.s16       q3, ROW0R, ROW4R
    vmull.s16       q2, ROW2R, XFIX_0_541196100
    vmlal.s16       q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
    vtrn.16         ROW4L, ROW5L
    vmov            q4, q6
    vmlsl.s16       q6, ROW5R, XFIX_2_562915447
    vmlal.s16       q6, ROW3R, XFIX_3_072711026_MINUS_2_562915447
    vtrn.32         ROW1L, ROW3L
    vshl.s32        q3, q3, #13
    vmlsl.s16       q4, ROW1R, XFIX_0_899976223
    vtrn.32         ROW4L, ROW6L
    vadd.s32        q1, q3, q2
    vmov            q5, q7
    vadd.s32        q1, q1, q6
    vtrn.32         ROW0L, ROW2L
    vmlsl.s16       q7, ROW7R, XFIX_0_899976223
    vmlal.s16       q7, ROW1R, XFIX_1_501321110_MINUS_0_899976223
    vrshrn.s32      ROW1R, q1, #11
    vtrn.32         ROW5L, ROW7L
    vsub.s32        q1, q1, q6
    vmlal.s16       q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
    vmlsl.s16       q5, ROW3R, XFIX_2_562915447
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW2R, XFIX_0_541196100_PLUS_0_765366865
    vmlal.s16       q6, ROW6R, XFIX_0_541196100
    vsub.s32        q3, q3, q2
    vrshrn.s32      ROW6R, q1, #11
    vadd.s32        q1, q3, q5
    vsub.s32        q3, q3, q5
    vaddl.s16       q5, ROW0R, ROW4R
    vrshrn.s32      ROW2R, q1, #11
    vrshrn.s32      ROW5R, q3, #11
    vshl.s32        q5, q5, #13
    vmlal.s16       q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
    vadd.s32        q2, q5, q6
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    vsub.s32        q3, q1, q4
    vrshrn.s32      ROW7R, q2, #11
    vrshrn.s32      ROW3R, q5, #11
    vrshrn.s32      ROW0R, q6, #11
    vrshrn.s32      ROW4R, q3, #11
    /* Transpose right 4x8 half */
    vtrn.16         ROW6R, ROW7R
    vtrn.16         ROW2R, ROW3R
    vtrn.16         ROW0R, ROW1R
    vtrn.16         ROW4R, ROW5R
    vtrn.32         ROW1R, ROW3R
    vtrn.32         ROW4R, ROW6R
    vtrn.32         ROW0R, ROW2R
    vtrn.32         ROW5R, ROW7R
1:  /* 1-D IDCT, pass 2 (normal variant), left 4x8 half */
    vld1.s16        {d2}, [ip, :64]  /* reload constants */
    vmull.s16       q6, ROW1R, XFIX_1_175875602                    /* ROW5L <-> ROW1R */
    vmlal.s16       q6, ROW1L, XFIX_1_175875602
    vmlal.s16       q6, ROW3R, XFIX_1_175875602_MINUS_1_961570560  /* ROW7L <-> ROW3R */
    vmlal.s16       q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
    vmull.s16       q7, ROW3R, XFIX_1_175875602                    /* ROW7L <-> ROW3R */
    vmlal.s16       q7, ROW3L, XFIX_1_175875602
    vmlal.s16       q7, ROW1R, XFIX_1_175875602_MINUS_0_390180644  /* ROW5L <-> ROW1R */
    vmlal.s16       q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
    vsubl.s16       q3, ROW0L, ROW0R                               /* ROW4L <-> ROW0R */
    vmull.s16       q2, ROW2L, XFIX_0_541196100
    vmlal.s16       q2, ROW2R, XFIX_0_541196100_MINUS_1_847759065  /* ROW6L <-> ROW2R */
    vmov            q4, q6
    vmlsl.s16       q6, ROW1R, XFIX_2_562915447                    /* ROW5L <-> ROW1R */
    vmlal.s16       q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
    vshl.s32        q3, q3, #13
    vmlsl.s16       q4, ROW1L, XFIX_0_899976223
    vadd.s32        q1, q3, q2
    vmov            q5, q7
    vadd.s32        q1, q1, q6
    vmlsl.s16       q7, ROW3R, XFIX_0_899976223                    /* ROW7L <-> ROW3R */
    vmlal.s16       q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
    vshrn.s32       ROW1L, q1, #16
    vsub.s32        q1, q1, q6
    vmlal.s16       q5, ROW1R, XFIX_2_053119869_MINUS_2_562915447  /* ROW5L <-> ROW1R */
    vmlsl.s16       q5, ROW3L, XFIX_2_562915447
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
    vmlal.s16       q6, ROW2R, XFIX_0_541196100                    /* ROW6L <-> ROW2R */
    vsub.s32        q3, q3, q2
    vshrn.s32       ROW2R, q1, #16                                 /* ROW6L <-> ROW2R */
    vadd.s32        q1, q3, q5
    vsub.s32        q3, q3, q5
    vaddl.s16       q5, ROW0L, ROW0R                               /* ROW4L <-> ROW0R */
    vshrn.s32       ROW2L, q1, #16
    vshrn.s32       ROW1R, q3, #16                                 /* ROW5L <-> ROW1R */
    vshl.s32        q5, q5, #13
    vmlal.s16       q4, ROW3R, XFIX_0_298631336_MINUS_0_899976223  /* ROW7L <-> ROW3R */
    vadd.s32        q2, q5, q6
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    vsub.s32        q3, q1, q4
    vshrn.s32       ROW3R, q2, #16                                 /* ROW7L <-> ROW3R */
    vshrn.s32       ROW3L, q5, #16
    vshrn.s32       ROW0L, q6, #16
    vshrn.s32       ROW0R, q3, #16                                 /* ROW4L <-> ROW0R */
    /* 1-D IDCT, pass 2, right 4x8 half */
    vld1.s16        {d2}, [ip, :64]  /* reload constants */
    vmull.s16       q6, ROW5R, XFIX_1_175875602
    vmlal.s16       q6, ROW5L, XFIX_1_175875602                    /* ROW5L <-> ROW1R */
    vmlal.s16       q6, ROW7R, XFIX_1_175875602_MINUS_1_961570560
    vmlal.s16       q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560  /* ROW7L <-> ROW3R */
    vmull.s16       q7, ROW7R, XFIX_1_175875602
    vmlal.s16       q7, ROW7L, XFIX_1_175875602                    /* ROW7L <-> ROW3R */
    vmlal.s16       q7, ROW5R, XFIX_1_175875602_MINUS_0_390180644
    vmlal.s16       q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644  /* ROW5L <-> ROW1R */
    vsubl.s16       q3, ROW4L, ROW4R                               /* ROW4L <-> ROW0R */
    vmull.s16       q2, ROW6L, XFIX_0_541196100                    /* ROW6L <-> ROW2R */
    vmlal.s16       q2, ROW6R, XFIX_0_541196100_MINUS_1_847759065
    vmov            q4, q6
    vmlsl.s16       q6, ROW5R, XFIX_2_562915447
    vmlal.s16       q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447  /* ROW7L <-> ROW3R */
    vshl.s32        q3, q3, #13
    vmlsl.s16       q4, ROW5L, XFIX_0_899976223                    /* ROW5L <-> ROW1R */
    vadd.s32        q1, q3, q2
    vmov            q5, q7
    vadd.s32        q1, q1, q6
    vmlsl.s16       q7, ROW7R, XFIX_0_899976223
    vmlal.s16       q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223  /* ROW5L <-> ROW1R */
    vshrn.s32       ROW5L, q1, #16                                 /* ROW5L <-> ROW1R */
    vsub.s32        q1, q1, q6
    vmlal.s16       q5, ROW5R, XFIX_2_053119869_MINUS_2_562915447
    vmlsl.s16       q5, ROW7L, XFIX_2_562915447                    /* ROW7L <-> ROW3R */
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865   /* ROW6L <-> ROW2R */
    vmlal.s16       q6, ROW6R, XFIX_0_541196100
    vsub.s32        q3, q3, q2
    vshrn.s32       ROW6R, q1, #16
    vadd.s32        q1, q3, q5
    vsub.s32        q3, q3, q5
    vaddl.s16       q5, ROW4L, ROW4R                               /* ROW4L <-> ROW0R */
    vshrn.s32       ROW6L, q1, #16                                 /* ROW6L <-> ROW2R */
    vshrn.s32       ROW5R, q3, #16
    vshl.s32        q5, q5, #13
    vmlal.s16       q4, ROW7R, XFIX_0_298631336_MINUS_0_899976223
    vadd.s32        q2, q5, q6
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    vsub.s32        q3, q1, q4
    vshrn.s32       ROW7R, q2, #16
    vshrn.s32       ROW7L, q5, #16                                 /* ROW7L <-> ROW3R */
    vshrn.s32       ROW4L, q6, #16                                 /* ROW4L <-> ROW0R */
    vshrn.s32       ROW4R, q3, #16
2:  /* Descale to 8-bit and range limit */
    vqrshrn.s16     d16, q8, #2
    vqrshrn.s16     d17, q9, #2
    vqrshrn.s16     d18, q10, #2
    vqrshrn.s16     d19, q11, #2
    vpop            {d8 - d15}  /* restore Neon registers */
    vqrshrn.s16     d20, q12, #2
    /* Transpose the final 8-bit samples and do signed->unsigned conversion */
    vtrn.16         q8, q9
    vqrshrn.s16     d21, q13, #2
    vqrshrn.s16     d22, q14, #2
    vmov.u8         q0, #(CENTERJSAMPLE)
    vqrshrn.s16     d23, q15, #2
    vtrn.8          d16, d17
    vtrn.8          d18, d19
    vadd.u8         q8, q8, q0
    vadd.u8         q9, q9, q0
    vtrn.16         q10, q11
    /* Store results to the output buffer */
    ldmia           OUTPUT_BUF!, {TMP1, TMP2}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    vst1.8          {d16}, [TMP1]
    vtrn.8          d20, d21
    vst1.8          {d17}, [TMP2]
    ldmia           OUTPUT_BUF!, {TMP1, TMP2}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    vst1.8          {d18}, [TMP1]
    vadd.u8         q10, q10, q0
    vst1.8          {d19}, [TMP2]
    ldmia           OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    add             TMP3, TMP3, OUTPUT_COL
    add             TMP4, TMP4, OUTPUT_COL
    vtrn.8          d22, d23
    vst1.8          {d20}, [TMP1]
    vadd.u8         q11, q11, q0
    vst1.8          {d21}, [TMP2]
    vst1.8          {d22}, [TMP3]
    vst1.8          {d23}, [TMP4]
    bx              lr

3:  /* Left 4x8 half is done, right 4x8 half contains mostly zeros */

    /* Transpose left 4x8 half */
    vtrn.16         ROW6L, ROW7L
    vtrn.16         ROW2L, ROW3L
    vtrn.16         ROW0L, ROW1L
    vtrn.16         ROW4L, ROW5L
    vshl.s16        ROW0R, ROW0R, #2  /* PASS1_BITS */
    vtrn.32         ROW1L, ROW3L
    vtrn.32         ROW4L, ROW6L
    vtrn.32         ROW0L, ROW2L
    vtrn.32         ROW5L, ROW7L
    cmp             r0, #0
    beq             4f  /* Right 4x8 half has all zeros, go to 'sparse' second
                           pass */

    /* Only row 0 is non-zero for the right 4x8 half */
    vdup.s16        ROW1R, ROW0R[1]
    vdup.s16        ROW2R, ROW0R[2]
    vdup.s16        ROW3R, ROW0R[3]
    vdup.s16        ROW4R, ROW0R[0]
    vdup.s16        ROW5R, ROW0R[1]
    vdup.s16        ROW6R, ROW0R[2]
    vdup.s16        ROW7R, ROW0R[3]
    vdup.s16        ROW0R, ROW0R[0]
    b               1b  /* Go to 'normal' second pass */

4:  /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), left 4x8 half */
    vld1.s16        {d2}, [ip, :64]  /* reload constants */
    vmull.s16       q6, ROW1L, XFIX_1_175875602
    vmlal.s16       q6, ROW3L, XFIX_1_175875602_MINUS_1_961570560
    vmull.s16       q7, ROW3L, XFIX_1_175875602
    vmlal.s16       q7, ROW1L, XFIX_1_175875602_MINUS_0_390180644
    vmull.s16       q2, ROW2L, XFIX_0_541196100
    vshll.s16       q3, ROW0L, #13
    vmov            q4, q6
    vmlal.s16       q6, ROW3L, XFIX_3_072711026_MINUS_2_562915447
    vmlsl.s16       q4, ROW1L, XFIX_0_899976223
    vadd.s32        q1, q3, q2
    vmov            q5, q7
    vmlal.s16       q7, ROW1L, XFIX_1_501321110_MINUS_0_899976223
    vadd.s32        q1, q1, q6
    vadd.s32        q6, q6, q6
    vmlsl.s16       q5, ROW3L, XFIX_2_562915447
    vshrn.s32       ROW1L, q1, #16
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW2L, XFIX_0_541196100_PLUS_0_765366865
    vsub.s32        q3, q3, q2
    vshrn.s32       ROW2R, q1, #16  /* ROW6L <-> ROW2R */
    vadd.s32        q1, q3, q5
    vsub.s32        q3, q3, q5
    vshll.s16       q5, ROW0L, #13
    vshrn.s32       ROW2L, q1, #16
    vshrn.s32       ROW1R, q3, #16  /* ROW5L <-> ROW1R */
    vadd.s32        q2, q5, q6
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    vsub.s32        q3, q1, q4
    vshrn.s32       ROW3R, q2, #16  /* ROW7L <-> ROW3R */
    vshrn.s32       ROW3L, q5, #16
    vshrn.s32       ROW0L, q6, #16
    vshrn.s32       ROW0R, q3, #16  /* ROW4L <-> ROW0R */
    /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), right 4x8 half */
    vld1.s16        {d2}, [ip, :64]  /* reload constants */
    vmull.s16       q6, ROW5L, XFIX_1_175875602
    vmlal.s16       q6, ROW7L, XFIX_1_175875602_MINUS_1_961570560
    vmull.s16       q7, ROW7L, XFIX_1_175875602
    vmlal.s16       q7, ROW5L, XFIX_1_175875602_MINUS_0_390180644
    vmull.s16       q2, ROW6L, XFIX_0_541196100
    vshll.s16       q3, ROW4L, #13
    vmov            q4, q6
    vmlal.s16       q6, ROW7L, XFIX_3_072711026_MINUS_2_562915447
    vmlsl.s16       q4, ROW5L, XFIX_0_899976223
    vadd.s32        q1, q3, q2
    vmov            q5, q7
    vmlal.s16       q7, ROW5L, XFIX_1_501321110_MINUS_0_899976223
    vadd.s32        q1, q1, q6
    vadd.s32        q6, q6, q6
    vmlsl.s16       q5, ROW7L, XFIX_2_562915447
    vshrn.s32       ROW5L, q1, #16  /* ROW5L <-> ROW1R */
    vsub.s32        q1, q1, q6
    vmull.s16       q6, ROW6L, XFIX_0_541196100_PLUS_0_765366865
    vsub.s32        q3, q3, q2
    vshrn.s32       ROW6R, q1, #16
    vadd.s32        q1, q3, q5
    vsub.s32        q3, q3, q5
    vshll.s16       q5, ROW4L, #13
    vshrn.s32       ROW6L, q1, #16  /* ROW6L <-> ROW2R */
    vshrn.s32       ROW5R, q3, #16
    vadd.s32        q2, q5, q6
    vsub.s32        q1, q5, q6
    vadd.s32        q6, q2, q7
    vsub.s32        q2, q2, q7
    vadd.s32        q5, q1, q4
    vsub.s32        q3, q1, q4
    vshrn.s32       ROW7R, q2, #16
    vshrn.s32       ROW7L, q5, #16  /* ROW7L <-> ROW3R */
    vshrn.s32       ROW4L, q6, #16  /* ROW4L <-> ROW0R */
    vshrn.s32       ROW4R, q3, #16
    b               2b  /* Go to epilogue */

    .unreq          DCT_TABLE
    .unreq          COEF_BLOCK
    .unreq          OUTPUT_BUF
    .unreq          OUTPUT_COL
    .unreq          TMP1
    .unreq          TMP2
    .unreq          TMP3
    .unreq          TMP4

    .unreq          ROW0L
    .unreq          ROW0R
    .unreq          ROW1L
    .unreq          ROW1R
    .unreq          ROW2L
    .unreq          ROW2R
    .unreq          ROW3L
    .unreq          ROW3R
    .unreq          ROW4L
    .unreq          ROW4R
    .unreq          ROW5L
    .unreq          ROW5R
    .unreq          ROW6L
    .unreq          ROW6R
    .unreq          ROW7L
    .unreq          ROW7R

/*****************************************************************************/

/*
 * jsimd_idct_ifast_neon
 *
 * This function contains a fast, not so accurate integer implementation of
 * the inverse DCT (Discrete Cosine Transform).  It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_ifast'
 * function from jidctfst.c.
 *
 * Normally a 1-D AAN DCT needs 5 multiplications and 29 additions.
 * But in the Arm Neon case some extra additions are required because the
 * VQDMULH instruction can't handle constants larger than 1.  So expressions
 * like "x * 1.082392200" have to be converted to "x * 0.082392200 + x",
 * which introduces an extra addition.  Overall, there are 6 extra additions
 * per 1-D IDCT pass, totalling to 5 VQDMULH and 35 VADD/VSUB instructions.
 */
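
/*
 * Worked example (a sketch, not taken from the upstream comments): VQDMULH on
 * 16-bit elements computes roughly (a * b * 2) >> 16, i.e. a multiply by a
 * Q15 fraction, so only the fractional part of each AAN constant is encoded
 * in the table below and the integer part is added back with plain VADDs.
 * For the first constant, 277 * 128 - 256 * 128 = 2688 ~= 0.082 * 32768, so
 *
 *   y = x + ((x * 2688 * 2) >> 16);   // ~= x * 1.082392200
 *
 * Similarly, XFIX_2_613125930 encodes only the 0.613 fraction, and the
 * remaining "2 * x" is rebuilt with "vadd.s16 q3, q1, q1" in the passes below.
 */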

#define XFIX_1_082392200  d0[0]
#define XFIX_1_414213562  d0[1]
#define XFIX_1_847759065  d0[2]
#define XFIX_2_613125930  d0[3]

.balign 16
jsimd_idct_ifast_neon_consts:
  .short (277 * 128 - 256 * 128)  /* XFIX_1_082392200 */
  .short (362 * 128 - 256 * 128)  /* XFIX_1_414213562 */
  .short (473 * 128 - 256 * 128)  /* XFIX_1_847759065 */
  .short (669 * 128 - 512 * 128)  /* XFIX_2_613125930 */

asm_function jsimd_idct_ifast_neon
    DCT_TABLE       .req r0
    COEF_BLOCK      .req r1
    OUTPUT_BUF      .req r2
    OUTPUT_COL      .req r3
    TMP1            .req r0
    TMP2            .req r1
    TMP3            .req r2
    TMP4            .req ip

    /* Load and dequantize coefficients into Neon registers
     * with the following allocation:
     *       0 1 2 3 | 4 5 6 7
     *      ---------+--------
     *   0 | d16     | d17    ( q8  )
     *   1 | d18     | d19    ( q9  )
     *   2 | d20     | d21    ( q10 )
     *   3 | d22     | d23    ( q11 )
     *   4 | d24     | d25    ( q12 )
     *   5 | d26     | d27    ( q13 )
     *   6 | d28     | d29    ( q14 )
     *   7 | d30     | d31    ( q15 )
     */
    adr             ip, jsimd_idct_ifast_neon_consts
    vld1.16         {d16, d17, d18, d19}, [COEF_BLOCK, :128]!
    vld1.16         {d0, d1, d2, d3}, [DCT_TABLE, :128]!
    vld1.16         {d20, d21, d22, d23}, [COEF_BLOCK, :128]!
    vmul.s16        q8, q8, q0
    vld1.16         {d4, d5, d6, d7}, [DCT_TABLE, :128]!
    vmul.s16        q9, q9, q1
    vld1.16         {d24, d25, d26, d27}, [COEF_BLOCK, :128]!
    vmul.s16        q10, q10, q2
    vld1.16         {d0, d1, d2, d3}, [DCT_TABLE, :128]!
    vmul.s16        q11, q11, q3
    vld1.16         {d28, d29, d30, d31}, [COEF_BLOCK, :128]
    vmul.s16        q12, q12, q0
    vld1.16         {d4, d5, d6, d7}, [DCT_TABLE, :128]!
    vmul.s16        q14, q14, q2
    vmul.s16        q13, q13, q1
    vld1.16         {d0}, [ip, :64]  /* load constants */
    vmul.s16        q15, q15, q3
    vpush           {d8 - d13}       /* save Neon registers */

    /* 1-D IDCT, pass 1 */
    vsub.s16        q2, q10, q14
    vadd.s16        q14, q10, q14
    vsub.s16        q1, q11, q13
    vadd.s16        q13, q11, q13
    vsub.s16        q5, q9, q15
    vadd.s16        q15, q9, q15
    vqdmulh.s16     q4, q2, XFIX_1_414213562
    vqdmulh.s16     q6, q1, XFIX_2_613125930
    vadd.s16        q3, q1, q1
    vsub.s16        q1, q5, q1
    vadd.s16        q10, q2, q4
    vqdmulh.s16     q4, q1, XFIX_1_847759065
    vsub.s16        q2, q15, q13
    vadd.s16        q3, q3, q6
    vqdmulh.s16     q6, q2, XFIX_1_414213562
    vadd.s16        q1, q1, q4
    vqdmulh.s16     q4, q5, XFIX_1_082392200
    vsub.s16        q10, q10, q14
    vadd.s16        q2, q2, q6
    vsub.s16        q6, q8, q12
    vadd.s16        q12, q8, q12
    vadd.s16        q9, q5, q4
    vadd.s16        q5, q6, q10
    vsub.s16        q10, q6, q10
    vadd.s16        q6, q15, q13
    vadd.s16        q8, q12, q14
    vsub.s16        q3, q6, q3
    vsub.s16        q12, q12, q14
    vsub.s16        q3, q3, q1
    vsub.s16        q1, q9, q1
    vadd.s16        q2, q3, q2
    vsub.s16        q15, q8, q6
    vadd.s16        q1, q1, q2
    vadd.s16        q8, q8, q6
    vadd.s16        q14, q5, q3
    vsub.s16        q9, q5, q3
    vsub.s16        q13, q10, q2
    vadd.s16        q10, q10, q2

    /* Transpose */
    vtrn.16         q8, q9
    vsub.s16        q11, q12, q1
    vtrn.16         q14, q15
    vadd.s16        q12, q12, q1
    vtrn.16         q10, q11
    vtrn.16         q12, q13
    vtrn.32         q9, q11
    vtrn.32         q12, q14
    vtrn.32         q8, q10
    vtrn.32         q13, q15
    vswp            d28, d21
    vswp            d26, d19

    /* 1-D IDCT, pass 2 */
    vsub.s16        q2, q10, q14
    vswp            d30, d23
    vadd.s16        q14, q10, q14
    vswp            d24, d17
    vsub.s16        q1, q11, q13
    vadd.s16        q13, q11, q13
    vsub.s16        q5, q9, q15
    vadd.s16        q15, q9, q15
    vqdmulh.s16     q4, q2, XFIX_1_414213562
    vqdmulh.s16     q6, q1, XFIX_2_613125930
    vadd.s16        q3, q1, q1
    vsub.s16        q1, q5, q1
    vadd.s16        q10, q2, q4
    vqdmulh.s16     q4, q1, XFIX_1_847759065
    vsub.s16        q2, q15, q13
    vadd.s16        q3, q3, q6
    vqdmulh.s16     q6, q2, XFIX_1_414213562
    vadd.s16        q1, q1, q4
    vqdmulh.s16     q4, q5, XFIX_1_082392200
    vsub.s16        q10, q10, q14
    vadd.s16        q2, q2, q6
    vsub.s16        q6, q8, q12
    vadd.s16        q12, q8, q12
    vadd.s16        q9, q5, q4
    vadd.s16        q5, q6, q10
    vsub.s16        q10, q6, q10
    vadd.s16        q6, q15, q13
    vadd.s16        q8, q12, q14
    vsub.s16        q3, q6, q3
    vsub.s16        q12, q12, q14
    vsub.s16        q3, q3, q1
    vsub.s16        q1, q9, q1
    vadd.s16        q2, q3, q2
    vsub.s16        q15, q8, q6
    vadd.s16        q1, q1, q2
    vadd.s16        q8, q8, q6
    vadd.s16        q14, q5, q3
    vsub.s16        q9, q5, q3
    vsub.s16        q13, q10, q2
    vpop            {d8 - d13}  /* restore Neon registers */
    vadd.s16        q10, q10, q2
    vsub.s16        q11, q12, q1
    vadd.s16        q12, q12, q1

    /* Descale to 8-bit and range limit */
    vmov.u8         q0, #0x80
    vqshrn.s16      d16, q8, #5
    vqshrn.s16      d17, q9, #5
    vqshrn.s16      d18, q10, #5
    vqshrn.s16      d19, q11, #5
    vqshrn.s16      d20, q12, #5
    vqshrn.s16      d21, q13, #5
    vqshrn.s16      d22, q14, #5
    vqshrn.s16      d23, q15, #5
    vadd.u8         q8, q8, q0
    vadd.u8         q9, q9, q0
    vadd.u8         q10, q10, q0
    vadd.u8         q11, q11, q0

    /* Transpose the final 8-bit samples */
    vtrn.16         q8, q9
    vtrn.16         q10, q11
    vtrn.32         q8, q10
    vtrn.32         q9, q11
    vtrn.8          d16, d17
    vtrn.8          d18, d19

    /* Store results to the output buffer */
    ldmia           OUTPUT_BUF!, {TMP1, TMP2}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    vst1.8          {d16}, [TMP1]
    vst1.8          {d17}, [TMP2]
    ldmia           OUTPUT_BUF!, {TMP1, TMP2}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    vst1.8          {d18}, [TMP1]
    vtrn.8          d20, d21
    vst1.8          {d19}, [TMP2]
    ldmia           OUTPUT_BUF, {TMP1, TMP2, TMP3, TMP4}
    add             TMP1, TMP1, OUTPUT_COL
    add             TMP2, TMP2, OUTPUT_COL
    add             TMP3, TMP3, OUTPUT_COL
    add             TMP4, TMP4, OUTPUT_COL
    vst1.8          {d20}, [TMP1]
    vtrn.8          d22, d23
    vst1.8          {d21}, [TMP2]
    vst1.8          {d22}, [TMP3]
    vst1.8          {d23}, [TMP4]
    bx              lr

    .unreq          DCT_TABLE
    .unreq          COEF_BLOCK
    .unreq          OUTPUT_BUF
    .unreq          OUTPUT_COL
    .unreq          TMP1
    .unreq          TMP2
    .unreq          TMP3
    .unreq          TMP4

/*****************************************************************************/

/*
 * jsimd_extrgb_ycc_convert_neon
 * jsimd_extbgr_ycc_convert_neon
 * jsimd_extrgbx_ycc_convert_neon
 * jsimd_extbgrx_ycc_convert_neon
 * jsimd_extxbgr_ycc_convert_neon
 * jsimd_extxrgb_ycc_convert_neon
 *
 * Colorspace conversion RGB -> YCbCr
 */
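
/*
 * For reference, a sketch of the arithmetic these routines implement, matched
 * against the 16-bit fixed-point constants in jsimd_*_ycc_neon_consts below
 * (each value is the coefficient scaled by 65536 and rounded):
 *
 *   Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B         (19595, 38470, 7471)
 *   Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128   (11059, 21709, 32768)
 *   Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + 128   (32768, 27439, 5329)
 *
 * The two rows of "32767, 128" pairs form 32-bit words equal to
 * 128 * 65536 + 32767, i.e. the +128 offset plus a rounding bias; stage 1
 * copies them (via vrev64.32 of q1) into the Cb/Cr accumulators before the
 * multiply-accumulate sequence.
 */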

.macro do_store size
  .if \size == 8
    vst1.8          {d20}, [Y]!
    vst1.8          {d21}, [U]!
    vst1.8          {d22}, [V]!
  .elseif \size == 4
    vst1.8          {d20[0]}, [Y]!
    vst1.8          {d20[1]}, [Y]!
    vst1.8          {d20[2]}, [Y]!
    vst1.8          {d20[3]}, [Y]!
    vst1.8          {d21[0]}, [U]!
    vst1.8          {d21[1]}, [U]!
    vst1.8          {d21[2]}, [U]!
    vst1.8          {d21[3]}, [U]!
    vst1.8          {d22[0]}, [V]!
    vst1.8          {d22[1]}, [V]!
    vst1.8          {d22[2]}, [V]!
    vst1.8          {d22[3]}, [V]!
  .elseif \size == 2
    vst1.8          {d20[4]}, [Y]!
    vst1.8          {d20[5]}, [Y]!
    vst1.8          {d21[4]}, [U]!
    vst1.8          {d21[5]}, [U]!
    vst1.8          {d22[4]}, [V]!
    vst1.8          {d22[5]}, [V]!
  .elseif \size == 1
    vst1.8          {d20[6]}, [Y]!
    vst1.8          {d21[6]}, [U]!
    vst1.8          {d22[6]}, [V]!
  .else
    .error unsupported macroblock size
  .endif
.endm

.macro do_load bpp, size
  .if \bpp == 24
    .if \size == 8
      vld3.8        {d10, d11, d12}, [RGB]!
      pld           [RGB, #128]
    .elseif \size == 4
      vld3.8        {d10[0], d11[0], d12[0]}, [RGB]!
      vld3.8        {d10[1], d11[1], d12[1]}, [RGB]!
      vld3.8        {d10[2], d11[2], d12[2]}, [RGB]!
      vld3.8        {d10[3], d11[3], d12[3]}, [RGB]!
    .elseif \size == 2
      vld3.8        {d10[4], d11[4], d12[4]}, [RGB]!
      vld3.8        {d10[5], d11[5], d12[5]}, [RGB]!
    .elseif \size == 1
      vld3.8        {d10[6], d11[6], d12[6]}, [RGB]!
    .else
      .error unsupported macroblock size
    .endif
  .elseif \bpp == 32
    .if \size == 8
      vld4.8        {d10, d11, d12, d13}, [RGB]!
      pld           [RGB, #128]
    .elseif \size == 4
      vld4.8        {d10[0], d11[0], d12[0], d13[0]}, [RGB]!
      vld4.8        {d10[1], d11[1], d12[1], d13[1]}, [RGB]!
      vld4.8        {d10[2], d11[2], d12[2], d13[2]}, [RGB]!
      vld4.8        {d10[3], d11[3], d12[3], d13[3]}, [RGB]!
    .elseif \size == 2
      vld4.8        {d10[4], d11[4], d12[4], d13[4]}, [RGB]!
      vld4.8        {d10[5], d11[5], d12[5], d13[5]}, [RGB]!
    .elseif \size == 1
      vld4.8        {d10[6], d11[6], d12[6], d13[6]}, [RGB]!
    .else
      .error unsupported macroblock size
    .endif
  .else
    .error unsupported bpp
  .endif
.endm

.macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, b_offs

/*
 * 2-stage pipelined RGB->YCbCr conversion
 */

.macro do_rgb_to_yuv_stage1
    vmovl.u8        q2, d1\r_offs  /* r = { d4, d5 } */
    vmovl.u8        q3, d1\g_offs  /* g = { d6, d7 } */
    vmovl.u8        q4, d1\b_offs  /* b = { d8, d9 } */
    vmull.u16       q7, d4, d0[0]
    vmlal.u16       q7, d6, d0[1]
    vmlal.u16       q7, d8, d0[2]
    vmull.u16       q8, d5, d0[0]
    vmlal.u16       q8, d7, d0[1]
    vmlal.u16       q8, d9, d0[2]
    vrev64.32       q9, q1
    vrev64.32       q13, q1
    vmlsl.u16       q9, d4, d0[3]
    vmlsl.u16       q9, d6, d1[0]
    vmlal.u16       q9, d8, d1[1]
    vmlsl.u16       q13, d5, d0[3]
    vmlsl.u16       q13, d7, d1[0]
    vmlal.u16       q13, d9, d1[1]
    vrev64.32       q14, q1
    vrev64.32       q15, q1
    vmlal.u16       q14, d4, d1[1]
    vmlsl.u16       q14, d6, d1[2]
    vmlsl.u16       q14, d8, d1[3]
    vmlal.u16       q15, d5, d1[1]
    vmlsl.u16       q15, d7, d1[2]
    vmlsl.u16       q15, d9, d1[3]
.endm

.macro do_rgb_to_yuv_stage2
    vrshrn.u32      d20, q7, #16
    vrshrn.u32      d21, q8, #16
    vshrn.u32       d22, q9, #16
    vshrn.u32       d23, q13, #16
    vshrn.u32       d24, q14, #16
    vshrn.u32       d25, q15, #16
    vmovn.u16       d20, q10  /* d20 = y */
    vmovn.u16       d21, q11  /* d21 = u */
    vmovn.u16       d22, q12  /* d22 = v */
.endm

.macro do_rgb_to_yuv
    do_rgb_to_yuv_stage1
    do_rgb_to_yuv_stage2
.endm

.macro do_rgb_to_yuv_stage2_store_load_stage1
    vrshrn.u32      d20, q7, #16
    vrshrn.u32      d21, q8, #16
    vshrn.u32       d22, q9, #16
    vrev64.32       q9, q1
    vshrn.u32       d23, q13, #16
    vrev64.32       q13, q1
    vshrn.u32       d24, q14, #16
    vshrn.u32       d25, q15, #16
    do_load         \bpp, 8
    vmovn.u16       d20, q10       /* d20 = y */
    vmovl.u8        q2, d1\r_offs  /* r = { d4, d5 } */
    vmovn.u16       d21, q11       /* d21 = u */
    vmovl.u8        q3, d1\g_offs  /* g = { d6, d7 } */
    vmovn.u16       d22, q12       /* d22 = v */
    vmovl.u8        q4, d1\b_offs  /* b = { d8, d9 } */
    vmull.u16       q7, d4, d0[0]
    vmlal.u16       q7, d6, d0[1]
    vmlal.u16       q7, d8, d0[2]
    vst1.8          {d20}, [Y]!
    vmull.u16       q8, d5, d0[0]
    vmlal.u16       q8, d7, d0[1]
    vmlal.u16       q8, d9, d0[2]
    vmlsl.u16       q9, d4, d0[3]
    vmlsl.u16       q9, d6, d1[0]
    vmlal.u16       q9, d8, d1[1]
    vst1.8          {d21}, [U]!
    vmlsl.u16       q13, d5, d0[3]
    vmlsl.u16       q13, d7, d1[0]
    vmlal.u16       q13, d9, d1[1]
    vrev64.32       q14, q1
    vrev64.32       q15, q1
    vmlal.u16       q14, d4, d1[1]
    vmlsl.u16       q14, d6, d1[2]
    vmlsl.u16       q14, d8, d1[3]
    vst1.8          {d22}, [V]!
    vmlal.u16       q15, d5, d1[1]
    vmlsl.u16       q15, d7, d1[2]
    vmlsl.u16       q15, d9, d1[3]
.endm

.balign 16
jsimd_\colorid\()_ycc_neon_consts:
  .short 19595, 38470, 7471, 11059
  .short 21709, 32768, 27439, 5329
  .short 32767, 128, 32767, 128
  .short 32767, 128, 32767, 128

asm_function jsimd_\colorid\()_ycc_convert_neon
    OUTPUT_WIDTH    .req r0
    INPUT_BUF       .req r1
    OUTPUT_BUF      .req r2
    OUTPUT_ROW      .req r3
    NUM_ROWS        .req r4

    OUTPUT_BUF0     .req r5
    OUTPUT_BUF1     .req r6
    OUTPUT_BUF2     .req OUTPUT_BUF

    RGB             .req r7
    Y               .req r8
    U               .req r9
    V               .req r10
    N               .req ip

    /* Load constants to d0, d1, d2, d3 */
    adr             ip, jsimd_\colorid\()_ycc_neon_consts
    vld1.16         {d0, d1, d2, d3}, [ip, :128]

    /* Save Arm registers and handle input arguments */
    push            {r4, r5, r6, r7, r8, r9, r10, lr}
    ldr             NUM_ROWS, [sp, #(4 * 8)]
    ldr             OUTPUT_BUF0, [OUTPUT_BUF]
    ldr             OUTPUT_BUF1, [OUTPUT_BUF, #4]
    ldr             OUTPUT_BUF2, [OUTPUT_BUF, #8]
    .unreq          OUTPUT_BUF

    /* Save Neon registers */
    vpush           {d8 - d15}

    /* Outer loop over scanlines */
    cmp             NUM_ROWS, #1
    blt             9f
0:
    ldr             Y, [OUTPUT_BUF0, OUTPUT_ROW, lsl #2]
    ldr             U, [OUTPUT_BUF1, OUTPUT_ROW, lsl #2]
    mov             N, OUTPUT_WIDTH
    ldr             V, [OUTPUT_BUF2, OUTPUT_ROW, lsl #2]
    add             OUTPUT_ROW, OUTPUT_ROW, #1
    ldr             RGB, [INPUT_BUF], #4

    /* Inner loop over pixels */
    subs            N, N, #8
    blt             3f
    do_load         \bpp, 8
    do_rgb_to_yuv_stage1
    subs            N, N, #8
    blt             2f
1:
    do_rgb_to_yuv_stage2_store_load_stage1
    subs            N, N, #8
    bge             1b
2:
    do_rgb_to_yuv_stage2
    do_store        8
    tst             N, #7
    beq             8f
3:
    tst             N, #4
    beq             3f
    do_load         \bpp, 4
3:
    tst             N, #2
    beq             4f
    do_load         \bpp, 2
4:
    tst             N, #1
    beq             5f
    do_load         \bpp, 1
5:
    do_rgb_to_yuv
    tst             N, #4
    beq             6f
    do_store        4
6:
    tst             N, #2
    beq             7f
    do_store        2
7:
    tst             N, #1
    beq             8f
    do_store        1
8:
    subs            NUM_ROWS, NUM_ROWS, #1
    bgt             0b
9:
    /* Restore all registers and return */
    vpop            {d8 - d15}
    pop             {r4, r5, r6, r7, r8, r9, r10, pc}

    .unreq          OUTPUT_WIDTH
    .unreq          OUTPUT_ROW
    .unreq          INPUT_BUF
    .unreq          NUM_ROWS
    .unreq          OUTPUT_BUF0
    .unreq          OUTPUT_BUF1
    .unreq          OUTPUT_BUF2
    .unreq          RGB
    .unreq          Y
    .unreq          U
    .unreq          V
    .unreq          N

.purgem do_rgb_to_yuv
.purgem do_rgb_to_yuv_stage1
.purgem do_rgb_to_yuv_stage2
.purgem do_rgb_to_yuv_stage2_store_load_stage1

.endm

/*--------------------------------- id ----- bpp R  G  B */
generate_jsimd_rgb_ycc_convert_neon extrgb,  24, 0, 1, 2
generate_jsimd_rgb_ycc_convert_neon extbgr,  24, 2, 1, 0
generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2
generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0
generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1
generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3

.purgem do_load
.purgem do_store