/*
 * Copyright (c) 2020 Martin Storsjo
 * Copyright (c) 2024 Ramiro Polla
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/aarch64/asm.S"

// Byte offset of the packed 16-bit coefficient block inside the
// int32_t *rgb2yuv table passed by the caller.
#define RGB2YUV_COEFFS 16*4+16*32

// Coefficient lanes: v0 = Y coefficients, v1 = U, v2 = V (B, G, R order).
#define BY v0.h[0]
#define GY v0.h[1]
#define RY v0.h[2]
#define BU v1.h[0]
#define GU v1.h[1]
#define RU v1.h[2]
#define BV v2.h[0]
#define GV v2.h[1]
#define RV v2.h[2]

// Bias constants kept in the high byte of each 16-bit lane so that
// addhn can add the offset and narrow in one instruction.
#define Y_OFFSET  v22
#define UV_OFFSET v23
  34. const shuf_0321_tbl, align=4
  35. .byte 0, 3, 2, 1
  36. .byte 4, 7, 6, 5
  37. .byte 8, 11, 10, 9
  38. .byte 12, 15, 14, 13
  39. endconst
  40. const shuf_1230_tbl, align=4
  41. .byte 1, 2, 3, 0
  42. .byte 5, 6, 7, 4
  43. .byte 9, 10, 11, 8
  44. .byte 13, 14, 15, 12
  45. endconst
  46. const shuf_2103_tbl, align=4
  47. .byte 2, 1, 0, 3
  48. .byte 6, 5, 4, 7
  49. .byte 10, 9, 8, 11
  50. .byte 14, 13, 12, 15
  51. endconst
  52. const shuf_3012_tbl, align=4
  53. .byte 3, 0, 1, 2
  54. .byte 7, 4, 5, 6
  55. .byte 11, 8, 9, 10
  56. .byte 15, 12, 13, 14
  57. endconst
  58. const shuf_3210_tbl, align=4
  59. .byte 3, 2, 1, 0
  60. .byte 7, 6, 5, 4
  61. .byte 11, 10, 9, 8
  62. .byte 15, 14, 13, 12
  63. endconst
  64. const shuf_3102_tbl, align=4
  65. .byte 3, 1, 0, 2
  66. .byte 7, 5, 4, 6
  67. .byte 11, 9, 8, 10
  68. .byte 15, 13, 12, 14
  69. endconst
  70. const shuf_2013_tbl, align=4
  71. .byte 2, 0, 1, 3
  72. .byte 6, 4, 5, 7
  73. .byte 10, 8, 9, 11
  74. .byte 14, 12, 13, 15
  75. endconst
  76. const shuf_1203_tbl, align=4
  77. .byte 1, 2, 0, 3
  78. .byte 5, 6, 4, 7
  79. .byte 9, 10, 8, 11
  80. .byte 13, 14, 12, 15
  81. endconst
  82. const shuf_2130_tbl, align=4
  83. .byte 2, 1, 3, 0
  84. .byte 6, 5, 7, 4
  85. .byte 10, 9, 11, 8
  86. .byte 14, 13, 15, 12
  87. endconst
  88. // convert rgb to 16-bit y, u, or v
  89. // uses v3 and v4
  90. .macro rgbconv16 dst, b, g, r, bc, gc, rc
  91. smull v3.4s, \b\().4h, \bc
  92. smlal v3.4s, \g\().4h, \gc
  93. smlal v3.4s, \r\().4h, \rc
  94. smull2 v4.4s, \b\().8h, \bc
  95. smlal2 v4.4s, \g\().8h, \gc
  96. smlal2 v4.4s, \r\().8h, \rc // v3:v4 = b * bc + g * gc + r * rc (32-bit)
  97. shrn \dst\().4h, v3.4s, #7
  98. shrn2 \dst\().8h, v4.4s, #7 // dst = b * bc + g * gc + r * rc (16-bit)
  99. .endm
  100. // void ff_rgb24toyv12_neon(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
  101. // uint8_t *vdst, int width, int height, int lumStride,
  102. // int chromStride, int srcStride, int32_t *rgb2yuv);
  103. function ff_rgb24toyv12_neon, export=1
  104. // x0 const uint8_t *src
  105. // x1 uint8_t *ydst
  106. // x2 uint8_t *udst
  107. // x3 uint8_t *vdst
  108. // w4 int width
  109. // w5 int height
  110. // w6 int lumStride
  111. // w7 int chromStride
  112. ldrsw x14, [sp]
  113. ldr x15, [sp, #8]
  114. // x14 int srcStride
  115. // x15 int32_t *rgb2yuv
  116. // extend width and stride parameters
  117. uxtw x4, w4
  118. sxtw x6, w6
  119. sxtw x7, w7
  120. // src1 = x0
  121. // src2 = x10
  122. add x10, x0, x14 // x10 = src + srcStride
  123. lsl x14, x14, #1 // srcStride *= 2
  124. add x11, x4, x4, lsl #1 // x11 = 3 * width
  125. sub x14, x14, x11 // srcPadding = (2 * srcStride) - (3 * width)
  126. // ydst1 = x1
  127. // ydst2 = x11
  128. add x11, x1, x6 // x11 = ydst + lumStride
  129. lsl x6, x6, #1 // lumStride *= 2
  130. sub x6, x6, x4 // lumPadding = (2 * lumStride) - width
  131. sub x7, x7, x4, lsr #1 // chromPadding = chromStride - (width / 2)
  132. // load rgb2yuv coefficients into v0, v1, and v2
  133. add x15, x15, #RGB2YUV_COEFFS
  134. ld1 {v0.8h-v2.8h}, [x15] // load 24 values
  135. // load offset constants
  136. movi Y_OFFSET.8h, #0x10, lsl #8
  137. movi UV_OFFSET.8h, #0x80, lsl #8
  138. 1:
  139. mov w15, w4 // w15 = width
  140. 2:
  141. // load first line
  142. ld3 {v26.16b, v27.16b, v28.16b}, [x0], #48
  143. // widen first line to 16-bit
  144. uxtl v16.8h, v26.8b // v16 = B11
  145. uxtl v17.8h, v27.8b // v17 = G11
  146. uxtl v18.8h, v28.8b // v18 = R11
  147. uxtl2 v19.8h, v26.16b // v19 = B12
  148. uxtl2 v20.8h, v27.16b // v20 = G12
  149. uxtl2 v21.8h, v28.16b // v21 = R12
  150. // calculate Y values for first line
  151. rgbconv16 v24, v16, v17, v18, BY, GY, RY // v24 = Y11
  152. rgbconv16 v25, v19, v20, v21, BY, GY, RY // v25 = Y12
  153. // load second line
  154. ld3 {v26.16b, v27.16b, v28.16b}, [x10], #48
  155. // pairwise add and save rgb values to calculate average
  156. addp v5.8h, v16.8h, v19.8h
  157. addp v6.8h, v17.8h, v20.8h
  158. addp v7.8h, v18.8h, v21.8h
  159. // widen second line to 16-bit
  160. uxtl v16.8h, v26.8b // v16 = B21
  161. uxtl v17.8h, v27.8b // v17 = G21
  162. uxtl v18.8h, v28.8b // v18 = R21
  163. uxtl2 v19.8h, v26.16b // v19 = B22
  164. uxtl2 v20.8h, v27.16b // v20 = G22
  165. uxtl2 v21.8h, v28.16b // v21 = R22
  166. // calculate Y values for second line
  167. rgbconv16 v26, v16, v17, v18, BY, GY, RY // v26 = Y21
  168. rgbconv16 v27, v19, v20, v21, BY, GY, RY // v27 = Y22
  169. // pairwise add rgb values to calculate average
  170. addp v16.8h, v16.8h, v19.8h
  171. addp v17.8h, v17.8h, v20.8h
  172. addp v18.8h, v18.8h, v21.8h
  173. // calculate average
  174. add v16.8h, v16.8h, v5.8h
  175. add v17.8h, v17.8h, v6.8h
  176. add v18.8h, v18.8h, v7.8h
  177. ushr v16.8h, v16.8h, #2
  178. ushr v17.8h, v17.8h, #2
  179. ushr v18.8h, v18.8h, #2
  180. // calculate U and V values
  181. rgbconv16 v28, v16, v17, v18, BU, GU, RU // v28 = U
  182. rgbconv16 v29, v16, v17, v18, BV, GV, RV // v29 = V
  183. // add offsets and narrow all values
  184. addhn v24.8b, v24.8h, Y_OFFSET.8h
  185. addhn v25.8b, v25.8h, Y_OFFSET.8h
  186. addhn v26.8b, v26.8h, Y_OFFSET.8h
  187. addhn v27.8b, v27.8h, Y_OFFSET.8h
  188. addhn v28.8b, v28.8h, UV_OFFSET.8h
  189. addhn v29.8b, v29.8h, UV_OFFSET.8h
  190. subs w15, w15, #16
  191. // store output
  192. st1 {v24.8b, v25.8b}, [x1], #16 // store ydst1
  193. st1 {v26.8b, v27.8b}, [x11], #16 // store ydst2
  194. st1 {v28.8b}, [x2], #8 // store udst
  195. st1 {v29.8b}, [x3], #8 // store vdst
  196. b.gt 2b
  197. subs w5, w5, #2
  198. // row += 2
  199. add x0, x0, x14 // src1 += srcPadding
  200. add x10, x10, x14 // src2 += srcPadding
  201. add x1, x1, x6 // ydst1 += lumPadding
  202. add x11, x11, x6 // ydst2 += lumPadding
  203. add x2, x2, x7 // udst += chromPadding
  204. add x3, x3, x7 // vdst += chromPadding
  205. b.gt 1b
  206. ret
  207. endfunc
  208. // void ff_interleave_bytes_neon(const uint8_t *src1, const uint8_t *src2,
  209. // uint8_t *dest, int width, int height,
  210. // int src1Stride, int src2Stride, int dstStride);
  211. function ff_interleave_bytes_neon, export=1
  212. sub w5, w5, w3
  213. sub w6, w6, w3
  214. sub w7, w7, w3, lsl #1
  215. 1:
  216. ands w8, w3, #0xfffffff0 // & ~15
  217. b.eq 3f
  218. 2:
  219. ld1 {v0.16b}, [x0], #16
  220. ld1 {v1.16b}, [x1], #16
  221. subs w8, w8, #16
  222. st2 {v0.16b, v1.16b}, [x2], #32
  223. b.gt 2b
  224. tst w3, #15
  225. b.eq 9f
  226. 3:
  227. tst w3, #8
  228. b.eq 4f
  229. ld1 {v0.8b}, [x0], #8
  230. ld1 {v1.8b}, [x1], #8
  231. st2 {v0.8b, v1.8b}, [x2], #16
  232. 4:
  233. tst w3, #4
  234. b.eq 5f
  235. ld1 {v0.s}[0], [x0], #4
  236. ld1 {v1.s}[0], [x1], #4
  237. zip1 v0.8b, v0.8b, v1.8b
  238. st1 {v0.8b}, [x2], #8
  239. 5:
  240. ands w8, w3, #3
  241. b.eq 9f
  242. 6:
  243. ldrb w9, [x0], #1
  244. ldrb w10, [x1], #1
  245. subs w8, w8, #1
  246. bfi w9, w10, #8, #8
  247. strh w9, [x2], #2
  248. b.gt 6b
  249. 9:
  250. subs w4, w4, #1
  251. b.eq 0f
  252. add x0, x0, w5, sxtw
  253. add x1, x1, w6, sxtw
  254. add x2, x2, w7, sxtw
  255. b 1b
  256. 0:
  257. ret
  258. endfunc
  259. // void ff_deinterleave_bytes_neon(const uint8_t *src, uint8_t *dst1, uint8_t *dst2,
  260. // int width, int height, int srcStride,
  261. // int dst1Stride, int dst2Stride);
  262. function ff_deinterleave_bytes_neon, export=1
  263. sub w5, w5, w3, lsl #1
  264. sub w6, w6, w3
  265. sub w7, w7, w3
  266. 1:
  267. ands w8, w3, #0xfffffff0 // & ~15
  268. b.eq 3f
  269. 2:
  270. ld2 {v0.16b, v1.16b}, [x0], #32
  271. subs w8, w8, #16
  272. st1 {v0.16b}, [x1], #16
  273. st1 {v1.16b}, [x2], #16
  274. b.gt 2b
  275. tst w3, #15
  276. b.eq 9f
  277. 3:
  278. tst w3, #8
  279. b.eq 4f
  280. ld2 {v0.8b, v1.8b}, [x0], #16
  281. st1 {v0.8b}, [x1], #8
  282. st1 {v1.8b}, [x2], #8
  283. 4:
  284. tst w3, #4
  285. b.eq 5f
  286. ld1 {v0.8b}, [x0], #8
  287. shrn v1.8b, v0.8h, #8
  288. xtn v0.8b, v0.8h
  289. st1 {v0.s}[0], [x1], #4
  290. st1 {v1.s}[0], [x2], #4
  291. 5:
  292. ands w8, w3, #3
  293. b.eq 9f
  294. 6:
  295. ldrh w9, [x0], #2
  296. subs w8, w8, #1
  297. ubfx w10, w9, #8, #8
  298. strb w9, [x1], #1
  299. strb w10, [x2], #1
  300. b.gt 6b
  301. 9:
  302. subs w4, w4, #1
  303. b.eq 0f
  304. add x0, x0, w5, sxtw
  305. add x1, x1, w6, sxtw
  306. add x2, x2, w7, sxtw
  307. b 1b
  308. 0:
  309. ret
  310. endfunc
  311. .macro neon_shuf shuf
  312. function ff_shuffle_bytes_\shuf\()_neon, export=1
  313. movrel x9, shuf_\shuf\()_tbl
  314. ld1 {v1.16b}, [x9]
  315. and w5, w2, #~15
  316. and w3, w2, #8
  317. and w4, w2, #4
  318. cbz w5, 2f
  319. 1:
  320. ld1 {v0.16b}, [x0], #16
  321. subs w5, w5, #16
  322. tbl v0.16b, {v0.16b}, v1.16b
  323. st1 {v0.16b}, [x1], #16
  324. b.gt 1b
  325. 2:
  326. cbz w3, 3f
  327. ld1 {v0.8b}, [x0], #8
  328. tbl v0.8b, {v0.16b}, v1.8b
  329. st1 {v0.8b}, [x1], #8
  330. 3:
  331. cbz w4, 4f
  332. .if \shuf == 0321
  333. ldr w5, [x0]
  334. rev w5, w5
  335. ror w5, w5, #24
  336. str w5, [x1]
  337. .endif
  338. .if \shuf == 1230
  339. ldr w5, [x0]
  340. ror w5, w5, #8
  341. str w5, [x1]
  342. .endif
  343. .if \shuf == 2103
  344. ldr w5, [x0]
  345. rev w5, w5
  346. ror w5, w5, #8
  347. str w5, [x1]
  348. .endif
  349. .if \shuf == 3012
  350. ldr w5, [x0]
  351. ror w5, w5, #24
  352. str w5, [x1]
  353. .endif
  354. .if \shuf == 3210
  355. ldr w5, [x0]
  356. rev w5, w5
  357. str w5, [x1]
  358. .endif
  359. .if \shuf == 3102 || \shuf == 2013 || \shuf == 1203 || \shuf == 2130
  360. ld1 {v0.s}[0], [x0]
  361. tbl v0.8b, {v0.16b}, v1.8b
  362. st1 {v0.s}[0], [x1]
  363. .endif
  364. 4:
  365. ret
  366. endfunc
  367. .endm
  368. neon_shuf 0321
  369. neon_shuf 1230
  370. neon_shuf 2103
  371. neon_shuf 3012
  372. neon_shuf 3102
  373. neon_shuf 2013
  374. neon_shuf 1203
  375. neon_shuf 2130
  376. neon_shuf 3210