/*
 * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"
  22. .macro load_yoff_ycoeff yoff ycoeff
  23. #if defined(__APPLE__)
  24. ldp w9, w10, [sp, #\yoff]
  25. #else
  26. ldr w9, [sp, #\yoff]
  27. ldr w10, [sp, #\ycoeff]
  28. #endif
  29. .endm
  30. .macro load_dst1_dst2 dst1 linesize1 dst2 linesize2
  31. #if defined(__APPLE__)
  32. #define DST_OFFSET 8
  33. #else
  34. #define DST_OFFSET 0
  35. #endif
  36. ldr x10, [sp, #\dst1 - DST_OFFSET]
  37. ldr w12, [sp, #\linesize1 - DST_OFFSET]
  38. ldr x15, [sp, #\dst2 - DST_OFFSET]
  39. ldr w16, [sp, #\linesize2 - DST_OFFSET]
  40. #undef DST_OFFSET
  41. sub w12, w12, w0 // w12 = linesize1 - width (padding1)
  42. sub w16, w16, w0 // w16 = linesize2 - width (padding2)
  43. .endm
  44. .macro load_args_nv12 ofmt
  45. ldr x8, [sp] // table
  46. load_yoff_ycoeff 8, 16 // y_offset, y_coeff
  47. ld1 {v1.1d}, [x8]
  48. dup v0.8h, w10
  49. dup v3.8h, w9
  50. .ifc \ofmt,gbrp
  51. load_dst1_dst2 24, 32, 40, 48
  52. sub w3, w3, w0 // w3 = linesize - width (padding)
  53. .else
  54. sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
  55. .endif
  56. sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
  57. sub w7, w7, w0 // w7 = linesizeC - width (paddingC)
  58. neg w11, w0
  59. .endm
  60. .macro load_args_nv21 ofmt
  61. load_args_nv12 \ofmt
  62. .endm
  63. .macro load_args_yuv420p ofmt
  64. ldr x13, [sp] // srcV
  65. ldr w14, [sp, #8] // linesizeV
  66. ldr x8, [sp, #16] // table
  67. load_yoff_ycoeff 24, 32 // y_offset, y_coeff
  68. ld1 {v1.1d}, [x8]
  69. dup v0.8h, w10
  70. dup v3.8h, w9
  71. .ifc \ofmt,gbrp
  72. load_dst1_dst2 40, 48, 56, 64
  73. sub w3, w3, w0 // w3 = linesize - width (padding)
  74. .else
  75. sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
  76. .endif
  77. sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
  78. sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
  79. sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
  80. lsr w11, w0, #1
  81. neg w11, w11
  82. .endm
  83. .macro load_args_yuv422p ofmt
  84. ldr x13, [sp] // srcV
  85. ldr w14, [sp, #8] // linesizeV
  86. ldr x8, [sp, #16] // table
  87. load_yoff_ycoeff 24, 32 // y_offset, y_coeff
  88. ld1 {v1.1d}, [x8]
  89. dup v0.8h, w10
  90. dup v3.8h, w9
  91. .ifc \ofmt,gbrp
  92. load_dst1_dst2 40, 48, 56, 64
  93. sub w3, w3, w0 // w3 = linesize - width (padding)
  94. .else
  95. sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
  96. .endif
  97. sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
  98. sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
  99. sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
  100. .endm
  101. .macro load_chroma_nv12
  102. ld2 {v16.8b, v17.8b}, [x6], #16
  103. ushll v18.8h, v16.8b, #3
  104. ushll v19.8h, v17.8b, #3
  105. .endm
  106. .macro load_chroma_nv21
  107. ld2 {v16.8b, v17.8b}, [x6], #16
  108. ushll v19.8h, v16.8b, #3
  109. ushll v18.8h, v17.8b, #3
  110. .endm
  111. .macro load_chroma_yuv420p
  112. ld1 {v16.8b}, [ x6], #8
  113. ld1 {v17.8b}, [x13], #8
  114. ushll v18.8h, v16.8b, #3
  115. ushll v19.8h, v17.8b, #3
  116. .endm
  117. .macro load_chroma_yuv422p
  118. load_chroma_yuv420p
  119. .endm
  120. .macro increment_nv12
  121. ands w17, w1, #1
  122. csel w17, w7, w11, ne // incC = (h & 1) ? paddincC : -width
  123. add x6, x6, w17, sxtw // srcC += incC
  124. .endm
  125. .macro increment_nv21
  126. increment_nv12
  127. .endm
  128. .macro increment_yuv420p
  129. ands w17, w1, #1
  130. csel w17, w7, w11, ne // incU = (h & 1) ? paddincU : -width/2
  131. add x6, x6, w17, sxtw // srcU += incU
  132. csel w17, w14, w11, ne // incV = (h & 1) ? paddincV : -width/2
  133. add x13, x13, w17, sxtw // srcV += incV
  134. .endm
  135. .macro increment_yuv422p
  136. add x6, x6, w7, sxtw // srcU += incU
  137. add x13, x13, w14, sxtw // srcV += incV
  138. .endm
  139. .macro compute_rgb r1 g1 b1 r2 g2 b2
  140. add v20.8h, v26.8h, v20.8h // Y1 + R1
  141. add v21.8h, v27.8h, v21.8h // Y2 + R2
  142. add v22.8h, v26.8h, v22.8h // Y1 + G1
  143. add v23.8h, v27.8h, v23.8h // Y2 + G2
  144. add v24.8h, v26.8h, v24.8h // Y1 + B1
  145. add v25.8h, v27.8h, v25.8h // Y2 + B2
  146. sqrshrun \r1, v20.8h, #1 // clip_u8((Y1 + R1) >> 1)
  147. sqrshrun \r2, v21.8h, #1 // clip_u8((Y2 + R1) >> 1)
  148. sqrshrun \g1, v22.8h, #1 // clip_u8((Y1 + G1) >> 1)
  149. sqrshrun \g2, v23.8h, #1 // clip_u8((Y2 + G1) >> 1)
  150. sqrshrun \b1, v24.8h, #1 // clip_u8((Y1 + B1) >> 1)
  151. sqrshrun \b2, v25.8h, #1 // clip_u8((Y2 + B1) >> 1)
  152. .endm
  153. .macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
  154. compute_rgb \r1, \g1, \b1, \r2, \g2, \b2
  155. movi \a1, #255
  156. movi \a2, #255
  157. .endm
  158. .macro declare_func ifmt ofmt
  159. function ff_\ifmt\()_to_\ofmt\()_neon, export=1
  160. load_args_\ifmt \ofmt
  161. mov w9, w1
  162. 1:
  163. mov w8, w0 // w8 = width
  164. 2:
  165. movi v5.8h, #4, lsl #8 // 128 * (1<<3)
  166. load_chroma_\ifmt
  167. sub v18.8h, v18.8h, v5.8h // U*(1<<3) - 128*(1<<3)
  168. sub v19.8h, v19.8h, v5.8h // V*(1<<3) - 128*(1<<3)
  169. sqdmulh v20.8h, v19.8h, v1.h[0] // V * v2r (R)
  170. sqdmulh v22.8h, v18.8h, v1.h[1] // U * u2g
  171. sqdmulh v19.8h, v19.8h, v1.h[2] // V * v2g
  172. add v22.8h, v22.8h, v19.8h // U * u2g + V * v2g (G)
  173. sqdmulh v24.8h, v18.8h, v1.h[3] // U * u2b (B)
  174. zip2 v21.8h, v20.8h, v20.8h // R2
  175. zip1 v20.8h, v20.8h, v20.8h // R1
  176. zip2 v23.8h, v22.8h, v22.8h // G2
  177. zip1 v22.8h, v22.8h, v22.8h // G1
  178. zip2 v25.8h, v24.8h, v24.8h // B2
  179. zip1 v24.8h, v24.8h, v24.8h // B1
  180. ld1 {v2.16b}, [x4], #16 // load luma
  181. ushll v26.8h, v2.8b, #3 // Y1*(1<<3)
  182. ushll2 v27.8h, v2.16b, #3 // Y2*(1<<3)
  183. sub v26.8h, v26.8h, v3.8h // Y1*(1<<3) - y_offset
  184. sub v27.8h, v27.8h, v3.8h // Y2*(1<<3) - y_offset
  185. sqdmulh v26.8h, v26.8h, v0.8h // ((Y1*(1<<3) - y_offset) * y_coeff) >> 15
  186. sqdmulh v27.8h, v27.8h, v0.8h // ((Y2*(1<<3) - y_offset) * y_coeff) >> 15
  187. .ifc \ofmt,argb // 1 2 3 0
  188. compute_rgba v5.8b,v6.8b,v7.8b,v4.8b, v17.8b,v18.8b,v19.8b,v16.8b
  189. .endif
  190. .ifc \ofmt,rgba // 0 1 2 3
  191. compute_rgba v4.8b,v5.8b,v6.8b,v7.8b, v16.8b,v17.8b,v18.8b,v19.8b
  192. .endif
  193. .ifc \ofmt,abgr // 3 2 1 0
  194. compute_rgba v7.8b,v6.8b,v5.8b,v4.8b, v19.8b,v18.8b,v17.8b,v16.8b
  195. .endif
  196. .ifc \ofmt,bgra // 2 1 0 3
  197. compute_rgba v6.8b,v5.8b,v4.8b,v7.8b, v18.8b,v17.8b,v16.8b,v19.8b
  198. .endif
  199. .ifc \ofmt,gbrp
  200. compute_rgb v18.8b,v4.8b,v6.8b, v19.8b,v5.8b,v7.8b
  201. st1 { v4.8b, v5.8b }, [x2], #16
  202. st1 { v6.8b, v7.8b }, [x10], #16
  203. st1 { v18.8b, v19.8b }, [x15], #16
  204. .else
  205. st4 { v4.8b, v5.8b, v6.8b, v7.8b}, [x2], #32
  206. st4 {v16.8b,v17.8b,v18.8b,v19.8b}, [x2], #32
  207. .endif
  208. subs w8, w8, #16 // width -= 16
  209. b.gt 2b
  210. add x2, x2, w3, sxtw // dst += padding
  211. .ifc \ofmt,gbrp
  212. add x10, x10, w12, sxtw // dst1 += padding1
  213. add x15, x15, w16, sxtw // dst2 += padding2
  214. .endif
  215. add x4, x4, w5, sxtw // srcY += paddingY
  216. increment_\ifmt
  217. subs w1, w1, #1 // height -= 1
  218. b.gt 1b
  219. mov w0, w9
  220. ret
  221. endfunc
  222. .endm
  223. .macro declare_rgb_funcs ifmt
  224. declare_func \ifmt, argb
  225. declare_func \ifmt, rgba
  226. declare_func \ifmt, abgr
  227. declare_func \ifmt, bgra
  228. declare_func \ifmt, gbrp
  229. .endm
  230. declare_rgb_funcs nv12
  231. declare_rgb_funcs nv21
  232. declare_rgb_funcs yuv420p
  233. declare_rgb_funcs yuv422p