// libswscale/aarch64/yuv2rgb_neon.S
  1. /*
  2. * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
  3. * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/aarch64/asm.S"
/* Load the y_offset (-> w9) and y_coeff (-> w10) integer arguments from the
 * stack.  \yoff and \ycoeff are the byte offsets of the two values from sp.
 * NOTE(review): on Darwin, stack arguments appear packed so the two 32-bit
 * values sit in adjacent words and a single ldp fetches both; on standard
 * AAPCS64 each stack argument occupies its own 8-byte slot, hence the two
 * separate ldr at distinct offsets — confirm against the ABI docs. */
.macro load_yoff_ycoeff yoff ycoeff
#if defined(__APPLE__)
        ldp             w9, w10, [sp, #\yoff]           // w9 = y_offset, w10 = y_coeff
#else
        ldr             w9,  [sp, #\yoff]               // w9  = y_offset
        ldr             w10, [sp, #\ycoeff]             // w10 = y_coeff
#endif
.endm
/* Load the NV12 converter's stack arguments and precompute per-row constants.
 * Register arguments (as evidenced by the padding computations below):
 *   w0 = width, w1 = height, x2 = dst, w3 = dst linesize,
 *   x4 = srcY, w5 = linesizeY, x6 = srcC (interleaved UV), w7 = linesizeC.
 * Stack: [sp] = coefficient table, followed by y_offset and y_coeff. */
.macro load_args_nv12
        ldr             x8, [sp]                        // table
        load_yoff_ycoeff 8, 16                          // y_offset, y_coeff
        ld1             {v1.1D}, [x8]                   // v1 = 4 x int16 coeffs {v2r, u2g, v2g, u2b}
        dup             v0.8H, w10                      // v0 = y_coeff broadcast to all lanes
        dup             v3.8H, w9                       // v3 = y_offset broadcast to all lanes
        sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
        sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
        sub             w7, w7, w0                      // w7 = linesizeC - width (paddingC)
        neg             w11, w0                         // w11 = -width (chroma-line rewind, see increment_nv12)
.endm
/* NV21 uses exactly the same argument layout as NV12; the V/U byte order
 * difference is handled at load time by load_chroma_nv21. */
.macro load_args_nv21
        load_args_nv12
.endm
/* Load the planar YUV420P converter's stack arguments and precompute per-row
 * constants.  Register arguments as for nv12, except x6/w7 are the U plane
 * (srcU/linesizeU); the V plane, table and y_offset/y_coeff come on the
 * stack.  Chroma rows are width/2 bytes wide (4:2:0 subsampling). */
.macro load_args_yuv420p
        ldr             x13, [sp]                       // srcV
        ldr             w14, [sp, #8]                   // linesizeV
        ldr             x8,  [sp, #16]                  // table
        load_yoff_ycoeff 24, 32                         // y_offset, y_coeff
        ld1             {v1.1D}, [x8]                   // v1 = 4 x int16 coeffs {v2r, u2g, v2g, u2b}
        dup             v0.8H, w10                      // v0 = y_coeff broadcast to all lanes
        dup             v3.8H, w9                       // v3 = y_offset broadcast to all lanes
        sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
        sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
        sub             w7, w7, w0, lsr #1              // w7 = linesizeU - width / 2 (paddingU)
        sub             w14, w14, w0, lsr #1            // w14 = linesizeV - width / 2 (paddingV)
        lsr             w11, w0, #1
        neg             w11, w11                        // w11 = -width / 2 (chroma-line rewind)
.endm
/* Load the planar YUV422P converter's arguments.  Same layout as yuv420p,
 * but 4:2:2 has no vertical chroma subsampling, so no rewind offset (w11)
 * is needed: the chroma pointers advance on every row. */
.macro load_args_yuv422p
        ldr             x13, [sp]                       // srcV
        ldr             w14, [sp, #8]                   // linesizeV
        ldr             x8,  [sp, #16]                  // table
        load_yoff_ycoeff 24, 32                         // y_offset, y_coeff
        ld1             {v1.1D}, [x8]                   // v1 = 4 x int16 coeffs {v2r, u2g, v2g, u2b}
        dup             v0.8H, w10                      // v0 = y_coeff broadcast to all lanes
        dup             v3.8H, w9                       // v3 = y_offset broadcast to all lanes
        sub             w3, w3, w0, lsl #2              // w3 = linesize - width * 4 (padding)
        sub             w5, w5, w0                      // w5 = linesizeY - width (paddingY)
        sub             w7, w7, w0, lsr #1              // w7 = linesizeU - width / 2 (paddingU)
        sub             w14, w14, w0, lsr #1            // w14 = linesizeV - width / 2 (paddingV)
.endm
/* Load 8 interleaved U/V byte pairs from srcC (x6), de-interleave them and
 * widen to 16 bits scaled by 1<<3: v18 = U*8, v19 = V*8. */
.macro load_chroma_nv12
        ld2             {v16.8B, v17.8B}, [x6], #16     // de-interleave: v16 = U bytes, v17 = V bytes
        ushll           v18.8H, v16.8B, #3              // v18 = U * (1<<3)
        ushll           v19.8H, v17.8B, #3              // v19 = V * (1<<3)
.endm
/* NV21 variant: the chroma stream is V/U interleaved, so the de-interleaved
 * registers are widened into the swapped destinations, keeping the
 * downstream convention v18 = U*8, v19 = V*8. */
.macro load_chroma_nv21
        ld2             {v16.8B, v17.8B}, [x6], #16     // de-interleave: v16 = V bytes, v17 = U bytes
        ushll           v19.8H, v16.8B, #3              // v19 = V * (1<<3)
        ushll           v18.8H, v17.8B, #3              // v18 = U * (1<<3)
.endm
/* Load 8 U bytes from the U plane (x6) and 8 V bytes from the V plane (x13),
 * widening to 16 bits scaled by 1<<3: v18 = U*8, v19 = V*8. */
.macro load_chroma_yuv420p
        ld1             {v16.8B}, [ x6], #8             // 8 U samples
        ld1             {v17.8B}, [x13], #8             // 8 V samples
        ushll           v18.8H, v16.8B, #3              // v18 = U * (1<<3)
        ushll           v19.8H, v17.8B, #3              // v19 = V * (1<<3)
.endm
/* Horizontally, 4:2:2 chroma is laid out like 4:2:0 chroma within a row,
 * so the same planar load applies. */
.macro load_chroma_yuv422p
        load_chroma_yuv420p
.endm
/* End-of-row chroma pointer update for 4:2:0: each chroma line serves two
 * luma lines, so advance to the next chroma line only every other row and
 * rewind to the start of the current line (srcC -= width) otherwise.
 * w1 = remaining row count, tested for parity before it is decremented. */
.macro increment_nv12
        ands            w15, w1, #1
        csel            w16, w7, w11, ne                // incC = (h & 1) ? paddingC : -width
        add             x6, x6, w16, SXTW               // srcC += incC
.endm
/* NV21 shares the NV12 chroma stride logic (single interleaved plane). */
.macro increment_nv21
        increment_nv12
.endm
/* End-of-row chroma pointer update for planar 4:2:0: same every-other-row
 * stepping as increment_nv12, applied independently to the U (x6) and
 * V (x13) planes; w11 = -width/2 is the rewind distance. */
.macro increment_yuv420p
        ands            w15, w1, #1
        csel            w16, w7, w11, ne                // incU = (h & 1) ? paddingU : -width/2
        csel            w17, w14, w11, ne               // incV = (h & 1) ? paddingV : -width/2
        add             x6, x6, w16, SXTW               // srcU += incU
        add             x13, x13, w17, SXTW             // srcV += incV
.endm
/* 4:2:2 has one chroma line per luma line, so both chroma pointers simply
 * skip their per-row padding on every row. */
.macro increment_yuv422p
        add             x6, x6, w7, SXTW                // srcU += paddingU
        add             x13, x13, w14, SXTW             // srcV += paddingV
.endm
/* Combine the scaled luma (v26 = Y1, v27 = Y2, 8 pixels each) with the
 * chroma contributions (v20/v21 = R, v22/v23 = G, v24/v25 = B), then narrow
 * with rounding and unsigned saturation to 8-bit channels and set alpha to
 * fully opaque.  \r1 \g1 \b1 \a1 receive the first 8 pixels and
 * \r2 \g2 \b2 \a2 the next 8; the caller picks the destination registers to
 * realize the argb/rgba/abgr/bgra channel orders for the st4 stores. */
.macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
        add             v20.8H, v26.8H, v20.8H          // Y1 + R1
        add             v21.8H, v27.8H, v21.8H          // Y2 + R2
        add             v22.8H, v26.8H, v22.8H          // Y1 + G1
        add             v23.8H, v27.8H, v23.8H          // Y2 + G2
        add             v24.8H, v26.8H, v24.8H          // Y1 + B1
        add             v25.8H, v27.8H, v25.8H          // Y2 + B2
        sqrshrun        \r1, v20.8H, #1                 // clip_u8((Y1 + R1) >> 1)
        sqrshrun        \r2, v21.8H, #1                 // clip_u8((Y2 + R2) >> 1)
        sqrshrun        \g1, v22.8H, #1                 // clip_u8((Y1 + G1) >> 1)
        sqrshrun        \g2, v23.8H, #1                 // clip_u8((Y2 + G2) >> 1)
        sqrshrun        \b1, v24.8H, #1                 // clip_u8((Y1 + B1) >> 1)
        sqrshrun        \b2, v25.8H, #1                 // clip_u8((Y2 + B2) >> 1)
        movi            \a1, #255                       // opaque alpha
        movi            \a2, #255
.endm
/* Instantiate ff_\ifmt\()_to_\ofmt\()_neon: convert an \ifmt frame to 8-bit
 * \ofmt, 16 pixels per inner-loop iteration (each pair of luma pixels shares
 * one chroma sample, duplicated by the zip1/zip2 below).  Returns the frame
 * height (w0 = entry value of w1).
 * NOTE(review): the inner loop consumes 16 pixels unconditionally, so width
 * is presumably a multiple of 16 (or over-read is tolerated) — confirm with
 * the C-side caller. */
.macro declare_func ifmt ofmt
function ff_\ifmt\()_to_\ofmt\()_neon, export=1
        load_args_\ifmt
        mov             w9, w1                          // save height for the return value
1:                                                      // per-row loop
        mov             w8, w0                          // w8 = width (column countdown)
2:                                                      // per-16-pixel loop
        movi            v5.8H, #4, lsl #8               // 128 * (1<<3), chroma bias
        load_chroma_\ifmt
        sub             v18.8H, v18.8H, v5.8H           // U*(1<<3) - 128*(1<<3)
        sub             v19.8H, v19.8H, v5.8H           // V*(1<<3) - 128*(1<<3)
        sqdmulh         v20.8H, v19.8H, v1.H[0]         // V * v2r            (R contribution)
        sqdmulh         v22.8H, v18.8H, v1.H[1]         // U * u2g
        sqdmulh         v19.8H, v19.8H, v1.H[2]         // V * v2g
        add             v22.8H, v22.8H, v19.8H          // U * u2g + V * v2g  (G contribution)
        sqdmulh         v24.8H, v18.8H, v1.H[3]         // U * u2b            (B contribution)
        zip2            v21.8H, v20.8H, v20.8H          // R2 \ duplicate each chroma value for
        zip1            v20.8H, v20.8H, v20.8H          // R1 / the two luma pixels it covers
        zip2            v23.8H, v22.8H, v22.8H          // G2
        zip1            v22.8H, v22.8H, v22.8H          // G1
        zip2            v25.8H, v24.8H, v24.8H          // B2
        zip1            v24.8H, v24.8H, v24.8H          // B1
        ld1             {v2.16B}, [x4], #16             // load 16 luma samples
        ushll           v26.8H, v2.8B, #3               // Y1*(1<<3), first 8 pixels
        ushll2          v27.8H, v2.16B, #3              // Y2*(1<<3), next 8 pixels
        sub             v26.8H, v26.8H, v3.8H           // Y1*(1<<3) - y_offset
        sub             v27.8H, v27.8H, v3.8H           // Y2*(1<<3) - y_offset
        sqdmulh         v26.8H, v26.8H, v0.8H           // ((Y1*(1<<3) - y_offset) * y_coeff) >> 15
        sqdmulh         v27.8H, v27.8H, v0.8H           // ((Y2*(1<<3) - y_offset) * y_coeff) >> 15
.ifc \ofmt,argb // 1 2 3 0
        compute_rgba    v5.8B,v6.8B,v7.8B,v4.8B, v17.8B,v18.8B,v19.8B,v16.8B
.endif
.ifc \ofmt,rgba // 0 1 2 3
        compute_rgba    v4.8B,v5.8B,v6.8B,v7.8B, v16.8B,v17.8B,v18.8B,v19.8B
.endif
.ifc \ofmt,abgr // 3 2 1 0
        compute_rgba    v7.8B,v6.8B,v5.8B,v4.8B, v19.8B,v18.8B,v17.8B,v16.8B
.endif
.ifc \ofmt,bgra // 2 1 0 3
        compute_rgba    v6.8B,v5.8B,v4.8B,v7.8B, v18.8B,v17.8B,v16.8B,v19.8B
.endif
        st4             { v4.8B, v5.8B, v6.8B, v7.8B}, [x2], #32 // store first 8 interleaved pixels
        st4             {v16.8B,v17.8B,v18.8B,v19.8B}, [x2], #32 // store next 8 interleaved pixels
        subs            w8, w8, #16                     // width -= 16
        b.gt            2b
        add             x2, x2, w3, SXTW                // dst  += padding
        add             x4, x4, w5, SXTW                // srcY += paddingY
        increment_\ifmt
        subs            w1, w1, #1                      // height -= 1
        b.gt            1b
        mov             w0, w9                          // return the original height
        ret
endfunc
.endm
/* Emit the four output-order variants (argb/rgba/abgr/bgra) of the
 * converter for one input pixel format \ifmt. */
.macro declare_rgb_funcs ifmt
        declare_func \ifmt, argb
        declare_func \ifmt, rgba
        declare_func \ifmt, abgr
        declare_func \ifmt, bgra
.endm
/* Instantiate all 16 converters: 4 input formats x 4 RGB channel orders. */
declare_rgb_funcs nv12
declare_rgb_funcs nv21
declare_rgb_funcs yuv420p
declare_rgb_funcs yuv422p