/*
 * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
 * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
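
/*
 * Fixed-point scheme used throughout this file: Y, U and V samples are
 * widened to 16 bits and pre-scaled by 1 << 3, and vqdmulh.s16 returns
 * (a * b * 2) >> 16 = (a * b) >> 15, so the signed 16-bit coefficients
 * (v2r, u2g, v2g, u2b from table[], plus y_coeff) are applied without an
 * explicit shift; they are assumed to be pre-scaled by the caller to
 * match.  The final vqrshrun.s16 ... #1 removes the last factor of two
 * and narrows to unsigned 8 bits with rounding and saturation.
 */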
.macro compute_premult
    vsub.u16            q14, q11                       @ q14 = U * (1 << 3) - 128 * (1 << 3)
    vsub.u16            q15, q11                       @ q15 = V * (1 << 3) - 128 * (1 << 3)
    vqdmulh.s16         q8,  q15, d1[0]                @ q8  = V * v2r
    vqdmulh.s16         q9,  q14, d1[1]                @ q9  = U * u2g
    vqdmulh.s16         q5,  q15, d1[2]                @ q5  = V * v2g
    vadd.s16            q9,  q5                        @ q9  = U * u2g + V * v2g
    vqdmulh.s16         q10, q14, d1[3]                @ q10 = U * u2b
.endm
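
/*
 * With the premultiplied chroma terms above, each output channel is
 *     R = clip(y_coeff * (Y - y_offset) + v2r * V)
 *     G = clip(y_coeff * (Y - y_offset) + u2g * U + v2g * V)
 *     B = clip(y_coeff * (Y - y_offset) + u2b * U)
 * with U and V centred on zero.  compute_color adds the luma term
 * (built in the compute macro below) to one of q8/q9/q10, then narrows
 * and clips to 8 bits.
 */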
.macro compute_color dst_comp1 dst_comp2 pre
    vadd.s16            q1, q14, \pre
    vadd.s16            q2, q15, \pre
    vqrshrun.s16        \dst_comp1, q1, #1
    vqrshrun.s16        \dst_comp2, q2, #1
.endm
.macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
    compute_color       \r1, \r2, q8
    compute_color       \g1, \g2, q9
    compute_color       \b1, \b2, q10
    vmov.u8             \a1, #255
    vmov.u8             \a2, #255
.endm
.macro compute dst ofmt
    vshll.u8            q14, d14, #3                   @ q14 = Y * (1 << 3) (even pixels)
    vshll.u8            q15, d15, #3                   @ q15 = Y * (1 << 3) (odd pixels)
    vsub.s16            q14, q12                       @ q14 = (Y - y_offset)
    vsub.s16            q15, q12                       @ q15 = (Y - y_offset)
    vqdmulh.s16         q14, q13                       @ q14 = (Y - y_offset) * y_coeff
    vqdmulh.s16         q15, q13                       @ q15 = (Y - y_offset) * y_coeff

.ifc \ofmt,argb
    compute_rgba        d7, d8, d9, d6, d11, d12, d13, d10
.endif

.ifc \ofmt,rgba
    compute_rgba        d6, d7, d8, d9, d10, d11, d12, d13
.endif

.ifc \ofmt,abgr
    compute_rgba        d9, d8, d7, d6, d13, d12, d11, d10
.endif

.ifc \ofmt,bgra
    compute_rgba        d8, d7, d6, d9, d12, d11, d10, d13
.endif

    vzip.8              d6, d10                        @ d6 = R1R2R3R4R5R6R7R8 d10 = R9R10R11R12R13R14R15R16
    vzip.8              d7, d11                        @ d7 = G1G2G3G4G5G6G7G8 d11 = G9G10G11G12G13G14G15G16
    vzip.8              d8, d12                        @ d8 = B1B2B3B4B5B6B7B8 d12 = B9B10B11B12B13B14B15B16
    vzip.8              d9, d13                        @ d9 = A1A2A3A4A5A6A7A8 d13 = A9A10A11A12A13A14A15A16

    vst4.8              {q3, q4}, [\dst,:128]!
    vst4.8              {q5, q6}, [\dst,:128]!
.endm
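
/*
 * process_1l_internal below loads the Y line with vld2.8, which splits
 * it into even pixels (d14) and odd pixels (d15).  Each pair of
 * horizontal neighbours shares one chroma sample, so the premultiplied
 * chroma terms in q8/q9/q10 apply to both halves unchanged; vzip.8 then
 * restores the original pixel order before vst4.8 interleaves the four
 * colour planes into packed 32-bit pixels in memory.
 */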
.macro process_1l_internal dst src ofmt
    vld2.8              {d14, d15}, [\src]!            @ d14 = even Y, d15 = odd Y (deinterleaved)
    compute             \dst, \ofmt
.endm

.macro process_1l ofmt
    compute_premult
    process_1l_internal r2, r4, \ofmt
.endm

.macro process_2l ofmt
    compute_premult
    process_1l_internal r2,  r4,  \ofmt                @ first luma line
    process_1l_internal r11, r12, \ofmt                @ second luma line
.endm
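
/*
 * Argument loading.  The implied C prototype, reconstructed from the
 * register and stack usage below (the exact declaration lives on the C
 * side), is:
 *
 *     void ff_<ifmt>_to_<ofmt>_neon(int width, int height,
 *                                   uint8_t *dst, int linesize,
 *                                   const uint8_t *srcY, int linesizeY,
 *                                   <per-format chroma pointers/strides>,
 *                                   const int16_t *table,
 *                                   int y_offset, int y_coeff);
 *
 * push {r4-r12, lr} saves 40 bytes and vpush {q4-q7} another 64, so the
 * first stack argument (srcY) lands at [sp, #104].
 */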
.macro load_args_nv12
    push                {r4-r12, lr}
    vpush               {q4-q7}
    ldr                 r4,  [sp, #104]                @ r4  = srcY
    ldr                 r5,  [sp, #108]                @ r5  = linesizeY
    ldr                 r6,  [sp, #112]                @ r6  = srcC
    ldr                 r7,  [sp, #116]                @ r7  = linesizeC
    ldr                 r8,  [sp, #120]                @ r8  = table
    ldr                 r9,  [sp, #124]                @ r9  = y_offset
    ldr                 r10, [sp, #128]                @ r10 = y_coeff
    vdup.16             d0, r10                        @ d0  = y_coeff
    vld1.16             {d1}, [r8]                     @ d1  = *table
    add                 r11, r2, r3                    @ r11 = dst + linesize (dst2)
    add                 r12, r4, r5                    @ r12 = srcY + linesizeY (srcY2)
    lsl                 r3, r3, #1
    lsl                 r5, r5, #1
    sub                 r3, r3, r0, lsl #2             @ r3 = linesize * 2 - width * 4 (padding)
    sub                 r5, r5, r0                     @ r5 = linesizeY * 2 - width (paddingY)
    sub                 r7, r7, r0                     @ r7 = linesizeC - width (paddingC)
.endm
.macro load_args_nv21
    load_args_nv12
.endm
.macro load_args_yuv420p
    push                {r4-r12, lr}
    vpush               {q4-q7}
    ldr                 r4,  [sp, #104]                @ r4  = srcY
    ldr                 r5,  [sp, #108]                @ r5  = linesizeY
    ldr                 r6,  [sp, #112]                @ r6  = srcU
    ldr                 r8,  [sp, #128]                @ r8  = table
    ldr                 r9,  [sp, #132]                @ r9  = y_offset
    ldr                 r10, [sp, #136]                @ r10 = y_coeff
    vdup.16             d0, r10                        @ d0  = y_coeff
    vld1.16             {d1}, [r8]                     @ d1  = *table
    add                 r11, r2, r3                    @ r11 = dst + linesize (dst2)
    add                 r12, r4, r5                    @ r12 = srcY + linesizeY (srcY2)
    lsl                 r3, r3, #1
    lsl                 r5, r5, #1
    sub                 r3, r3, r0, lsl #2             @ r3 = linesize * 2 - width * 4 (padding)
    sub                 r5, r5, r0                     @ r5 = linesizeY * 2 - width (paddingY)
    ldr                 r10, [sp, #120]                @ r10 = srcV (r10 is free again once y_coeff is in d0)
.endm
.macro load_args_yuv422p
    push                {r4-r12, lr}
    vpush               {q4-q7}
    ldr                 r4,  [sp, #104]                @ r4  = srcY
    ldr                 r5,  [sp, #108]                @ r5  = linesizeY
    ldr                 r6,  [sp, #112]                @ r6  = srcU
    ldr                 r7,  [sp, #116]                @ r7  = linesizeU
    ldr                 r12, [sp, #124]                @ r12 = linesizeV
    ldr                 r8,  [sp, #128]                @ r8  = table
    ldr                 r9,  [sp, #132]                @ r9  = y_offset
    ldr                 r10, [sp, #136]                @ r10 = y_coeff
    vdup.16             d0, r10                        @ d0  = y_coeff
    vld1.16             {d1}, [r8]                     @ d1  = *table
    sub                 r3, r3, r0, lsl #2             @ r3  = linesize - width * 4 (padding)
    sub                 r5, r5, r0                     @ r5  = linesizeY - width (paddingY)
    sub                 r7, r7, r0, lsr #1             @ r7  = linesizeU - width / 2 (paddingU)
    sub                 r12, r12, r0, lsr #1           @ r12 = linesizeV - width / 2 (paddingV)
    ldr                 r10, [sp, #120]                @ r10 = srcV
.endm
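
/*
 * Note: yuv420p processes two luma lines per iteration, so r11 and r12
 * are needed for dst2/srcY2 and the U/V strides cannot stay in
 * registers; increment_and_test_yuv420p reloads them from the stack on
 * every row instead.  yuv422p processes a single line, which leaves r12
 * free to hold paddingV permanently.
 */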
.macro load_chroma_nv12
    pld                 [r12, #64*3]

    vld2.8              {d2, d3}, [r6]!                @ q1 = interleaved chroma line (d2 = U, d3 = V)
    vshll.u8            q14, d2, #3                    @ q14 = U * (1 << 3)
    vshll.u8            q15, d3, #3                    @ q15 = V * (1 << 3)
.endm
.macro load_chroma_nv21
    pld                 [r12, #64*3]

    vld2.8              {d2, d3}, [r6]!                @ q1 = interleaved chroma line (d2 = V, d3 = U)
    vshll.u8            q14, d3, #3                    @ q14 = U * (1 << 3)
    vshll.u8            q15, d2, #3                    @ q15 = V * (1 << 3)
.endm
.macro load_chroma_yuv420p
    pld                 [r10, #64*3]
    pld                 [r12, #64*3]

    vld1.8              d2, [r6]!                      @ d2 = U (Cb) chroma line
    vld1.8              d3, [r10]!                     @ d3 = V (Cr) chroma line
    vshll.u8            q14, d2, #3                    @ q14 = U * (1 << 3)
    vshll.u8            q15, d3, #3                    @ q15 = V * (1 << 3)
.endm
.macro load_chroma_yuv422p
    pld                 [r10, #64*3]

    vld1.8              d2, [r6]!                      @ d2 = U (Cb) chroma line
    vld1.8              d3, [r10]!                     @ d3 = V (Cr) chroma line
    vshll.u8            q14, d2, #3                    @ q14 = U * (1 << 3)
    vshll.u8            q15, d3, #3                    @ q15 = V * (1 << 3)
.endm
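
/*
 * NV21 differs from NV12 only in the byte order of the interleaved
 * chroma plane (VU instead of UV), hence the swapped d2/d3 operands in
 * load_chroma_nv21 above.
 */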
.macro increment_and_test_nv12
    add                 r11, r11, r3                   @ dst2  += padding
    add                 r12, r12, r5                   @ srcY2 += paddingY
    add                 r6, r6, r7                     @ srcC  += paddingC
    subs                r1, r1, #2                     @ height -= 2
.endm

.macro increment_and_test_nv21
    increment_and_test_nv12
.endm

.macro increment_and_test_yuv420p
    add                 r11, r11, r3                   @ dst2  += padding
    add                 r12, r12, r5                   @ srcY2 += paddingY
    ldr                 r7, [sp, #116]                 @ r7 = linesizeU
    sub                 r7, r7, r0, lsr #1             @ r7 = linesizeU - width / 2 (paddingU)
    add                 r6, r6, r7                     @ srcU += paddingU
    ldr                 r7, [sp, #124]                 @ r7 = linesizeV
    sub                 r7, r7, r0, lsr #1             @ r7 = linesizeV - width / 2 (paddingV)
    add                 r10, r10, r7                   @ srcV += paddingV
    subs                r1, r1, #2                     @ height -= 2
.endm

.macro increment_and_test_yuv422p
    add                 r6, r6, r7                     @ srcU += paddingU
    add                 r10, r10, r12                  @ srcV += paddingV
    subs                r1, r1, #1                     @ height -= 1
.endm
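
/*
 * Each inner-loop pass converts 16 pixels per luma line.  The 4:2:0
 * formats (nv12, nv21, yuv420p) convert two luma lines against a single
 * chroma line per iteration; yuv422p has full vertical chroma
 * resolution and converts one line at a time.
 */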
.macro process_nv12 ofmt
    process_2l \ofmt
.endm

.macro process_nv21 ofmt
    process_2l \ofmt
.endm

.macro process_yuv420p ofmt
    process_2l \ofmt
.endm

.macro process_yuv422p ofmt
    process_1l \ofmt
.endm
.macro declare_func ifmt ofmt
function ff_\ifmt\()_to_\ofmt\()_neon, export=1
    load_args_\ifmt
    vmov.u16            q11, #1024                     @ q11 = 128 * (1 << 3)
    vdup.16             q12, r9                        @ q12 = y_offset
    vmov                d26, d0                        @ q13 = y_coeff
    vmov                d27, d0                        @ q13 = y_coeff
1:
    mov                 r8, r0                         @ r8 = width
2:
    pld                 [r6, #64*3]
    pld                 [r4, #64*3]
    vmov.i8             d10, #128
    load_chroma_\ifmt
    process_\ifmt       \ofmt
    subs                r8, r8, #16                    @ width -= 16
    bgt                 2b
    add                 r2, r2, r3                     @ dst  += padding
    add                 r4, r4, r5                     @ srcY += paddingY
    increment_and_test_\ifmt
    bgt                 1b
    vpop                {q4-q7}
    pop                 {r4-r12, lr}
    mov                 pc, lr
endfunc
.endm
.macro declare_rgb_funcs ifmt
    declare_func \ifmt, argb
    declare_func \ifmt, rgba
    declare_func \ifmt, abgr
    declare_func \ifmt, bgra
.endm
declare_rgb_funcs nv12
declare_rgb_funcs nv21
declare_rgb_funcs yuv420p
declare_rgb_funcs yuv422p
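
/*
 * The macros above emit 16 entry points (4 input formats x 4 output
 * orders), e.g. ff_nv12_to_rgba_neon.  A minimal calling sketch from C,
 * assuming the prototype reconstructed earlier and caller-prepared
 * conversion constants (variable names here are illustrative only):
 *
 *     extern void ff_nv12_to_rgba_neon(int width, int height,
 *                                      uint8_t *dst, int dst_linesize,
 *                                      const uint8_t *src_y, int y_linesize,
 *                                      const uint8_t *src_uv, int uv_linesize,
 *                                      const int16_t *table,
 *                                      int y_offset, int y_coeff);
 *
 *     ff_nv12_to_rgba_neon(width, height, rgba, rgba_stride,
 *                          y_plane, y_stride, uv_plane, uv_stride,
 *                          table, y_offset, y_coeff);
 *
 * The inner loop consumes 16 pixels per iteration (and the 4:2:0 paths
 * two rows at a time), so width and height appear to be expected in
 * those multiples.
 */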