; rgb_2_rgb.asm (8.6 KB)

; (extraction artifact removed: a run of concatenated line numbers 1-306)
;******************************************************************************
;* Copyright Nick Kurshev
;* Copyright Michael (michaelni@gmx.at)
;* Copyright 2018 Jokyo Images
;* Copyright Ivo van Poorten
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; pshufb control vectors, one per supported 4-byte permutation.
; pb_shuffleABCD maps output byte j of every 4-byte group to input byte
; A/B/C/D of the same group (the same mapping the scalar tail loop in
; SHUFFLE_BYTES performs with byte moves).  The pattern repeats across all
; four dwords of the xmm word; VBROADCASTI128 replicates it per 128-bit
; lane for the ymm/zmm variants.
pb_shuffle2103: db 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15
pb_shuffle0321: db 0, 3, 2, 1, 4, 7, 6, 5, 8, 11, 10, 9, 12, 15, 14, 13
pb_shuffle1230: db 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
pb_shuffle3012: db 3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14
pb_shuffle3210: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
pb_shuffle3102: db 3, 1, 0, 2, 7, 5, 4, 6, 11, 9, 8, 10, 15, 13, 12, 14
pb_shuffle2013: db 2, 0, 1, 3, 6, 4, 5, 7, 10, 8, 9, 11, 14, 12, 13, 15
pb_shuffle2130: db 2, 1, 3, 0, 6, 5, 7, 4, 10, 9, 11, 8, 14, 13, 15, 12
pb_shuffle1203: db 1, 2, 0, 3, 5, 6, 4, 7, 9, 10, 8, 11, 13, 14, 12, 15

SECTION .text
;------------------------------------------------------------------------------
; RSHIFT_COPY dst, src, src2, shift, perm
;
; Non-destructive "copy %2 into %1, byte-shifted right by %4".
;   %1 = destination register
;   %2 = primary source
;   %3 = secondary source; used only on the ymm (mmsize == 32) path, where
;        vperm2i128 first assembles the needed 128-bit lanes from %2/%3
;   %4 = right shift in bytes (applied per 128-bit lane by RSHIFT)
;   %5 = vperm2i128 immediate lane selector; used only when mmsize == 32
; NOTE(review): the original comment listed only three operands ("%1 dst ;
; %2 src ; %3 shift"); the macro actually takes five as described above.
%macro RSHIFT_COPY 5
; %1 dst ; %2 src ; %3 second src (ymm only) ; %4 shift ; %5 lane selector
%if mmsize == 32
    ; ymm: build the cross-lane source first, then shift within each lane.
    vperm2i128 %1, %2, %3, %5
    RSHIFT     %1, %4
%elif cpuflag(avx)
    ; AVX xmm: three-operand shift, no separate copy needed.
    psrldq     %1, %2, %4
%else
    ; SSE: copy, then shift in place.
    mova       %1, %2
    RSHIFT     %1, %4
%endif
%endmacro
;------------------------------------------------------------------------------
; void shuffle_bytes_%1%2%3%4(const uint8_t *src, uint8_t *dst, int src_size)
;
; Permutes every 4-byte group of src into dst: output byte j of each group
; is input byte %1/%2/%3/%4 (see the matching pb_shuffle table).
; Both the scalar tail and the masked head advance 4 bytes at a time, so
; src_size is presumably a multiple of 4 — TODO confirm with callers.
; NOTE(review): a src_size of 0 would fall straight into .loop_simd on the
; non-AVX-512 path and touch one vector's worth of memory; callers appear
; expected to pass src_size > 0 — confirm.
;------------------------------------------------------------------------------
; %1-4 index shuffle
%macro SHUFFLE_BYTES 4
cglobal shuffle_bytes_%1%2%3%4, 3, 5, 2, src, dst, w, tmp, x
    VBROADCASTI128 m0, [pb_shuffle%1%2%3%4] ; per-lane pshufb control vector
    movsxdifnidn wq, wd
    mov     xq, wq                 ; x = copy of byte count, sizes the head

    ; Point src/dst one-past-the-end and walk a negative offset up to zero:
    ; a single register serves as both loop counter and index.
    add     srcq, wq
    add     dstq, wq
    neg     wq
%if mmsize == 64
    ; AVX-512: consume the (src_size % 64) head with one masked load/store
    ; instead of a scalar loop.  x = number of head dwords; build a k-mask
    ; with the low x bits set (one mask bit per dword).
    and     xq, mmsize - 4
    shr     xq, 2
    mov     tmpd, -1
    shlx    tmpd, tmpd, xd         ; BMI2 shift: ones above bit x
    not     tmpd                   ; -> low x bits set
    kmovw   k7, tmpd
    vmovdqu32 m1{k7}{z}, [srcq + wq]
    pshufb  m1, m0
    vmovdqu32 [dstq + wq]{k7}, m1
    lea     wq, [wq + 4 * xq]      ; advance past the masked head
%else
    ;calc scalar loop
    ; Scalar head: handle (src_size % mmsize) bytes one dword at a time.
    ; Masking with mmsize-4 is exact because src_size is a multiple of 4.
    and     xq, mmsize-4
    je      .loop_simd
.loop_scalar:
    mov     tmpb, [srcq + wq + %1]
    mov     [dstq+wq + 0], tmpb
    mov     tmpb, [srcq + wq + %2]
    mov     [dstq+wq + 1], tmpb
    mov     tmpb, [srcq + wq + %3]
    mov     [dstq+wq + 2], tmpb
    mov     tmpb, [srcq + wq + %4]
    mov     [dstq+wq + 3], tmpb
    add     wq, 4
    sub     xq, 4
    jg      .loop_scalar
%endif
    ;check if src_size < mmsize
    ; If the head already consumed everything (w reached 0), skip the
    ; full-vector loop.
    cmp     wq, 0
    jge     .end
.loop_simd:
    movu    m1, [srcq + wq]
    pshufb  m1, m0
    movu    [dstq + wq], m1
    add     wq, mmsize
    jl      .loop_simd
.end:
    RET
%endmacro
  99. INIT_XMM ssse3
  100. SHUFFLE_BYTES 2, 1, 0, 3
  101. SHUFFLE_BYTES 0, 3, 2, 1
  102. SHUFFLE_BYTES 1, 2, 3, 0
  103. SHUFFLE_BYTES 3, 0, 1, 2
  104. SHUFFLE_BYTES 3, 2, 1, 0
  105. SHUFFLE_BYTES 3, 1, 0, 2
  106. SHUFFLE_BYTES 2, 0, 1, 3
  107. SHUFFLE_BYTES 2, 1, 3, 0
  108. SHUFFLE_BYTES 1, 2, 0, 3
  109. %if ARCH_X86_64
  110. %if HAVE_AVX2_EXTERNAL
  111. INIT_YMM avx2
  112. SHUFFLE_BYTES 2, 1, 0, 3
  113. SHUFFLE_BYTES 0, 3, 2, 1
  114. SHUFFLE_BYTES 1, 2, 3, 0
  115. SHUFFLE_BYTES 3, 0, 1, 2
  116. SHUFFLE_BYTES 3, 2, 1, 0
  117. SHUFFLE_BYTES 3, 1, 0, 2
  118. SHUFFLE_BYTES 2, 0, 1, 3
  119. SHUFFLE_BYTES 2, 1, 3, 0
  120. SHUFFLE_BYTES 1, 2, 0, 3
  121. %endif
  122. %endif
  123. %if ARCH_X86_64
  124. %if HAVE_AVX512ICL_EXTERNAL
  125. INIT_ZMM avx512icl
  126. SHUFFLE_BYTES 2, 1, 0, 3
  127. SHUFFLE_BYTES 0, 3, 2, 1
  128. SHUFFLE_BYTES 1, 2, 3, 0
  129. SHUFFLE_BYTES 3, 0, 1, 2
  130. SHUFFLE_BYTES 3, 2, 1, 0
  131. SHUFFLE_BYTES 3, 1, 0, 2
  132. SHUFFLE_BYTES 2, 0, 1, 3
  133. SHUFFLE_BYTES 2, 1, 3, 0
  134. SHUFFLE_BYTES 1, 2, 0, 3
  135. %endif
  136. %endif
;-----------------------------------------------------------------------------------------------
; void uyvytoyuv422(uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
;                   const uint8_t *src, int width, int height,
;                   int lumStride, int chromStride, int srcStride)
;
; De-interleaves packed UYVY (per pixel pair: U0 Y0 V0 Y1, 2 bytes/pixel)
; into planar Y + U + V with 4:2:2 chroma subsampling (one U and one V per
; two pixels).  Each row is processed with a scalar head covering
; width % (mmsize*2) pixels, then full-vector iterations of mmsize*2 pixels.
;
; Register roles inside the row loop:
;   wq     - negative Y-plane offset, counts up to 0  (1 byte/pixel)
;   wtwoq  - negative src offset                      (2 bytes/pixel)
;   whalfq - negative U/V-plane offset                (1 byte per 2 pixels)
;   back_w - saved width, restored after each row
;   m1     - 0x00FF word mask (keeps the low byte of every 16-bit unit)
;-----------------------------------------------------------------------------------------------
%macro UYVY_TO_YUV422 0
cglobal uyvytoyuv422, 9, 14, 8, ydst, udst, vdst, src, w, h, lum_stride, chrom_stride, src_stride, wtwo, whalf, tmp, x, back_w
    pxor    m0, m0               ; NOTE(review): m0 appears unused below — confirm before removing
    pcmpeqw m1, m1
    psrlw   m1, 8                ; m1 = 0x00FF per word
    movsxdifnidn wq, wd
    movsxdifnidn lum_strideq, lum_strided
    movsxdifnidn chrom_strideq, chrom_strided
    movsxdifnidn src_strideq, src_strided

    mov     back_wq, wq          ; keep width for restoring between rows

    mov     whalfq, wq
    shr     whalfq, 1            ; whalf = width / 2

    ; Point each pointer one-past-the-end of its row; rows are then walked
    ; with the negative offsets w / wtwo / whalf counting up to zero.
    lea     srcq, [srcq + wq * 2]
    add     ydstq, wq
    add     udstq, whalfq
    add     vdstq, whalfq

.loop_line:
    mov     xq, wq
    mov     wtwoq, wq
    add     wtwoq, wtwoq         ; wtwo = width * 2

    neg     wq
    neg     wtwoq
    neg     whalfq

    ;calc scalar loop count
    and     xq, mmsize * 2 - 1   ; x = pixels not filling a full SIMD step
    je      .loop_simd

.loop_scalar:
    ; One pixel pair per iteration: src bytes are U, Y0, V, Y1.
    mov     tmpb, [srcq + wtwoq + 0]
    mov     [udstq + whalfq], tmpb

    mov     tmpb, [srcq + wtwoq + 1]
    mov     [ydstq + wq], tmpb

    mov     tmpb, [srcq + wtwoq + 2]
    mov     [vdstq + whalfq], tmpb

    mov     tmpb, [srcq + wtwoq + 3]
    mov     [ydstq + wq + 1], tmpb

    add     wq, 2
    add     wtwoq, 4
    add     whalfq, 1
    sub     xq, 2
    jg      .loop_scalar

    ; check if simd loop is need
    ; (the scalar head may already have consumed a short row)
    cmp     wq, 0
    jge     .end_line

.loop_simd:
%if mmsize == 32
    ; ymm: load the row's 128 src bytes so that the two 128-bit lanes of
    ; each register pair up correctly for the in-lane packuswb below.
    movu    xm2, [srcq + wtwoq         ]
    movu    xm3, [srcq + wtwoq + 16    ]
    movu    xm4, [srcq + wtwoq + 16 * 2]
    movu    xm5, [srcq + wtwoq + 16 * 3]

    vinserti128 m2, m2, [srcq + wtwoq + 16 * 4], 1
    vinserti128 m3, m3, [srcq + wtwoq + 16 * 5], 1
    vinserti128 m4, m4, [srcq + wtwoq + 16 * 6], 1
    vinserti128 m5, m5, [srcq + wtwoq + 16 * 7], 1
%else
    movu    m2, [srcq + wtwoq             ]
    movu    m3, [srcq + wtwoq + mmsize    ]
    movu    m4, [srcq + wtwoq + mmsize * 2]
    movu    m5, [srcq + wtwoq + mmsize * 3]
%endif

    ; extract y part 1
    ; Shift right one byte so Y lands in the even byte positions, mask to
    ; words, then pack pairs of registers down to bytes.
    RSHIFT_COPY    m6, m2, m4, 1, 0x20 ; UYVY UYVY -> YVYU YVY...
    pand           m6, m1; YxYx YxYx...

    RSHIFT_COPY    m7, m3, m5, 1, 0x20 ; UYVY UYVY -> YVYU YVY...
    pand           m7, m1 ; YxYx YxYx...

    packuswb       m6, m7 ; YYYY YYYY...
    movu    [ydstq + wq], m6

    ; extract y part 2
    ; Same, for the second half; 0x13 selects the complementary lanes on
    ; the ymm path so output ordering stays correct.
    RSHIFT_COPY    m6, m4, m2, 1, 0x13 ; UYVY UYVY -> YVYU YVY...
    pand           m6, m1; YxYx YxYx...

    RSHIFT_COPY    m7, m5, m3, 1, 0x13 ; UYVY UYVY -> YVYU YVY...
    pand           m7, m1 ; YxYx YxYx...

    packuswb       m6, m7 ; YYYY YYYY...
    movu    [ydstq + wq + mmsize], m6

    ; extract uv
    ; Keeping the low byte of each word drops the Y samples, leaving
    ; alternating U and V.
    pand    m2, m1   ; UxVx...
    pand    m3, m1   ; UxVx...
    pand    m4, m1   ; UxVx...
    pand    m5, m1   ; UxVx...

    packuswb m2, m3  ; UVUV...
    packuswb m4, m5  ; UVUV...

    ; U
    pand    m6, m2, m1 ; UxUx...
    pand    m7, m4, m1 ; UxUx...

    packuswb m6, m7 ; UUUU
    movu    [udstq + whalfq], m6

    ; V
    psrlw   m2, 8  ; VxVx...
    psrlw   m4, 8  ; VxVx...
    packuswb m2, m4 ; VVVV
    movu    [vdstq + whalfq], m2

    add     whalfq, mmsize
    add     wtwoq, mmsize * 4
    add     wq, mmsize * 2
    jl      .loop_simd

.end_line:
    ; Advance all pointers to the next row.
    add     srcq, src_strideq
    add     ydstq, lum_strideq
    add     udstq, chrom_strideq
    add     vdstq, chrom_strideq

    ;restore initial state of line variable
    mov     wq, back_wq
    mov     xq, wq
    mov     whalfq, wq
    shr     whalfq, 1 ; whalf = width / 2
    sub     hd, 1
    jg      .loop_line
    RET
%endmacro
; uyvytoyuv422 needs 14 GPRs (see cglobal above), so it is 64-bit only.
%if ARCH_X86_64
INIT_XMM sse2
UYVY_TO_YUV422

INIT_XMM avx
UYVY_TO_YUV422

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
UYVY_TO_YUV422
%endif
%endif