;******************************************************************************
;* x86-optimized input routines; does shuffling of packed
;* YUV formats into individual planes, and converts RGB
;* into YUV planes also.
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

%define RY 0x20DE
%define GY 0x4087
%define BY 0x0C88
%define RU 0xECFF
%define GU 0xDAC8
%define BU 0x3838
%define RV 0x3838
%define GV 0xD0E3
%define BV 0xF6E4
rgb_Yrnd:  times 4 dd 0x80100  ; (16 << 15) + (1 << 8)
rgb_UVrnd: times 4 dd 0x400100 ; (128 << 15) + (1 << 8)
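
; A few informal checks on the constants above (not used by the build):
;   RY + GY + BY = 0x20DE + 0x4087 + 0x0C88 = 28141 ~= ((235-16)/255) << 15,
;   i.e. the Y coefficients are 15-bit fixed-point weights of a limited-range
;   BT.601-style matrix; the U/V coefficients are the corresponding signed
;   16-bit values (e.g. RU = 0xECFF = -4865) and each U/V row sums to ~0.
;   rgb_Yrnd = (16 << 15) + (1 << 8) folds the black-level offset of 16
;   together with half an LSB of rounding for the ">> 9" done at the end of
;   the loops below; rgb_UVrnd does the same with the chroma bias of 128.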

%define bgr_Ycoeff_12x4 16*4 + 16* 0 + tableq
%define bgr_Ycoeff_3x56 16*4 + 16* 1 + tableq
%define rgb_Ycoeff_12x4 16*4 + 16* 2 + tableq
%define rgb_Ycoeff_3x56 16*4 + 16* 3 + tableq
%define bgr_Ucoeff_12x4 16*4 + 16* 4 + tableq
%define bgr_Ucoeff_3x56 16*4 + 16* 5 + tableq
%define rgb_Ucoeff_12x4 16*4 + 16* 6 + tableq
%define rgb_Ucoeff_3x56 16*4 + 16* 7 + tableq
%define bgr_Vcoeff_12x4 16*4 + 16* 8 + tableq
%define bgr_Vcoeff_3x56 16*4 + 16* 9 + tableq
%define rgb_Vcoeff_12x4 16*4 + 16*10 + tableq
%define rgb_Vcoeff_3x56 16*4 + 16*11 + tableq

%define rgba_Ycoeff_rb 16*4 + 16*12 + tableq
%define rgba_Ycoeff_br 16*4 + 16*13 + tableq
%define rgba_Ycoeff_ga 16*4 + 16*14 + tableq
%define rgba_Ycoeff_ag 16*4 + 16*15 + tableq
%define rgba_Ucoeff_rb 16*4 + 16*16 + tableq
%define rgba_Ucoeff_br 16*4 + 16*17 + tableq
%define rgba_Ucoeff_ga 16*4 + 16*18 + tableq
%define rgba_Ucoeff_ag 16*4 + 16*19 + tableq
%define rgba_Vcoeff_rb 16*4 + 16*20 + tableq
%define rgba_Vcoeff_br 16*4 + 16*21 + tableq
%define rgba_Vcoeff_ga 16*4 + 16*22 + tableq
%define rgba_Vcoeff_ag 16*4 + 16*23 + tableq

; bgr_Ycoeff_12x4: times 2 dw BY, GY, 0, BY
; bgr_Ycoeff_3x56: times 2 dw RY, 0, GY, RY
; rgb_Ycoeff_12x4: times 2 dw RY, GY, 0, RY
; rgb_Ycoeff_3x56: times 2 dw BY, 0, GY, BY
; bgr_Ucoeff_12x4: times 2 dw BU, GU, 0, BU
; bgr_Ucoeff_3x56: times 2 dw RU, 0, GU, RU
; rgb_Ucoeff_12x4: times 2 dw RU, GU, 0, RU
; rgb_Ucoeff_3x56: times 2 dw BU, 0, GU, BU
; bgr_Vcoeff_12x4: times 2 dw BV, GV, 0, BV
; bgr_Vcoeff_3x56: times 2 dw RV, 0, GV, RV
; rgb_Vcoeff_12x4: times 2 dw RV, GV, 0, RV
; rgb_Vcoeff_3x56: times 2 dw BV, 0, GV, BV
; rgba_Ycoeff_rb: times 4 dw RY, BY
; rgba_Ycoeff_br: times 4 dw BY, RY
; rgba_Ycoeff_ga: times 4 dw GY, 0
; rgba_Ycoeff_ag: times 4 dw 0, GY
; rgba_Ucoeff_rb: times 4 dw RU, BU
; rgba_Ucoeff_br: times 4 dw BU, RU
; rgba_Ucoeff_ga: times 4 dw GU, 0
; rgba_Ucoeff_ag: times 4 dw 0, GU
; rgba_Vcoeff_rb: times 4 dw RV, BV
; rgba_Vcoeff_br: times 4 dw BV, RV
; rgba_Vcoeff_ga: times 4 dw GV, 0
; rgba_Vcoeff_ag: times 4 dw 0, GV

shuf_rgb_12x4: db 0, 0x80, 1, 0x80, 2, 0x80, 3, 0x80, \
               6, 0x80, 7, 0x80, 8, 0x80, 9, 0x80
shuf_rgb_3x56: db 2, 0x80, 3, 0x80, 4, 0x80, 5, 0x80, \
               8, 0x80, 9, 0x80, 10, 0x80, 11, 0x80
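
; Worked example of the masks above (illustrative only): pshufb writes a zero
; byte wherever the mask byte is 0x80, so with bgr24 input bytes
; { B0,G0,R0, B1,G1,R1, B2,G2,R2, B3,G3,R3, ... } in a register,
;   pshufb with shuf_rgb_12x4 -> words { B0, G0, R0, B1, B2, G2, R2, B3 }
;   pshufb with shuf_rgb_3x56 -> words { R0, B1, G1, R1, R2, B3, G3, R3 }
; which is exactly the operand layout the *coeff_12x4 / *coeff_3x56 tables
; are interleaved for.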

SECTION .text

;-----------------------------------------------------------------------------
; RGB to Y/UV.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; and
; void <fmt>toUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
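; As a rough scalar reference for the converters below (an illustrative
; sketch only, not FFmpeg's actual C code; dst really receives 16-bit
; samples, which is why the functions double w into a byte offset):
;
;   static void rgb24ToY_ref(int16_t *dst, const uint8_t *src, int w)
;   {
;       for (int i = 0; i < w; i++) {
;           int r = src[3 * i + 0];   /* bgr24: b and r swapped */
;           int g = src[3 * i + 1];
;           int b = src[3 * i + 2];
;           dst[i] = (8414 * r + 16519 * g + 3208 * b + 0x80100) >> 9;     /* RY,GY,BY */
;       }
;   }
;
;   static void rgb24ToUV_ref(int16_t *dstU, int16_t *dstV, const uint8_t *src, int w)
;   {
;       for (int i = 0; i < w; i++) {
;           int r = src[3 * i + 0], g = src[3 * i + 1], b = src[3 * i + 2];
;           dstU[i] = (-4865 * r -  9528 * g + 14392 * b + 0x400100) >> 9; /* RU,GU,BU */
;           dstV[i] = (14392 * r - 12061 * g -  2332 * b + 0x400100) >> 9; /* RV,GV,BV */
;       }
;   }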

; %1 = nr. of XMM registers
; %2 = rgb or bgr
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
%if mmsize == 8
    mova m5, [%2_Ycoeff_12x4]
    mova m6, [%2_Ycoeff_3x56]
%define coeff1 m5
%define coeff2 m6
%elif ARCH_X86_64
    mova m8, [%2_Ycoeff_12x4]
    mova m9, [%2_Ycoeff_3x56]
%define coeff1 m8
%define coeff2 m9
%else ; x86-32 && mmsize == 16
%define coeff1 [%2_Ycoeff_12x4]
%define coeff2 [%2_Ycoeff_3x56]
%endif ; x86-32/64 && mmsize == 8/16
%if (ARCH_X86_64 || mmsize == 8) && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToY %+ SUFFIX).body
%else ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
.body:
%if cpuflag(ssse3)
    mova m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova m10, [shuf_rgb_3x56]
%define shuf_rgb2 m10
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
%if ARCH_X86_64
    movsxd wq, wd
%endif
    add wq, wq
    add dstq, wq
    neg wq
%if notcpuflag(ssse3)
    pxor m7, m7
%endif ; !cpuflag(ssse3)
    mova m4, [rgb_Yrnd]
.loop:
%if cpuflag(ssse3)
    movu m0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
    movu m2, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb m1, m0, shuf_rgb2 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb m0, shuf_rgb1 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    pshufb m3, m2, shuf_rgb2 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb m2, shuf_rgb1 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd m0, [srcq+0] ; (byte) { B0, G0, R0, B1 }
    movd m1, [srcq+2] ; (byte) { R0, B1, G1, R1 }
    movd m2, [srcq+6] ; (byte) { B2, G2, R2, B3 }
    movd m3, [srcq+8] ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16 ; i.e. sse2
    punpckldq m0, m2 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq m1, m3 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd m2, [srcq+12] ; (byte) { B4, G4, R4, B5 }
    movd m3, [srcq+14] ; (byte) { R4, B5, G5, R5 }
    movd m5, [srcq+18] ; (byte) { B6, G6, R6, B7 }
    movd m6, [srcq+20] ; (byte) { R6, B7, G7, R7 }
    punpckldq m2, m5 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq m3, m6 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16
    punpcklbw m0, m7 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw m1, m7 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    punpcklbw m2, m7 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw m3, m7 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add srcq, 3 * mmsize / 2
    pmaddwd m0, coeff1 ; (dword) { B0*BY + G0*GY, B1*BY, B2*BY + G2*GY, B3*BY }
    pmaddwd m1, coeff2 ; (dword) { R0*RY, G1*GY + R1*RY, R2*RY, G3*GY + R3*RY }
    pmaddwd m2, coeff1 ; (dword) { B4*BY + G4*GY, B5*BY, B6*BY + G6*GY, B7*BY }
    pmaddwd m3, coeff2 ; (dword) { R4*RY, G5*GY + R5*RY, R6*RY, G7*GY + R7*RY }
    paddd m0, m1 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[0-3]
    paddd m2, m3 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[4-7]
    paddd m0, m4 ; += rgb_Yrnd, i.e. (dword) { Y[0-3] }
    paddd m2, m4 ; += rgb_Yrnd, i.e. (dword) { Y[4-7] }
    psrad m0, 9
    psrad m2, 9
    packssdw m0, m2 ; (word) { Y[0-7] }
    mova [dstq+wq], m0
    add wq, mmsize
    jl .loop
    REP_RET
%endif ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
%endmacro
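
; Worked example of the coefficient split used above (bgr case, first output
; dword; an informal illustration): pmaddwd multiplies adjacent word pairs
; and sums each pair, so with coeff1 = { BY, GY, 0, BY, ... } and
; coeff2 = { RY, 0, GY, RY, ... }:
;   m0 = { B0, G0, R0, B1, ... } * coeff1 -> { B0*BY + G0*GY, B1*BY, ... }
;   m1 = { R0, B1, G1, R1, ... } * coeff2 -> { R0*RY, G1*GY + R1*RY, ... }
; and the following paddd merges the two halves of each pixel's dot product:
;   dword 0 = B0*BY + G0*GY + R0*RY,  dword 1 = B1*BY + G1*GY + R1*RY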

; %1 = nr. of XMM registers
; %2 = rgb or bgr
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
    mova m8, [%2_Ucoeff_12x4]
    mova m9, [%2_Ucoeff_3x56]
    mova m10, [%2_Vcoeff_12x4]
    mova m11, [%2_Vcoeff_3x56]
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
.body:
%if cpuflag(ssse3)
    mova m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova m12, [shuf_rgb_3x56]
%define shuf_rgb2 m12
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
%if ARCH_X86_64
    movsxd wq, dword r5m
%else ; x86-32
    mov wq, r5m
%endif
    add wq, wq
    add dstUq, wq
    add dstVq, wq
    neg wq
    mova m6, [rgb_UVrnd]
%if notcpuflag(ssse3)
    pxor m7, m7
%endif
.loop:
%if cpuflag(ssse3)
    movu m0, [srcq+0] ; (byte) { Bx, Gx, Rx }[0-3]
    movu m4, [srcq+12] ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb m1, m0, shuf_rgb2 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb m0, shuf_rgb1 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
    movd m0, [srcq+0] ; (byte) { B0, G0, R0, B1 }
    movd m1, [srcq+2] ; (byte) { R0, B1, G1, R1 }
    movd m4, [srcq+6] ; (byte) { B2, G2, R2, B3 }
    movd m5, [srcq+8] ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16
    punpckldq m0, m4 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq m1, m5 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd m4, [srcq+12] ; (byte) { B4, G4, R4, B5 }
    movd m5, [srcq+14] ; (byte) { R4, B5, G5, R5 }
%endif ; mmsize == 16
    punpcklbw m0, m7 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw m1, m7 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
%endif ; cpuflag(ssse3)
    pmaddwd m2, m0, coeffV1 ; (dword) { B0*BV + G0*GV, B1*BV, B2*BV + G2*GV, B3*BV }
    pmaddwd m3, m1, coeffV2 ; (dword) { R0*RV, G1*GV + R1*RV, R2*RV, G3*GV + R3*RV }
    pmaddwd m0, coeffU1 ; (dword) { B0*BU + G0*GU, B1*BU, B2*BU + G2*GU, B3*BU }
    pmaddwd m1, coeffU2 ; (dword) { R0*RU, G1*GU + R1*RU, R2*RU, G3*GU + R3*RU }
    paddd m0, m1 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[0-3]
    paddd m2, m3 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[0-3]
%if cpuflag(ssse3)
    pshufb m5, m4, shuf_rgb2 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb m4, shuf_rgb1 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
%if mmsize == 16
    movd m1, [srcq+18] ; (byte) { B6, G6, R6, B7 }
    movd m3, [srcq+20] ; (byte) { R6, B7, G7, R7 }
    punpckldq m4, m1 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq m5, m3 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16 && !cpuflag(ssse3)
    punpcklbw m4, m7 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw m5, m7 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add srcq, 3 * mmsize / 2
    pmaddwd m1, m4, coeffU1 ; (dword) { B4*BU + G4*GU, B5*BU, B6*BU + G6*GU, B7*BU }
    pmaddwd m3, m5, coeffU2 ; (dword) { R4*RU, G5*GU + R5*RU, R6*RU, G7*GU + R7*RU }
    pmaddwd m4, coeffV1 ; (dword) { B4*BV + G4*GV, B5*BV, B6*BV + G6*GV, B7*BV }
    pmaddwd m5, coeffV2 ; (dword) { R4*RV, G5*GV + R5*RV, R6*RV, G7*GV + R7*RV }
    paddd m1, m3 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[4-7]
    paddd m4, m5 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[4-7]
    paddd m0, m6 ; += rgb_UVrnd, i.e. (dword) { U[0-3] }
    paddd m2, m6 ; += rgb_UVrnd, i.e. (dword) { V[0-3] }
    paddd m1, m6 ; += rgb_UVrnd, i.e. (dword) { U[4-7] }
    paddd m4, m6 ; += rgb_UVrnd, i.e. (dword) { V[4-7] }
    psrad m0, 9
    psrad m2, 9
    psrad m1, 9
    psrad m4, 9
    packssdw m0, m1 ; (word) { U[0-7] }
    packssdw m2, m4 ; (word) { V[0-7] }
    mova [dstUq+wq], m0
    mova [dstVq+wq], m2
    add wq, mmsize
    jl .loop
    REP_RET
%endif ; ARCH_X86_64 && %0 == 3
%endmacro

; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
%macro RGB24_FUNCS 2
RGB24_TO_Y_FN %1, rgb
RGB24_TO_Y_FN %1, bgr, rgb
RGB24_TO_UV_FN %2, rgb
RGB24_TO_UV_FN %2, bgr, rgb
%endmacro
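
; Illustration of the instantiation pattern below (assuming FFmpeg's usual
; "ff_" private prefix): "INIT_XMM sse2" followed by "RGB24_FUNCS 10, 12"
; emits ff_rgb24ToY_sse2, ff_bgr24ToY_sse2, ff_rgb24ToUV_sse2 and
; ff_bgr24ToUV_sse2; the bgr entry points only load their own coefficients
; and, where enough registers are available (x86-64, or MMX for the Y case),
; tail-jump into the shared .body of the matching rgb function.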

%if ARCH_X86_32
INIT_MMX mmx
RGB24_FUNCS 0, 0
%endif

INIT_XMM sse2
RGB24_FUNCS 10, 12

INIT_XMM ssse3
RGB24_FUNCS 11, 13

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
RGB24_FUNCS 11, 13
%endif

; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_Y_FN 5-6
cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
    mova m5, [rgba_Ycoeff_%2%4]
    mova m6, [rgba_Ycoeff_%3%5]
%if %0 == 6
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToY %+ SUFFIX).body
%else ; %0 == 6
.body:
%if ARCH_X86_64
    movsxd wq, wd
%endif
    add wq, wq
    sub wq, mmsize - 1
    lea srcq, [srcq+wq*2]
    add dstq, wq
    neg wq
    mova m4, [rgb_Yrnd]
    pcmpeqb m7, m7
    psrlw m7, 8 ; (word) { 0x00ff } x4
.loop:
    ; FIXME check alignment and use mova
    movu m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu m2, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB 1, 0, 3, 2, 7 ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd m1, m5 ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd m0, m6 ; (dword) { Gx*GY }[0-3]
    pmaddwd m3, m5 ; (dword) { Bx*BY + Rx*RY }[4-7]
    pmaddwd m2, m6 ; (dword) { Gx*GY }[4-7]
    paddd m0, m4 ; += rgb_Yrnd
    paddd m2, m4 ; += rgb_Yrnd
    paddd m0, m1 ; (dword) { Y[0-3] }
    paddd m2, m3 ; (dword) { Y[4-7] }
    psrad m0, 9
    psrad m2, 9
    packssdw m0, m2 ; (word) { Y[0-7] }
    mova [dstq+wq], m0
    add wq, mmsize
    jl .loop
    sub wq, mmsize - 1
    jz .end
    add srcq, 2*mmsize - 2
    add dstq, mmsize - 1
.loop2:
    movd m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
    DEINTB 1, 0, 3, 2, 7 ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd m1, m5 ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd m0, m6 ; (dword) { Gx*GY }[0-3]
    paddd m0, m4 ; += rgb_Yrnd
    paddd m0, m1 ; (dword) { Y[0-3] }
    psrad m0, 9
    packssdw m0, m0 ; (word) { Y[0-7] }
    movd [dstq+wq], m0
    add wq, 2
    jl .loop2
.end:
    REP_RET
%endif ; %0 == 6
%endmacro
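
; Worked example of the 32-bit path above (rgba order, first pixel; an
; informal illustration): DEINTB splits each pixel's bytes by parity, so
;   m1 = even bytes as words { R0, B0, R1, B1, ... },  m0 = odd bytes { G0, A0, ... }
;   pmaddwd m1, [rgba_Ycoeff_rb] ({ RY, BY, ... }) -> { R0*RY + B0*BY, ... }
;   pmaddwd m0, [rgba_Ycoeff_ga] ({ GY, 0, ... })  -> { G0*GY, ... }
; so the alpha byte is multiplied by 0 and the final sum is the same
; RY*R + GY*G + BY*B dot product as in the 24-bit case.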

; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_UV_FN 5-6
cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
    mova m8, [rgba_Ucoeff_%2%4]
    mova m9, [rgba_Ucoeff_%3%5]
    mova m10, [rgba_Vcoeff_%2%4]
    mova m11, [rgba_Vcoeff_%3%5]
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [rgba_Ucoeff_%2%4]
%define coeffU2 [rgba_Ucoeff_%3%5]
%define coeffV1 [rgba_Vcoeff_%2%4]
%define coeffV2 [rgba_Vcoeff_%3%5]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 6
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 6
.body:
%if ARCH_X86_64
    movsxd wq, dword r5m
%else ; x86-32
    mov wq, r5m
%endif
    add wq, wq
    sub wq, mmsize - 1
    add dstUq, wq
    add dstVq, wq
    lea srcq, [srcq+wq*2]
    neg wq
    pcmpeqb m7, m7
    psrlw m7, 8 ; (word) { 0x00ff } x4
    mova m6, [rgb_UVrnd]
.loop:
    ; FIXME check alignment and use mova
    movu m0, [srcq+wq*2+0] ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu m4, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB 1, 0, 5, 4, 7 ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd m3, m1, coeffV1 ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd m2, m0, coeffV2 ; (dword) { Gx*GV }[0-3]
    pmaddwd m1, coeffU1 ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd m0, coeffU2 ; (dword) { Gx*GU }[0-3]
    paddd m3, m6 ; += rgb_UVrnd
    paddd m1, m6 ; += rgb_UVrnd
    paddd m2, m3 ; (dword) { V[0-3] }
    paddd m0, m1 ; (dword) { U[0-3] }
    pmaddwd m3, m5, coeffV1 ; (dword) { Bx*BV + Rx*RV }[4-7]
    pmaddwd m1, m4, coeffV2 ; (dword) { Gx*GV }[4-7]
    pmaddwd m5, coeffU1 ; (dword) { Bx*BU + Rx*RU }[4-7]
    pmaddwd m4, coeffU2 ; (dword) { Gx*GU }[4-7]
    paddd m3, m6 ; += rgb_UVrnd
    paddd m5, m6 ; += rgb_UVrnd
    psrad m0, 9
    paddd m1, m3 ; (dword) { V[4-7] }
    paddd m4, m5 ; (dword) { U[4-7] }
    psrad m2, 9
    psrad m4, 9
    psrad m1, 9
    packssdw m0, m4 ; (word) { U[0-7] }
    packssdw m2, m1 ; (word) { V[0-7] }
    mova [dstUq+wq], m0
    mova [dstVq+wq], m2
    add wq, mmsize
    jl .loop
    sub wq, mmsize - 1
    jz .end
    add srcq, 2*mmsize - 2
    add dstUq, mmsize - 1
    add dstVq, mmsize - 1
.loop2:
    movd m0, [srcq+wq*2] ; (byte) { Bx, Gx, Rx, xx }[0-3]
    DEINTB 1, 0, 5, 4, 7 ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd m3, m1, coeffV1 ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd m2, m0, coeffV2 ; (dword) { Gx*GV }[0-3]
    pmaddwd m1, coeffU1 ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd m0, coeffU2 ; (dword) { Gx*GU }[0-3]
    paddd m3, m6 ; += rgb_UVrnd
    paddd m1, m6 ; += rgb_UVrnd
    paddd m2, m3 ; (dword) { V[0-3] }
    paddd m0, m1 ; (dword) { U[0-3] }
    psrad m0, 9
    psrad m2, 9
    packssdw m0, m0 ; (word) { U[0-7] }
    packssdw m2, m2 ; (word) { V[0-7] }
    movd [dstUq+wq], m0
    movd [dstVq+wq], m2
    add wq, 2
    jl .loop2
.end:
    REP_RET
%endif ; ARCH_X86_64 && %0 == 6
%endmacro

; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
%macro RGB32_FUNCS 2
RGB32_TO_Y_FN %1, r, g, b, a
RGB32_TO_Y_FN %1, b, g, r, a, rgba
RGB32_TO_Y_FN %1, a, r, g, b, rgba
RGB32_TO_Y_FN %1, a, b, g, r, rgba
RGB32_TO_UV_FN %2, r, g, b, a
RGB32_TO_UV_FN %2, b, g, r, a, rgba
RGB32_TO_UV_FN %2, a, r, g, b, rgba
RGB32_TO_UV_FN %2, a, b, g, r, rgba
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
RGB32_FUNCS 0, 0
%endif

INIT_XMM sse2
RGB32_FUNCS 8, 12

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
RGB32_FUNCS 8, 12
%endif

;-----------------------------------------------------------------------------
; YUYV/UYVY/NV12/NV21 packed pixel shuffling.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; and
; void <fmt>toUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
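; Scalar reference for the shuffles below (an illustrative sketch with
; simplified prototypes, not FFmpeg's actual C fallback); w counts output
; samples in each case:
;
;   static void yuyvToY_ref(uint8_t *dst, const uint8_t *src, int w)
;   {
;       for (int i = 0; i < w; i++)
;           dst[i] = src[2 * i];          /* uyvy: src[2 * i + 1] */
;   }
;
;   static void yuyvToUV_ref(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, int w)
;   {
;       for (int i = 0; i < w; i++) {
;           dstU[i] = src[4 * i + 1];     /* uyvy: src[4 * i + 0] */
;           dstV[i] = src[4 * i + 3];     /* uyvy: src[4 * i + 2] */
;       }
;   }
;
;   static void nv12ToUV_ref(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, int w)
;   {
;       for (int i = 0; i < w; i++) {
;           dstU[i] = src[2 * i + 0];     /* nv21: U and V swapped */
;           dstV[i] = src[2 * i + 1];
;       }
;   }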

; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_Y 2
.loop_%1:
    mov%1 m0, [srcq+wq*2] ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*2+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
%ifidn %2, yuyv
    pand m0, m2 ; (word) { Y0, Y1, ..., Y7 }
    pand m1, m2 ; (word) { Y8, Y9, ..., Y15 }
%else ; uyvy
    psrlw m0, 8 ; (word) { Y0, Y1, ..., Y7 }
    psrlw m1, 8 ; (word) { Y8, Y9, ..., Y15 }
%endif ; yuyv/uyvy
    packuswb m0, m1 ; (byte) { Y0, ..., Y15 }
    mova [dstq+wq], m0
    add wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. YUYV+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 5, 5, %1, dst, unused0, unused1, src, w
%if ARCH_X86_64
    movsxd wq, wd
%endif
    add dstq, wq
%if mmsize == 16
    test srcq, 15
%endif
    lea srcq, [srcq+wq*2]
%ifidn %2, yuyv
    pcmpeqb m2, m2 ; (byte) { 0xff } x 16
    psrlw m2, 8 ; (word) { 0x00ff } x 8
%endif ; yuyv
%if mmsize == 16
    jnz .loop_u_start
    neg wq
    LOOP_YUYV_TO_Y a, %2
.loop_u_start:
    neg wq
    LOOP_YUYV_TO_Y u, %2
%else ; mmsize == 8
    neg wq
    LOOP_YUYV_TO_Y a, %2
%endif ; mmsize == 8/16
%endmacro
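
; Note on the aligned/unaligned dispatch above: "test srcq, 15" runs before
; the mask setup, and lea/pcmpeqb/psrlw leave EFLAGS untouched, so the later
; "jnz .loop_u_start" still reflects the source-pointer alignment and only
; the matching one of the two generated loops is executed per call.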

; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_UV 2
.loop_%1:
%ifidn %2, yuyv
    mov%1 m0, [srcq+wq*4] ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    psrlw m0, 8 ; (word) { U0, V0, ..., U3, V3 }
    psrlw m1, 8 ; (word) { U4, V4, ..., U7, V7 }
%else ; uyvy
%if cpuflag(avx)
    vpand m0, m2, [srcq+wq*4] ; (word) { U0, V0, ..., U3, V3 }
    vpand m1, m2, [srcq+wq*4+mmsize] ; (word) { U4, V4, ..., U7, V7 }
%else
    mov%1 m0, [srcq+wq*4] ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    pand m0, m2 ; (word) { U0, V0, ..., U3, V3 }
    pand m1, m2 ; (word) { U4, V4, ..., U7, V7 }
%endif
%endif ; yuyv/uyvy
    packuswb m0, m1 ; (byte) { U0, V0, ..., U7, V7 }
    pand m1, m0, m2 ; (word) { U0, U1, ..., U7 }
    psrlw m0, 8 ; (word) { V0, V1, ..., V7 }
%if mmsize == 16
    packuswb m1, m0 ; (byte) { U0, ... U7, V0, ... V7 }
    movh [dstUq+wq], m1
    movhps [dstVq+wq], m1
%else ; mmsize == 8
    packuswb m1, m1 ; (byte) { U0, ... U3 }
    packuswb m0, m0 ; (byte) { V0, ... V3 }
    movh [dstUq+wq], m1
    movh [dstVq+wq], m0
%endif ; mmsize == 8/16
    add wq, mmsize / 2
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. UYVY+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if ARCH_X86_64
    movsxd wq, dword r5m
%else ; x86-32
    mov wq, r5m
%endif
    add dstUq, wq
    add dstVq, wq
%if mmsize == 16 && %0 == 2
    test srcq, 15
%endif
    lea srcq, [srcq+wq*4]
    pcmpeqb m2, m2 ; (byte) { 0xff } x 16
    psrlw m2, 8 ; (word) { 0x00ff } x 8
; NOTE: if uyvy+avx, u/a are identical
%if mmsize == 16 && %0 == 2
    jnz .loop_u_start
    neg wq
    LOOP_YUYV_TO_UV a, %2
.loop_u_start:
    neg wq
    LOOP_YUYV_TO_UV u, %2
%else ; mmsize == 8
    neg wq
    LOOP_YUYV_TO_UV a, %2
%endif ; mmsize == 8/16
%endmacro

; %1 = a (aligned) or u (unaligned)
; %2 = nv12 or nv21
%macro LOOP_NVXX_TO_UV 2
.loop_%1:
    mov%1 m0, [srcq+wq*2] ; (byte) { U0, V0, U1, V1, ... }
    mov%1 m1, [srcq+wq*2+mmsize] ; (byte) { U8, V8, U9, V9, ... }
    pand m2, m0, m5 ; (word) { U0, U1, ..., U7 }
    pand m3, m1, m5 ; (word) { U8, U9, ..., U15 }
    psrlw m0, 8 ; (word) { V0, V1, ..., V7 }
    psrlw m1, 8 ; (word) { V8, V9, ..., V15 }
    packuswb m2, m3 ; (byte) { U0, ..., U15 }
    packuswb m0, m1 ; (byte) { V0, ..., V15 }
%ifidn %2, nv12
    mova [dstUq+wq], m2
    mova [dstVq+wq], m0
%else ; nv21
    mova [dstVq+wq], m2
    mova [dstUq+wq], m0
%endif ; nv12/21
    add wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = nv12 or nv21
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if ARCH_X86_64
    movsxd wq, dword r5m
%else ; x86-32
    mov wq, r5m
%endif
    add dstUq, wq
    add dstVq, wq
%if mmsize == 16
    test srcq, 15
%endif
    lea srcq, [srcq+wq*2]
    pcmpeqb m5, m5 ; (byte) { 0xff } x 16
    psrlw m5, 8 ; (word) { 0x00ff } x 8
%if mmsize == 16
    jnz .loop_u_start
    neg wq
    LOOP_NVXX_TO_UV a, %2
.loop_u_start:
    neg wq
    LOOP_NVXX_TO_UV u, %2
%else ; mmsize == 8
    neg wq
    LOOP_NVXX_TO_UV a, %2
%endif ; mmsize == 8/16
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
YUYV_TO_Y_FN 0, yuyv
YUYV_TO_Y_FN 0, uyvy
YUYV_TO_UV_FN 0, yuyv
YUYV_TO_UV_FN 0, uyvy
NVXX_TO_UV_FN 0, nv12
NVXX_TO_UV_FN 0, nv21
%endif

INIT_XMM sse2
YUYV_TO_Y_FN 3, yuyv
YUYV_TO_Y_FN 2, uyvy
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
; in theory, we could write a yuy2-to-y using vpand (i.e. AVX), but
; that's not faster in practice
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy, 1
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21
%endif