;******************************************************************************
;* x86-optimized horizontal/vertical line scaling functions
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*                    Kieran Kunhya <kieran@kunhya.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION_RODATA

max_19bit_int: times 4 dd 0x7ffff
max_19bit_flt: times 4 dd 524287.0
minshort:      times 8 dw 0x8000
unicoeff:      times 4 dd 0x20000000

yuv2yuvX_16_start: times 4 dd 0x4000 - 0x40000000
yuv2yuvX_10_start: times 4 dd 0x10000
yuv2yuvX_9_start:  times 4 dd 0x20000
yuv2yuvX_10_upper: times 8 dw 0x3ff
yuv2yuvX_9_upper:  times 8 dw 0x1ff

SECTION .text

;-----------------------------------------------------------------------------
; horizontal line scaling
;
; void hscale<source_width>to<intermediate_nbits>_<filterSize>_<opt>
;                               (SwsContext *c, int{16,32}_t *dst,
;                                int dstW, const uint{8,16}_t *src,
;                                const int16_t *filter,
;                                const int16_t *filterPos, int filterSize);
;
; Scale one horizontal line. Input is either 8-bit or 16-bit ($source_width
; can be 8, 9, 10 or 16; the difference is whether the input has to be
; downscaled before multiplying). The filter coefficients are 14-bit. Output
; is either 15-bit (in int16_t) or 19-bit (in int32_t), as given by
; $intermediate_nbits. Each output pixel is generated from $filterSize input
; pixels; the position of the first pixel is given in filterPos[nOutputPixel].
;-----------------------------------------------------------------------------
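
; As a rough C-level sketch of what the generated functions compute (for
; illustration only, not part of the build; FFMIN comes from libavutil, the
; loop variables are just placeholders), the 8-bit-input, 15-bit-output case
; is approximately:
;
;     for (int i = 0; i < dstW; i++) {
;         int srcPos = filterPos[i];
;         int val    = 0;
;         for (int j = 0; j < filterSize; j++)
;             val += src[srcPos + j] * filter[filterSize * i + j];
;         // 8-bit src * 14-bit filter; >> (14 + 8 - 15) keeps 15 bits
;         dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
;     }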

; SCALE_FUNC source_width, intermediate_nbits, filtersize, filtersuffix, opt, n_args, n_xmm
%macro SCALE_FUNC 7
cglobal hscale%1to%2_%4_%5, %6, 7, %7
%ifdef ARCH_X86_64
    movsxd r2, r2d
%endif ; x86-64
%if %2 == 19
%if mmsize == 8 ; mmx
    mova m2, [max_19bit_int]
%elifidn %5, sse4
    mova m2, [max_19bit_int]
%else ; ssse3/sse2
    mova m2, [max_19bit_flt]
%endif ; mmx/sse2/ssse3/sse4
%endif ; %2 == 19
%if %1 == 16
    mova m6, [minshort]
    mova m7, [unicoeff]
%elif %1 == 8
    pxor m3, m3
%endif ; %1 == 8/16
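; (note: for 16-bit input, pmaddwd multiplies signed words, so the code below
;  first subtracts minshort (0x8000) from every source sample; since
;  sum((src[i]-0x8000)*filter[i]) == sum(src[i]*filter[i]) - 0x8000*sum(filter[i])
;  and the filter nominally sums to 0x4000 (1.0 in Q14), adding back
;  unicoeff = 0x8000*0x4000 = 0x20000000 after the horizontal add restores
;  the unsigned result)
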
%if %1 == 8
%define movlh movd
%define movbh movh
%define srcmul 1
%else ; %1 == 9-16
%define movlh movq
%define movbh movu
%define srcmul 2
%endif ; %1 == 8/9-16
%ifnidn %3, X
    ; setup loop
%if %3 == 8
    shl r2, 1 ; this allows *16 (i.e. now *8) in lea instructions for the 8-tap filter
%define r2shr 1
%else ; %3 == 4
%define r2shr 0
%endif ; %3 == 8
    lea r4, [r4+r2*8]
%if %2 == 15
    lea r1, [r1+r2*(2>>r2shr)]
%else ; %2 == 19
    lea r1, [r1+r2*(4>>r2shr)]
%endif ; %2 == 15/19
    lea r5, [r5+r2*(2>>r2shr)]
    neg r2
.loop:
%if %3 == 4 ; filterSize == 4 scaling
    ; load 2x4 or 4x4 source pixels into m0/m1
    movsx r0, word [r5+r2*2+0] ; filterPos[0]
    movsx r6, word [r5+r2*2+2] ; filterPos[1]
    movlh m0, [r3+r0*srcmul] ; src[filterPos[0] + {0,1,2,3}]
%if mmsize == 8
    movlh m1, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%else ; mmsize == 16
%if %1 > 8
    movhps m0, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%else ; %1 == 8
    movd m4, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%endif
    movsx r0, word [r5+r2*2+4] ; filterPos[2]
    movsx r6, word [r5+r2*2+6] ; filterPos[3]
    movlh m1, [r3+r0*srcmul] ; src[filterPos[2] + {0,1,2,3}]
%if %1 > 8
    movhps m1, [r3+r6*srcmul] ; src[filterPos[3] + {0,1,2,3}]
%else ; %1 == 8
    movd m5, [r3+r6*srcmul] ; src[filterPos[3] + {0,1,2,3}]
    punpckldq m0, m4
    punpckldq m1, m5
%endif ; %1 == 8 && %5 <= ssse3
%endif ; mmsize == 8/16
%if %1 == 8
    punpcklbw m0, m3 ; byte -> word
    punpcklbw m1, m3 ; byte -> word
%endif ; %1 == 8
    ; multiply with filter coefficients
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw m0, m6
    psubw m1, m6
%endif ; %1 == 16
    pmaddwd m0, [r4+r2*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
    pmaddwd m1, [r4+r2*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
    ; add up horizontally (4 srcpix * 4 coefficients -> 1 dstpix)
%if mmsize == 8 ; mmx
    movq m4, m0
    punpckldq m0, m1
    punpckhdq m4, m1
    paddd m0, m4
%elifidn %5, sse2
    mova m4, m0
    shufps m0, m1, 10001000b
    shufps m4, m1, 11011101b
    paddd m0, m4
%else ; ssse3/sse4
    phaddd m0, m1 ; filter[{ 0, 1, 2, 3}]*src[filterPos[0]+{0,1,2,3}],
                  ; filter[{ 4, 5, 6, 7}]*src[filterPos[1]+{0,1,2,3}],
                  ; filter[{ 8, 9,10,11}]*src[filterPos[2]+{0,1,2,3}],
                  ; filter[{12,13,14,15}]*src[filterPos[3]+{0,1,2,3}]
%endif ; mmx/sse2/ssse3/sse4
%else ; %3 == 8, i.e. filterSize == 8 scaling
    ; load 2x8 or 4x8 source pixels into m0, m1, m4 and m5
    movsx r0, word [r5+r2*1+0] ; filterPos[0]
    movsx r6, word [r5+r2*1+2] ; filterPos[1]
    movbh m0, [r3+ r0 *srcmul] ; src[filterPos[0] + {0,1,2,3,4,5,6,7}]
%if mmsize == 8
    movbh m1, [r3+(r0+4)*srcmul] ; src[filterPos[0] + {4,5,6,7}]
    movbh m4, [r3+ r6 *srcmul] ; src[filterPos[1] + {0,1,2,3}]
    movbh m5, [r3+(r6+4)*srcmul] ; src[filterPos[1] + {4,5,6,7}]
%else ; mmsize == 16
    movbh m1, [r3+ r6 *srcmul] ; src[filterPos[1] + {0,1,2,3,4,5,6,7}]
    movsx r0, word [r5+r2*1+4] ; filterPos[2]
    movsx r6, word [r5+r2*1+6] ; filterPos[3]
    movbh m4, [r3+ r0 *srcmul] ; src[filterPos[2] + {0,1,2,3,4,5,6,7}]
    movbh m5, [r3+ r6 *srcmul] ; src[filterPos[3] + {0,1,2,3,4,5,6,7}]
%endif ; mmsize == 8/16
%if %1 == 8
    punpcklbw m0, m3 ; byte -> word
    punpcklbw m1, m3 ; byte -> word
    punpcklbw m4, m3 ; byte -> word
    punpcklbw m5, m3 ; byte -> word
%endif ; %1 == 8
    ; multiply
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw m0, m6
    psubw m1, m6
    psubw m4, m6
    psubw m5, m6
%endif ; %1 == 16
    pmaddwd m0, [r4+r2*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
    pmaddwd m1, [r4+r2*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
    pmaddwd m4, [r4+r2*8+mmsize*2] ; *= filter[{16,17,..,22,23}]
    pmaddwd m5, [r4+r2*8+mmsize*3] ; *= filter[{24,25,..,30,31}]
    ; add up horizontally (8 srcpix * 8 coefficients -> 1 dstpix)
%if mmsize == 8
    paddd m0, m1
    paddd m4, m5
    movq m1, m0
    punpckldq m0, m4
    punpckhdq m1, m4
    paddd m0, m1
%elifidn %5, sse2
%if %1 == 8
%define mex m6
%else
%define mex m3
%endif
    ; emulate horizontal add as transpose + vertical add
    mova mex, m0
    punpckldq m0, m1
    punpckhdq mex, m1
    paddd m0, mex
    mova m1, m4
    punpckldq m4, m5
    punpckhdq m1, m5
    paddd m4, m1
    mova m1, m0
    punpcklqdq m0, m4
    punpckhqdq m1, m4
    paddd m0, m1
%else ; ssse3/sse4
    ; FIXME if we rearrange the filter in pairs of 4, we can
    ; load pixels likewise and use 2 x paddd + phaddd instead
    ; of 3 x phaddd here, faster on older cpus
    phaddd m0, m1
    phaddd m4, m5
    phaddd m0, m4 ; filter[{ 0, 1,..., 6, 7}]*src[filterPos[0]+{0,1,...,6,7}],
                  ; filter[{ 8, 9,...,14,15}]*src[filterPos[1]+{0,1,...,6,7}],
                  ; filter[{16,17,...,22,23}]*src[filterPos[2]+{0,1,...,6,7}],
                  ; filter[{24,25,...,30,31}]*src[filterPos[3]+{0,1,...,6,7}]
%endif ; mmx/sse2/ssse3/sse4
%endif ; %3 == 4/8
%else ; %3 == X, i.e. any filterSize scaling
%ifidn %4, X4
%define r6sub 4
%else ; %4 == X || %4 == X8
%define r6sub 0
%endif ; %4 ==/!= X4
%ifdef ARCH_X86_64
    push r12
    movsxd r6, r6d ; filterSize
    lea r12, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
%define src_reg r11
%define r1x r10
%define filter2 r12
%else ; x86-32
    lea r0, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
    mov r6m, r0
%define src_reg r3
%define r1x r1
%define filter2 r6m
%endif ; x86-32/64
    lea r5, [r5+r2*2]
%if %2 == 15
    lea r1, [r1+r2*2]
%else ; %2 == 19
    lea r1, [r1+r2*4]
%endif ; %2 == 15/19
    movifnidn r1mp, r1
    neg r2
.loop:
    movsx r0, word [r5+r2*2+0] ; filterPos[0]
    movsx r1x, word [r5+r2*2+2] ; filterPos[1]
    ; FIXME maybe do 4px/iteration on x86-64 (x86-32 wouldn't have enough regs)?
    pxor m4, m4
    pxor m5, m5
    mov src_reg, r3mp
.innerloop:
    ; load 2x4 (mmx) or 2x8 (sse) source pixels into m0/m1 -> m4/m5
    movbh m0, [src_reg+r0 *srcmul] ; src[filterPos[0] + {0,1,2,3(,4,5,6,7)}]
    movbh m1, [src_reg+(r1x+r6sub)*srcmul] ; src[filterPos[1] + {0,1,2,3(,4,5,6,7)}]
%if %1 == 8
    punpcklbw m0, m3
    punpcklbw m1, m3
%endif ; %1 == 8
    ; multiply
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw m0, m6
    psubw m1, m6
%endif ; %1 == 16
    pmaddwd m0, [r4] ; filter[{0,1,2,3(,4,5,6,7)}]
    pmaddwd m1, [r4+(r6+r6sub)*2] ; filter[filterSize+{0,1,2,3(,4,5,6,7)}]
    paddd m4, m0
    paddd m5, m1
    add r4, mmsize
    add src_reg, srcmul*mmsize/2
    cmp src_reg, filter2 ; while (src += 4) < &src[filterSize]
    jl .innerloop
%ifidn %4, X4
    movsx r1x, word [r5+r2*2+2] ; filterPos[1]
    movlh m0, [src_reg+r0 *srcmul] ; split last 4 srcpx of dstpx[0]
    sub r1x, r6 ; and first 4 srcpx of dstpx[1]
%if %1 > 8
    movhps m0, [src_reg+(r1x+r6sub)*srcmul]
%else ; %1 == 8
    movd m1, [src_reg+(r1x+r6sub)*srcmul]
    punpckldq m0, m1
%endif ; %1 == 8 && %5 <= ssse3
%if %1 == 8
    punpcklbw m0, m3
%endif ; %1 == 8
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw m0, m6
%endif ; %1 == 16
    pmaddwd m0, [r4]
%endif ; %4 == X4
    lea r4, [r4+(r6+r6sub)*2]
%if mmsize == 8 ; mmx
    movq m0, m4
    punpckldq m4, m5
    punpckhdq m0, m5
    paddd m0, m4
%else ; mmsize == 16
%ifidn %5, sse2
    mova m1, m4
    punpcklqdq m4, m5
    punpckhqdq m1, m5
    paddd m4, m1
%else ; ssse3/sse4
    phaddd m4, m5
%endif ; sse2/ssse3/sse4
%ifidn %4, X4
    paddd m4, m0
%endif ; %4 == X4
%ifidn %5, sse2
    pshufd m4, m4, 11011000b
    movhlps m0, m4
    paddd m0, m4
%else ; ssse3/sse4
    phaddd m4, m4
    SWAP 0, 4
%endif ; sse2/ssse3/sse4
%endif ; mmsize == 8/16
%endif ; %3 ==/!= X
%if %1 == 16 ; add 0x8000 * sum(coeffs), i.e. back from signed -> unsigned
    paddd m0, m7
%endif ; %1 == 16
    ; clip, store
    psrad m0, 14 + %1 - %2
%ifidn %3, X
    movifnidn r1, r1mp
%endif ; %3 == X
%if %2 == 15
    packssdw m0, m0
%ifnidn %3, X
    movh [r1+r2*(2>>r2shr)], m0
%else ; %3 == X
    movd [r1+r2*2], m0
%endif ; %3 ==/!= X
%else ; %2 == 19
%if mmsize == 8
    PMINSD_MMX m0, m2, m4
%elifidn %5, sse4
    pminsd m0, m2
%else ; sse2/ssse3
    cvtdq2ps m0, m0
    minps m0, m2
    cvtps2dq m0, m0
%endif ; mmx/sse2/ssse3/sse4
%ifnidn %3, X
    mova [r1+r2*(4>>r2shr)], m0
%else ; %3 == X
    movq [r1+r2*4], m0
%endif ; %3 ==/!= X
%endif ; %2 == 15/19
%ifnidn %3, X
    add r2, (mmsize<<r2shr)/4 ; both 8tap and 4tap really only do 4 pixels (or for mmx: 2 pixels)
                              ; per iteration. see "shl r2,1" above as for why we do this
%else ; %3 == X
    add r2, 2
%endif ; %3 ==/!= X
    jl .loop
%ifnidn %3, X
    REP_RET
%else ; %3 == X
%ifdef ARCH_X86_64
    pop r12
    RET
%else ; x86-32
    REP_RET
%endif ; x86-32/64
%endif ; %3 ==/!= X
%endmacro

; SCALE_FUNCS source_width, intermediate_nbits, opt, n_xmm
%macro SCALE_FUNCS 4
SCALE_FUNC %1, %2, 4, 4, %3, 6, %4
SCALE_FUNC %1, %2, 8, 8, %3, 6, %4
%if mmsize == 8
SCALE_FUNC %1, %2, X, X, %3, 7, %4
%else
SCALE_FUNC %1, %2, X, X4, %3, 7, %4
SCALE_FUNC %1, %2, X, X8, %3, 7, %4
%endif
%endmacro

; SCALE_FUNCS2 opt, 8_xmm_args, 9to10_xmm_args, 16_xmm_args
%macro SCALE_FUNCS2 4
%ifnidn %1, sse4
SCALE_FUNCS  8, 15, %1, %2
SCALE_FUNCS  9, 15, %1, %3
SCALE_FUNCS 10, 15, %1, %3
SCALE_FUNCS 16, 15, %1, %4
%endif ; !sse4
SCALE_FUNCS  8, 19, %1, %2
SCALE_FUNCS  9, 19, %1, %3
SCALE_FUNCS 10, 19, %1, %3
SCALE_FUNCS 16, 19, %1, %4
%endmacro

%ifdef ARCH_X86_32
INIT_MMX
SCALE_FUNCS2 mmx,   0, 0, 0
%endif
INIT_XMM
SCALE_FUNCS2 sse2,  6, 7, 8
SCALE_FUNCS2 ssse3, 6, 6, 8
SCALE_FUNCS2 sse4,  6, 6, 8

;-----------------------------------------------------------------------------
; vertical line scaling
;
; void yuv2plane1_<output_size>_<opt>(const int16_t *src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
; and
; void yuv2planeX_<output_size>_<opt>(const int16_t *filter, int filterSize,
;                                     const int16_t **src, uint8_t *dst, int dstW,
;                                     const uint8_t *dither, int offset)
;
; Scale one (yuv2plane1) or $filterSize (yuv2planeX) lines of source data to
; generate one line of output data. The input is 15-bit in int16_t if
; $output_size is 8, 9 or 10, and 19-bit in int32_t if $output_size is 16.
; The filter coefficients are 12-bit. $filterSize is a multiple of 2.
; $offset is either 0 or 3. $dither holds 8 values.
;-----------------------------------------------------------------------------
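
; As a rough C-level sketch (for illustration only, not part of the build;
; av_clip_uint8 comes from libavutil, the loop variables are placeholders),
; the 8-bit-output yuv2planeX case computes approximately:
;
;     for (int i = 0; i < dstW; i++) {
;         int val = dither[(i + offset) & 7] << 12;
;         for (int j = 0; j < filterSize; j++)
;             val += src[j][i] * filter[j];
;         // 15-bit src * 12-bit filter -> 27-bit sum; >> (27 - 8) for 8-bit out
;         dst[i] = av_clip_uint8(val >> 19);
;     }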

%macro yuv2planeX_fn 4
%ifdef ARCH_X86_32
%define cntr_reg r1
%define movsx mov
%else
%define cntr_reg r11
%define movsx movsxd
%endif
cglobal yuv2planeX_%2_%1, %4, 7, %3
%if %2 == 8 || %2 == 9 || %2 == 10
    pxor m6, m6
%endif ; %2 == 8/9/10
%if %2 == 8
%ifdef ARCH_X86_32
%assign pad 0x2c - (stack_offset & 15)
    SUB rsp, pad
%define m_dith m7
%else ; x86-64
%define m_dith m9
%endif ; x86-32/64
    ; create registers holding dither
    movq m_dith, [r5] ; dither
    test r6d, r6d
    jz .no_rot
%if mmsize == 16
    punpcklqdq m_dith, m_dith
%endif ; mmsize == 16
    PALIGNR m_dith, m_dith, 3, m0
.no_rot:
%if mmsize == 16
    punpcklbw m_dith, m6
%ifdef ARCH_X86_64
    punpcklwd m8, m_dith, m6
    pslld m8, 12
%else ; x86-32
    punpcklwd m5, m_dith, m6
    pslld m5, 12
%endif ; x86-32/64
    punpckhwd m_dith, m6
    pslld m_dith, 12
%ifdef ARCH_X86_32
    mova [rsp+ 0], m5
    mova [rsp+16], m_dith
%endif
%else ; mmsize == 8
    punpcklbw m5, m_dith, m6
    punpckhbw m_dith, m6
    punpcklwd m4, m5, m6
    punpckhwd m5, m6
    punpcklwd m3, m_dith, m6
    punpckhwd m_dith, m6
    pslld m4, 12
    pslld m5, 12
    pslld m3, 12
    pslld m_dith, 12
    mova [rsp+ 0], m4
    mova [rsp+ 8], m5
    mova [rsp+16], m3
    mova [rsp+24], m_dith
%endif ; mmsize == 8/16
%endif ; %2 == 8
    xor r5, r5
.pixelloop:
%assign %%i 0
; the rep here is for the 8bit output mmx case, where dither covers
; 8 pixels but we can only handle 2 pixels per register, and thus 4
; pixels per iteration. In order to not have to keep track of where
; we are w.r.t. dithering, we unroll the mmx/8bit loop x2.
%if %2 == 8
%rep 16/mmsize
%endif ; %2 == 8
%if %2 == 8
%ifdef ARCH_X86_32
    mova m2, [rsp+mmsize*(0+%%i)]
    mova m1, [rsp+mmsize*(1+%%i)]
%else ; x86-64
    mova m2, m8
    mova m1, m_dith
%endif ; x86-32/64
%else ; %2 == 9/10/16
    mova m1, [yuv2yuvX_%2_start]
    mova m2, m1
%endif ; %2 == 8/9/10/16
    movsx cntr_reg, r1m
.filterloop_ %+ %%i:
    ; input pixels
    mov r6, [r2+gprsize*cntr_reg-2*gprsize]
%if %2 == 16
    mova m3, [r6+r5*4]
    mova m5, [r6+r5*4+mmsize]
%else ; %2 == 8/9/10
    mova m3, [r6+r5*2]
%endif ; %2 == 8/9/10/16
    mov r6, [r2+gprsize*cntr_reg-gprsize]
%if %2 == 16
    mova m4, [r6+r5*4]
    mova m6, [r6+r5*4+mmsize]
%else ; %2 == 8/9/10
    mova m4, [r6+r5*2]
%endif ; %2 == 8/9/10/16
    ; coefficients
    movd m0, [r0+2*cntr_reg-4] ; coeff[0], coeff[1]
%if %2 == 16
    pshuflw m7, m0, 0 ; coeff[0]
    pshuflw m0, m0, 0x55 ; coeff[1]
    pmovsxwd m7, m7 ; word -> dword
    pmovsxwd m0, m0 ; word -> dword
    pmulld m3, m7
    pmulld m5, m7
    pmulld m4, m0
    pmulld m6, m0
    paddd m2, m3
    paddd m1, m5
    paddd m2, m4
    paddd m1, m6
%else ; %2 == 10/9/8
    punpcklwd m5, m3, m4
    punpckhwd m3, m4
    SPLATD m0, m0
    pmaddwd m5, m0
    pmaddwd m3, m0
    paddd m2, m5
    paddd m1, m3
%endif ; %2 == 8/9/10/16
    sub cntr_reg, 2
    jg .filterloop_ %+ %%i
%if %2 == 16
    psrad m2, 31 - %2
    psrad m1, 31 - %2
%else ; %2 == 10/9/8
    psrad m2, 27 - %2
    psrad m1, 27 - %2
%endif ; %2 == 8/9/10/16
%if %2 == 8
    packssdw m2, m1
    packuswb m2, m2
    movh [r3+r5*1], m2
%else ; %2 == 9/10/16
%if %2 == 16
    packssdw m2, m1
    paddw m2, [minshort]
%else ; %2 == 9/10
%ifidn %1, sse4
    packusdw m2, m1
%elifidn %1, avx
    packusdw m2, m1
%else ; mmx2/sse2
    packssdw m2, m1
    pmaxsw m2, m6
%endif ; mmx2/sse2/sse4/avx
    pminsw m2, [yuv2yuvX_%2_upper]
%endif ; %2 == 9/10/16
    mova [r3+r5*2], m2
%endif ; %2 == 8/9/10/16
    add r5, mmsize/2
    sub r4d, mmsize/2
%if %2 == 8
%assign %%i %%i+2
%endrep
%endif ; %2 == 8
    jg .pixelloop
%if %2 == 8
%ifdef ARCH_X86_32
    ADD rsp, pad
    RET
%else ; x86-64
    REP_RET
%endif ; x86-32/64
%else ; %2 == 9/10/16
    REP_RET
%endif ; %2 == 8/9/10/16
%endmacro

%define PALIGNR PALIGNR_MMX
%ifdef ARCH_X86_32
INIT_MMX
yuv2planeX_fn mmx,  8,  0, 7
yuv2planeX_fn mmx2, 9,  0, 5
yuv2planeX_fn mmx2, 10, 0, 5
%endif

INIT_XMM
yuv2planeX_fn sse2, 8,  10, 7
yuv2planeX_fn sse2, 9,  7,  5
yuv2planeX_fn sse2, 10, 7,  5

%define PALIGNR PALIGNR_SSSE3
yuv2planeX_fn sse4, 8,  10, 7
yuv2planeX_fn sse4, 9,  7,  5
yuv2planeX_fn sse4, 10, 7,  5
yuv2planeX_fn sse4, 16, 8,  5

INIT_AVX
yuv2planeX_fn avx,  8,  10, 7
yuv2planeX_fn avx,  9,  7,  5
yuv2planeX_fn avx,  10, 7,  5