scale.asm

;******************************************************************************
;* x86-optimized horizontal line scaling functions
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

max_19bit_int: times 4 dd 0x7ffff
max_19bit_flt: times 4 dd 524287.0
minshort:      times 8 dw 0x8000
unicoeff:      times 4 dd 0x20000000
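
; Notes on the constants above (as used below): max_19bit_int/_flt hold the
; 19-bit clip value 2^19-1, in integer and float form for the pminsd and
; minps clipping paths respectively; minshort biases unsigned 16-bit input
; into signed range so pmaddwd can be used; unicoeff (0x8000 * 0x4000) adds
; that bias back afterwards, assuming coefficients that sum to 1.0 in Q14.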

SECTION .text

;-----------------------------------------------------------------------------
; horizontal line scaling
;
; void hscale<source_width>to<intermediate_nbits>_<filterSize>_<opt>
;                               (SwsContext *c, int{16,32}_t *dst,
;                                int dstW, const uint{8,16}_t *src,
;                                const int16_t *filter,
;                                const int32_t *filterPos, int filterSize);
;
; Scale one horizontal line. Input is either 8 bits or 16 bits wide
; ($source_width can be 8, 9, 10, 12, 14 or 16; the difference is whether
; we have to downscale before multiplying). The filter is 14 bits. Output
; is either 15 bits (in int16_t) or 19 bits (in int32_t), as given in
; $intermediate_nbits. Each output pixel is generated from $filterSize
; input pixels; the position of the first one is given in
; filterPos[nOutputPixel].
;-----------------------------------------------------------------------------
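
; For orientation, every variant below computes, per output pixel, roughly
; the following C loop (a sketch using the prototype's names; FFMIN is
; FFmpeg's min macro, and the shift/clip shown is the 8-bit input, 15-bit
; output case):
;
;   for (int i = 0; i < dstW; i++) {
;       int val = 0;
;       for (int j = 0; j < filterSize; j++)
;           val += src[filterPos[i] + j] * filter[filterSize * i + j];
;       dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // 14 + 8 - 15 = 7
;   }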

; SCALE_FUNC source_width, intermediate_nbits, filtersize, filtersuffix, n_args, n_xmm
%macro SCALE_FUNC 6
%ifnidn %3, X
cglobal hscale%1to%2_%4, %5, 7, %6, pos0, dst, w, src, filter, fltpos, pos1
%else
cglobal hscale%1to%2_%4, %5, 10, %6, pos0, dst, w, srcmem, filter, fltpos, fltsize
%endif
%if ARCH_X86_64
    movsxd        wq, wd
%define mov32 movsxd
%else ; x86-32
%define mov32 mov
%endif ; x86-64
%if %2 == 19
%if mmsize == 8 ; mmx
    mova          m2, [max_19bit_int]
%elif cpuflag(sse4)
    mova          m2, [max_19bit_int]
%else ; ssse3/sse2
    mova          m2, [max_19bit_flt]
%endif ; mmx/sse2/ssse3/sse4
%endif ; %2 == 19
%if %1 == 16
    mova          m6, [minshort]
    mova          m7, [unicoeff]
%elif %1 == 8
    pxor          m3, m3
%endif ; %1 == 8/16

%if %1 == 8
%define movlh movd
%define movbh movh
%define srcmul 1
%else ; %1 == 9-16
%define movlh movq
%define movbh movu
%define srcmul 2
%endif ; %1 == 8/9-16

%ifnidn %3, X
    ; setup loop
%if %3 == 8
    shl           wq, 1 ; this allows *16 (i.e. now *8) in lea instructions for the 8-tap filter
%define wshr 1
%else ; %3 == 4
%define wshr 0
%endif ; %3 == 8
    lea      filterq, [filterq+wq*8]
%if %2 == 15
    lea         dstq, [dstq+wq*(2>>wshr)]
%else ; %2 == 19
    lea         dstq, [dstq+wq*(4>>wshr)]
%endif ; %2 == 15/19
    lea      fltposq, [fltposq+wq*(4>>wshr)]
    neg           wq
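    ; note: filterq/dstq/fltposq now point at the ends of their arrays and
    ; wq runs from -w up to 0, so [reg+wq*scale] still walks forward while
    ; a single "add wq, n / jl .loop" pair handles both the stepping and
    ; the termination test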
.loop:
%if %3 == 4 ; filterSize == 4 scaling
    ; load 2x4 or 4x4 source pixels into m0/m1
    mov32      pos0q, dword [fltposq+wq*4+ 0]  ; filterPos[0]
    mov32      pos1q, dword [fltposq+wq*4+ 4]  ; filterPos[1]
    movlh         m0, [srcq+pos0q*srcmul]      ; src[filterPos[0] + {0,1,2,3}]
%if mmsize == 8
    movlh         m1, [srcq+pos1q*srcmul]      ; src[filterPos[1] + {0,1,2,3}]
%else ; mmsize == 16
%if %1 > 8
    movhps        m0, [srcq+pos1q*srcmul]      ; src[filterPos[1] + {0,1,2,3}]
%else ; %1 == 8
    movd          m4, [srcq+pos1q*srcmul]      ; src[filterPos[1] + {0,1,2,3}]
%endif
    mov32      pos0q, dword [fltposq+wq*4+ 8]  ; filterPos[2]
    mov32      pos1q, dword [fltposq+wq*4+12]  ; filterPos[3]
    movlh         m1, [srcq+pos0q*srcmul]      ; src[filterPos[2] + {0,1,2,3}]
%if %1 > 8
    movhps        m1, [srcq+pos1q*srcmul]      ; src[filterPos[3] + {0,1,2,3}]
%else ; %1 == 8
    movd          m5, [srcq+pos1q*srcmul]      ; src[filterPos[3] + {0,1,2,3}]
    punpckldq     m0, m4
    punpckldq     m1, m5
%endif ; %1 == 8
%endif ; mmsize == 8/16
%if %1 == 8
    punpcklbw     m0, m3                       ; byte -> word
    punpcklbw     m1, m3                       ; byte -> word
%endif ; %1 == 8

    ; multiply with filter coefficients
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw         m0, m6
    psubw         m1, m6
%endif ; %1 == 16
    pmaddwd       m0, [filterq+wq*8+mmsize*0]  ; *= filter[{0,1,..,6,7}]
    pmaddwd       m1, [filterq+wq*8+mmsize*1]  ; *= filter[{8,9,..,14,15}]

    ; add up horizontally (4 srcpix * 4 coefficients -> 1 dstpix)
%if mmsize == 8 ; mmx
    movq          m4, m0
    punpckldq     m0, m1
    punpckhdq     m4, m1
    paddd         m0, m4
%elif notcpuflag(ssse3) ; sse2
    mova          m4, m0
    shufps        m0, m1, 10001000b
    shufps        m4, m1, 11011101b
    paddd         m0, m4
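    ; (shufps with 10001000b gathers the even dwords of m0/m1 and 11011101b
    ; the odd ones, so the paddd adds each pair of partial sums: a
    ; horizontal add built from shuffles, for cpus without phaddd)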
%else ; ssse3/sse4
    phaddd        m0, m1                       ; filter[{ 0, 1, 2, 3}]*src[filterPos[0]+{0,1,2,3}],
                                               ; filter[{ 4, 5, 6, 7}]*src[filterPos[1]+{0,1,2,3}],
                                               ; filter[{ 8, 9,10,11}]*src[filterPos[2]+{0,1,2,3}],
                                               ; filter[{12,13,14,15}]*src[filterPos[3]+{0,1,2,3}]
%endif ; mmx/sse2/ssse3/sse4
%else ; %3 == 8, i.e. filterSize == 8 scaling
    ; load 2x8 or 4x8 source pixels into m0, m1, m4 and m5
    mov32      pos0q, dword [fltposq+wq*2+0]   ; filterPos[0]
    mov32      pos1q, dword [fltposq+wq*2+4]   ; filterPos[1]
    movbh         m0, [srcq+ pos0q   *srcmul]  ; src[filterPos[0] + {0,1,2,3,4,5,6,7}]
%if mmsize == 8
    movbh         m1, [srcq+(pos0q+4)*srcmul]  ; src[filterPos[0] + {4,5,6,7}]
    movbh         m4, [srcq+ pos1q   *srcmul]  ; src[filterPos[1] + {0,1,2,3}]
    movbh         m5, [srcq+(pos1q+4)*srcmul]  ; src[filterPos[1] + {4,5,6,7}]
%else ; mmsize == 16
    movbh         m1, [srcq+ pos1q   *srcmul]  ; src[filterPos[1] + {0,1,2,3,4,5,6,7}]
    mov32      pos0q, dword [fltposq+wq*2+8]   ; filterPos[2]
    mov32      pos1q, dword [fltposq+wq*2+12]  ; filterPos[3]
    movbh         m4, [srcq+ pos0q   *srcmul]  ; src[filterPos[2] + {0,1,2,3,4,5,6,7}]
    movbh         m5, [srcq+ pos1q   *srcmul]  ; src[filterPos[3] + {0,1,2,3,4,5,6,7}]
%endif ; mmsize == 8/16
%if %1 == 8
    punpcklbw     m0, m3                       ; byte -> word
    punpcklbw     m1, m3                       ; byte -> word
    punpcklbw     m4, m3                       ; byte -> word
    punpcklbw     m5, m3                       ; byte -> word
%endif ; %1 == 8

    ; multiply
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw         m0, m6
    psubw         m1, m6
    psubw         m4, m6
    psubw         m5, m6
%endif ; %1 == 16
    pmaddwd       m0, [filterq+wq*8+mmsize*0]  ; *= filter[{0,1,..,6,7}]
    pmaddwd       m1, [filterq+wq*8+mmsize*1]  ; *= filter[{8,9,..,14,15}]
    pmaddwd       m4, [filterq+wq*8+mmsize*2]  ; *= filter[{16,17,..,22,23}]
    pmaddwd       m5, [filterq+wq*8+mmsize*3]  ; *= filter[{24,25,..,30,31}]

    ; add up horizontally (8 srcpix * 8 coefficients -> 1 dstpix)
%if mmsize == 8
    paddd         m0, m1
    paddd         m4, m5
    movq          m1, m0
    punpckldq     m0, m4
    punpckhdq     m1, m4
    paddd         m0, m1
%elif notcpuflag(ssse3) ; sse2
%if %1 == 8
%define mex m6
%else
%define mex m3
%endif
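    ; (mex is whichever register is free for scratch: with 8-bit input m6
    ; is unused, with wider input m6 may hold minshort but m3 is free,
    ; since the zero register is only needed for the byte -> word unpack)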
    ; emulate horizontal add as transpose + vertical add
    mova         mex, m0
    punpckldq     m0, m1
    punpckhdq    mex, m1
    paddd         m0, mex
    mova          m1, m4
    punpckldq     m4, m5
    punpckhdq     m1, m5
    paddd         m4, m1
    mova          m1, m0
    punpcklqdq    m0, m4
    punpckhqdq    m1, m4
    paddd         m0, m1
%else ; ssse3/sse4
    ; FIXME if we rearrange the filter in pairs of 4, we can
    ; load pixels likewise and use 2 x paddd + phaddd instead
    ; of 3 x phaddd here, faster on older cpus
    phaddd        m0, m1
    phaddd        m4, m5
    phaddd        m0, m4                       ; filter[{ 0, 1,..., 6, 7}]*src[filterPos[0]+{0,1,...,6,7}],
                                               ; filter[{ 8, 9,...,14,15}]*src[filterPos[1]+{0,1,...,6,7}],
                                               ; filter[{16,17,...,22,23}]*src[filterPos[2]+{0,1,...,6,7}],
                                               ; filter[{24,25,...,30,31}]*src[filterPos[3]+{0,1,...,6,7}]
%endif ; mmx/sse2/ssse3/sse4
%endif ; %3 == 4/8
%else ; %3 == X, i.e. any filterSize scaling
%ifidn %4, X4
%define dlt 4
%else ; %4 == X || %4 == X8
%define dlt 0
%endif ; %4 ==/!= X4
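; (the X4 variant peels the last 4 taps out of the inner loop below;
; presumably it serves filter sizes that are a multiple of 4 but not of 8,
; while X (mmx) and X8 (sse2+) handle sizes the inner loop consumes whole)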
%if ARCH_X86_64
%define srcq    r8
%define pos1q   r7
%define srcendq r9
    movsxd  fltsizeq, fltsized                 ; filterSize
    lea      srcendq, [srcmemq+(fltsizeq-dlt)*srcmul] ; &src[filterSize&~4]
%else ; x86-32
%define srcq    srcmemq
%define pos1q   dstq
%define srcendq r6m
    lea        pos0q, [srcmemq+(fltsizeq-dlt)*srcmul] ; &src[filterSize&~4]
    mov      srcendq, pos0q
%endif ; x86-32/64
    lea      fltposq, [fltposq+wq*4]
%if %2 == 15
    lea         dstq, [dstq+wq*2]
%else ; %2 == 19
    lea         dstq, [dstq+wq*4]
%endif ; %2 == 15/19
    movifnidn  dstmp, dstq
    neg           wq
.loop:
    mov32      pos0q, dword [fltposq+wq*4+0]   ; filterPos[0]
    mov32      pos1q, dword [fltposq+wq*4+4]   ; filterPos[1]
    ; FIXME maybe do 4px/iteration on x86-64 (x86-32 wouldn't have enough regs)?
    pxor          m4, m4
    pxor          m5, m5
    mov         srcq, srcmemmp

.innerloop:
    ; load 2x4 (mmx) or 2x8 (sse) source pixels into m0/m1 -> m4/m5
    movbh         m0, [srcq+ pos0q     *srcmul] ; src[filterPos[0] + {0,1,2,3(,4,5,6,7)}]
    movbh         m1, [srcq+(pos1q+dlt)*srcmul] ; src[filterPos[1] + {0,1,2,3(,4,5,6,7)}]
%if %1 == 8
    punpcklbw     m0, m3
    punpcklbw     m1, m3
%endif ; %1 == 8

    ; multiply
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw         m0, m6
    psubw         m1, m6
%endif ; %1 == 16
    pmaddwd       m0, [filterq]                  ; filter[{0,1,2,3(,4,5,6,7)}]
    pmaddwd       m1, [filterq+(fltsizeq+dlt)*2] ; filter[filtersize+{0,1,2,3(,4,5,6,7)}]
    paddd         m4, m0
    paddd         m5, m1
    add      filterq, mmsize
    add         srcq, srcmul*mmsize/2
    cmp         srcq, srcendq                    ; while (src += 4) < &src[filterSize]
    jl .innerloop
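    ; (each pass accumulated mmsize/2 more source samples into two running
    ; sums: m4 for dstpx[0] and m5 for dstpx[1], whose filter row lies
    ; (fltsizeq+dlt)*2 bytes further on; in the X4 variant dstpx[1] starts
    ; 4 taps late so that the leftover 4+4 taps can be folded into one
    ; register below)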
%ifidn %4, X4
    mov32      pos1q, dword [fltposq+wq*4+4]    ; filterPos[1]
    movlh         m0, [srcq+ pos0q     *srcmul] ; split last 4 srcpx of dstpx[0]
    sub        pos1q, fltsizeq                  ; and first 4 srcpx of dstpx[1]
%if %1 > 8
    movhps        m0, [srcq+(pos1q+dlt)*srcmul]
%else ; %1 == 8
    movd          m1, [srcq+(pos1q+dlt)*srcmul]
    punpckldq     m0, m1
%endif ; %1 == 8
%if %1 == 8
    punpcklbw     m0, m3
%endif ; %1 == 8
%if %1 == 16 ; pmaddwd needs signed adds, so this moves unsigned -> signed, we'll
             ; add back 0x8000 * sum(coeffs) after the horizontal add
    psubw         m0, m6
%endif ; %1 == 16
    pmaddwd       m0, [filterq]
%endif ; %4 == X4
    lea      filterq, [filterq+(fltsizeq+dlt)*2]
%if mmsize == 8 ; mmx
    movq          m0, m4
    punpckldq     m4, m5
    punpckhdq     m0, m5
    paddd         m0, m4
%else ; mmsize == 16
%if notcpuflag(ssse3) ; sse2
    mova          m1, m4
    punpcklqdq    m4, m5
    punpckhqdq    m1, m5
    paddd         m4, m1
%else ; ssse3/sse4
    phaddd        m4, m5
%endif ; sse2/ssse3/sse4
%ifidn %4, X4
    paddd         m4, m0
%endif ; %4 == X4
%if notcpuflag(ssse3) ; sse2
    pshufd        m4, m4, 11011000b
    movhlps       m0, m4
    paddd         m0, m4
%else ; ssse3/sse4
    phaddd        m4, m4
    SWAP           0, 4
%endif ; sse2/ssse3/sse4
%endif ; mmsize == 8/16
%endif ; %3 ==/!= X
%if %1 == 16 ; add 0x8000 * sum(coeffs), i.e. back from signed -> unsigned
    paddd         m0, m7
%endif ; %1 == 16

    ; clip, store
    psrad         m0, 14 + %1 - %2
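    ; (the products carry 14 fractional bits from the Q14 filter, so a
    ; shift of 14 + %1 - %2 renormalizes from input precision %1 to output
    ; precision %2; e.g. 8-bit input, 15-bit output gives psrad by 7)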
%ifidn %3, X
    movifnidn   dstq, dstmp
%endif ; %3 == X
%if %2 == 15
    packssdw      m0, m0
%ifnidn %3, X
    movh [dstq+wq*(2>>wshr)], m0
%else ; %3 == X
    movd [dstq+wq*2], m0
%endif ; %3 ==/!= X
%else ; %2 == 19
%if mmsize == 8
    PMINSD_MMX    m0, m2, m4
%elif cpuflag(sse4)
    pminsd        m0, m2
%else ; sse2/ssse3
    cvtdq2ps      m0, m0
    minps         m0, m2
    cvtps2dq      m0, m0
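    ; (without pminsd the clip takes a detour through float; that is safe
    ; here because the clip bound and everything below it fit comfortably
    ; in the 24-bit float mantissa, so the clamped result is exact)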
%endif ; mmx/sse2/ssse3/sse4
%ifnidn %3, X
    mova [dstq+wq*(4>>wshr)], m0
%else ; %3 == X
    movq [dstq+wq*4], m0
%endif ; %3 ==/!= X
%endif ; %2 == 15/19
%ifnidn %3, X
    add           wq, (mmsize<<wshr)/4 ; both 8tap and 4tap really only do 4 pixels (or for mmx: 2 pixels)
                                       ; per iteration; see "shl wq, 1" above for why we do this
%else ; %3 == X
    add           wq, 2
%endif ; %3 ==/!= X
    jl .loop
    REP_RET
%endmacro

; SCALE_FUNCS source_width, intermediate_nbits, n_xmm
%macro SCALE_FUNCS 3
SCALE_FUNC %1, %2, 4, 4,  6, %3
SCALE_FUNC %1, %2, 8, 8,  6, %3
%if mmsize == 8
SCALE_FUNC %1, %2, X, X,  7, %3
%else
SCALE_FUNC %1, %2, X, X4, 7, %3
SCALE_FUNC %1, %2, X, X8, 7, %3
%endif
%endmacro
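; (per bit depth this instantiates a 4-tap, an 8-tap and a generic
; function; the mmx X loop already steps 4 coefficients per pass while the
; sse inner loop consumes 8, hence the extra X4 variant that peels the odd
; group of 4; the caller presumably picks a variant based on filterSize)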

; SCALE_FUNCS2 8_xmm_args, 9to10_xmm_args, 16_xmm_args
%macro SCALE_FUNCS2 3
%if notcpuflag(sse4)
SCALE_FUNCS  8, 15, %1
SCALE_FUNCS  9, 15, %2
SCALE_FUNCS 10, 15, %2
SCALE_FUNCS 12, 15, %2
SCALE_FUNCS 14, 15, %2
SCALE_FUNCS 16, 15, %3
%endif ; !sse4
SCALE_FUNCS  8, 19, %1
SCALE_FUNCS  9, 19, %2
SCALE_FUNCS 10, 19, %2
SCALE_FUNCS 12, 19, %2
SCALE_FUNCS 14, 19, %2
SCALE_FUNCS 16, 19, %3
%endmacro
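; (the only sse4 instruction used above is pminsd, which only affects the
; 19-bit clip, so the 15-bit functions are not rebuilt for sse4 and the
; ssse3 versions serve instead)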

%if ARCH_X86_32
INIT_MMX mmx
SCALE_FUNCS2 0, 0, 0
%endif
INIT_XMM sse2
SCALE_FUNCS2 7, 6, 8
INIT_XMM ssse3
SCALE_FUNCS2 6, 6, 8
INIT_XMM sse4
SCALE_FUNCS2 6, 6, 8
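; (the numeric arguments are the xmm register counts handed through to
; cglobal, which on win64 determine how many xmm registers are saved and
; restored; mmx versions are built only for x86-32, since every x86-64
; cpu has sse2)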