; resample.asm
;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;* Copyright (c) 2014 James Almer <jamrial <at> gmail.com>
;* Copyright (c) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
  22. %include "libavutil/x86/x86util.asm"
  23. %if ARCH_X86_64
  24. %define pointer resq
  25. %else
  26. %define pointer resd
  27. %endif
  28. struc ResampleContext
  29. .av_class: pointer 1
  30. .filter_bank: pointer 1
  31. .filter_length: resd 1
  32. .filter_alloc: resd 1
  33. .ideal_dst_incr: resd 1
  34. .dst_incr: resd 1
  35. .dst_incr_div: resd 1
  36. .dst_incr_mod: resd 1
  37. .index: resd 1
  38. .frac: resd 1
  39. .src_incr: resd 1
  40. .compensation_distance: resd 1
  41. .phase_shift: resd 1
  42. .phase_mask: resd 1
  43. ; there's a few more here but we only care about the first few
  44. endstruc
  45. SECTION_RODATA
  46. pf_1: dd 1.0
  47. pdbl_1: dq 1.0
  48. pd_0x4000: dd 0x4000
  49. SECTION .text
  50. %macro RESAMPLE_FNS 3-5 ; format [float or int16], bps, log2_bps, float op suffix [s or d], 1.0 constant
  51. ; int resample_common_$format(ResampleContext *ctx, $format *dst,
  52. ; const $format *src, int size, int update_ctx)
  53. %if ARCH_X86_64 ; unix64 and win64
  54. cglobal resample_common_%1, 0, 15, 2, ctx, dst, src, phase_shift, index, frac, \
  55. dst_incr_mod, size, min_filter_count_x4, \
  56. min_filter_len_x4, dst_incr_div, src_incr, \
  57. phase_mask, dst_end, filter_bank
  58. ; use red-zone for variable storage
  59. %define ctx_stackq [rsp-0x8]
  60. %define src_stackq [rsp-0x10]
  61. %if WIN64
  62. %define update_context_stackd r4m
  63. %else ; unix64
  64. %define update_context_stackd [rsp-0x14]
  65. %endif
  66. ; load as many variables in registers as possible; for the rest, store
  67. ; on stack so that we have 'ctx' available as one extra register
  68. mov sized, r3d
  69. mov phase_maskd, [ctxq+ResampleContext.phase_mask]
  70. %if UNIX64
  71. mov update_context_stackd, r4d
  72. %endif
  73. mov indexd, [ctxq+ResampleContext.index]
  74. mov fracd, [ctxq+ResampleContext.frac]
  75. mov dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
  76. mov filter_bankq, [ctxq+ResampleContext.filter_bank]
  77. mov src_incrd, [ctxq+ResampleContext.src_incr]
  78. mov ctx_stackq, ctxq
  79. mov min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
  80. mov dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
  81. shl min_filter_len_x4d, %3
  82. lea dst_endq, [dstq+sizeq*%2]
  83. %if UNIX64
  84. mov ecx, [ctxq+ResampleContext.phase_shift]
  85. mov edi, [ctxq+ResampleContext.filter_alloc]
  86. DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
  87. filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
  88. src_incr, phase_mask, dst_end, filter_bank
  89. %elif WIN64
  90. mov R9d, [ctxq+ResampleContext.filter_alloc]
  91. mov ecx, [ctxq+ResampleContext.phase_shift]
  92. DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
  93. filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
  94. src_incr, phase_mask, dst_end, filter_bank
  95. %endif
  96. neg min_filter_len_x4q
  97. sub filter_bankq, min_filter_len_x4q
  98. sub srcq, min_filter_len_x4q
  99. mov src_stackq, srcq
  100. %else ; x86-32
  101. cglobal resample_common_%1, 1, 7, 2, ctx, phase_shift, dst, frac, \
  102. index, min_filter_length_x4, filter_bank
  103. ; push temp variables to stack
  104. %define ctx_stackq r0mp
  105. %define src_stackq r2mp
  106. %define update_context_stackd r4m
  107. mov dstq, r1mp
  108. mov r3, r3mp
  109. lea r3, [dstq+r3*%2]
  110. PUSH dword [ctxq+ResampleContext.dst_incr_div]
  111. PUSH dword [ctxq+ResampleContext.dst_incr_mod]
  112. PUSH dword [ctxq+ResampleContext.filter_alloc]
  113. PUSH r3
  114. PUSH dword [ctxq+ResampleContext.phase_mask]
  115. PUSH dword [ctxq+ResampleContext.src_incr]
  116. mov min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
  117. mov indexd, [ctxq+ResampleContext.index]
  118. shl min_filter_length_x4d, %3
  119. mov fracd, [ctxq+ResampleContext.frac]
  120. neg min_filter_length_x4q
  121. mov filter_bankq, [ctxq+ResampleContext.filter_bank]
  122. sub r2mp, min_filter_length_x4q
  123. sub filter_bankq, min_filter_length_x4q
  124. PUSH min_filter_length_x4q
  125. PUSH filter_bankq
  126. mov phase_shiftd, [ctxq+ResampleContext.phase_shift]
  127. DEFINE_ARGS src, phase_shift, dst, frac, index, min_filter_count_x4, filter
  128. %define filter_bankq dword [rsp+0x0]
  129. %define min_filter_length_x4q dword [rsp+0x4]
  130. %define src_incrd dword [rsp+0x8]
  131. %define phase_maskd dword [rsp+0xc]
  132. %define dst_endq dword [rsp+0x10]
  133. %define filter_allocd dword [rsp+0x14]
  134. %define dst_incr_modd dword [rsp+0x18]
  135. %define dst_incr_divd dword [rsp+0x1c]
  136. mov srcq, r2mp
  137. %endif
  138. .loop:
  139. mov filterd, filter_allocd
  140. imul filterd, indexd
  141. %if ARCH_X86_64
  142. mov min_filter_count_x4q, min_filter_len_x4q
  143. lea filterq, [filter_bankq+filterq*%2]
  144. %else ; x86-32
  145. mov min_filter_count_x4q, filter_bankq
  146. lea filterq, [min_filter_count_x4q+filterq*%2]
  147. mov min_filter_count_x4q, min_filter_length_x4q
  148. %endif
  149. %ifidn %1, int16
  150. movd m0, [pd_0x4000]
  151. %else ; float/double
  152. xorps m0, m0, m0
  153. %endif
  154. align 16
  155. .inner_loop:
  156. movu m1, [srcq+min_filter_count_x4q*1]
  157. %ifidn %1, int16
  158. %if cpuflag(xop)
  159. vpmadcswd m0, m1, [filterq+min_filter_count_x4q*1], m0
  160. %else
  161. pmaddwd m1, [filterq+min_filter_count_x4q*1]
  162. paddd m0, m1
  163. %endif
  164. %else ; float/double
  165. %if cpuflag(fma4) || cpuflag(fma3)
  166. fmaddp%4 m0, m1, [filterq+min_filter_count_x4q*1], m0
  167. %else
  168. mulp%4 m1, m1, [filterq+min_filter_count_x4q*1]
  169. addp%4 m0, m0, m1
  170. %endif ; cpuflag
  171. %endif
  172. add min_filter_count_x4q, mmsize
  173. js .inner_loop
  174. %ifidn %1, int16
  175. HADDD m0, m1
  176. psrad m0, 15
  177. add fracd, dst_incr_modd
  178. packssdw m0, m0
  179. add indexd, dst_incr_divd
  180. movd [dstq], m0
  181. %else ; float/double
  182. ; horizontal sum & store
  183. %if mmsize == 32
  184. vextractf128 xm1, m0, 0x1
  185. addps xm0, xm1
  186. %endif
  187. movhlps xm1, xm0
  188. %ifidn %1, float
  189. addps xm0, xm1
  190. shufps xm1, xm0, xm0, q0001
  191. %endif
  192. add fracd, dst_incr_modd
  193. addp%4 xm0, xm1
  194. add indexd, dst_incr_divd
  195. movs%4 [dstq], xm0
  196. %endif
  197. cmp fracd, src_incrd
  198. jl .skip
  199. sub fracd, src_incrd
  200. inc indexd
  201. %if UNIX64
  202. DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
  203. index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
  204. src_incr, phase_mask, dst_end, filter_bank
  205. %elif WIN64
  206. DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
  207. index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
  208. src_incr, phase_mask, dst_end, filter_bank
  209. %else ; x86-32
  210. DEFINE_ARGS src, phase_shift, dst, frac, index, index_incr
  211. %endif
  212. .skip:
  213. mov index_incrd, indexd
  214. add dstq, %2
  215. and indexd, phase_maskd
  216. sar index_incrd, phase_shiftb
  217. lea srcq, [srcq+index_incrq*%2]
  218. cmp dstq, dst_endq
  219. jne .loop
  220. %if ARCH_X86_64
  221. DEFINE_ARGS ctx, dst, src, phase_shift, index, frac
  222. %else ; x86-32
  223. DEFINE_ARGS src, ctx, update_context, frac, index
  224. %endif
  225. cmp dword update_context_stackd, 0
  226. jz .skip_store
  227. ; strictly speaking, the function should always return the consumed
  228. ; number of bytes; however, we only use the value if update_context
  229. ; is true, so let's just leave it uninitialized otherwise
  230. mov ctxq, ctx_stackq
  231. movifnidn rax, srcq
  232. mov [ctxq+ResampleContext.frac ], fracd
  233. sub rax, src_stackq
  234. mov [ctxq+ResampleContext.index], indexd
  235. shr rax, %3
  236. .skip_store:
  237. %if ARCH_X86_32
  238. ADD rsp, 0x20
  239. %endif
  240. RET
  241. ; int resample_linear_$format(ResampleContext *ctx, float *dst,
  242. ; const float *src, int size, int update_ctx)
  243. %if ARCH_X86_64 ; unix64 and win64
  244. %if UNIX64
  245. cglobal resample_linear_%1, 0, 15, 5, ctx, dst, phase_mask, phase_shift, index, frac, \
  246. size, dst_incr_mod, min_filter_count_x4, \
  247. min_filter_len_x4, dst_incr_div, src_incr, \
  248. src, dst_end, filter_bank
  249. mov srcq, r2mp
  250. %else ; win64
  251. cglobal resample_linear_%1, 0, 15, 5, ctx, phase_mask, src, phase_shift, index, frac, \
  252. size, dst_incr_mod, min_filter_count_x4, \
  253. min_filter_len_x4, dst_incr_div, src_incr, \
  254. dst, dst_end, filter_bank
  255. mov dstq, r1mp
  256. %endif
  257. ; use red-zone for variable storage
  258. %define ctx_stackq [rsp-0x8]
  259. %define src_stackq [rsp-0x10]
  260. %define phase_mask_stackd [rsp-0x14]
  261. %if WIN64
  262. %define update_context_stackd r4m
  263. %else ; unix64
  264. %define update_context_stackd [rsp-0x18]
  265. %endif
  266. ; load as many variables in registers as possible; for the rest, store
  267. ; on stack so that we have 'ctx' available as one extra register
  268. mov sized, r3d
  269. mov phase_maskd, [ctxq+ResampleContext.phase_mask]
  270. %if UNIX64
  271. mov update_context_stackd, r4d
  272. %endif
  273. mov indexd, [ctxq+ResampleContext.index]
  274. mov fracd, [ctxq+ResampleContext.frac]
  275. mov dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
  276. mov filter_bankq, [ctxq+ResampleContext.filter_bank]
  277. mov src_incrd, [ctxq+ResampleContext.src_incr]
  278. mov ctx_stackq, ctxq
  279. mov phase_mask_stackd, phase_maskd
  280. mov min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
  281. %ifidn %1, int16
  282. movd m4, [pd_0x4000]
  283. %else ; float/double
  284. cvtsi2s%4 xm0, src_incrd
  285. movs%4 xm4, [%5]
  286. divs%4 xm4, xm0
  287. %endif
  288. mov dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
  289. shl min_filter_len_x4d, %3
  290. lea dst_endq, [dstq+sizeq*%2]
  291. %if UNIX64
  292. mov ecx, [ctxq+ResampleContext.phase_shift]
  293. mov edi, [ctxq+ResampleContext.filter_alloc]
  294. DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, filter1, \
  295. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  296. dst_incr_div, src_incr, src, dst_end, filter_bank
  297. %elif WIN64
  298. mov R9d, [ctxq+ResampleContext.filter_alloc]
  299. mov ecx, [ctxq+ResampleContext.phase_shift]
  300. DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, filter1, \
  301. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  302. dst_incr_div, src_incr, dst, dst_end, filter_bank
  303. %endif
  304. neg min_filter_len_x4q
  305. sub filter_bankq, min_filter_len_x4q
  306. sub srcq, min_filter_len_x4q
  307. mov src_stackq, srcq
  308. %else ; x86-32
  309. cglobal resample_linear_%1, 1, 7, 5, ctx, min_filter_length_x4, filter2, \
  310. frac, index, dst, filter_bank
  311. ; push temp variables to stack
  312. %define ctx_stackq r0mp
  313. %define src_stackq r2mp
  314. %define update_context_stackd r4m
  315. mov dstq, r1mp
  316. mov r3, r3mp
  317. lea r3, [dstq+r3*%2]
  318. PUSH dword [ctxq+ResampleContext.dst_incr_div]
  319. PUSH r3
  320. mov r3, dword [ctxq+ResampleContext.filter_alloc]
  321. PUSH dword [ctxq+ResampleContext.dst_incr_mod]
  322. PUSH r3
  323. shl r3, %3
  324. PUSH r3
  325. mov r3, dword [ctxq+ResampleContext.src_incr]
  326. PUSH dword [ctxq+ResampleContext.phase_mask]
  327. PUSH r3d
  328. %ifidn %1, int16
  329. movd m4, [pd_0x4000]
  330. %else ; float/double
  331. cvtsi2s%4 xm0, r3d
  332. movs%4 xm4, [%5]
  333. divs%4 xm4, xm0
  334. %endif
  335. mov min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
  336. mov indexd, [ctxq+ResampleContext.index]
  337. shl min_filter_length_x4d, %3
  338. mov fracd, [ctxq+ResampleContext.frac]
  339. neg min_filter_length_x4q
  340. mov filter_bankq, [ctxq+ResampleContext.filter_bank]
  341. sub r2mp, min_filter_length_x4q
  342. sub filter_bankq, min_filter_length_x4q
  343. PUSH min_filter_length_x4q
  344. PUSH filter_bankq
  345. PUSH dword [ctxq+ResampleContext.phase_shift]
  346. DEFINE_ARGS filter1, min_filter_count_x4, filter2, frac, index, dst, src
  347. %define phase_shift_stackd dword [rsp+0x0]
  348. %define filter_bankq dword [rsp+0x4]
  349. %define min_filter_length_x4q dword [rsp+0x8]
  350. %define src_incrd dword [rsp+0xc]
  351. %define phase_mask_stackd dword [rsp+0x10]
  352. %define filter_alloc_x4q dword [rsp+0x14]
  353. %define filter_allocd dword [rsp+0x18]
  354. %define dst_incr_modd dword [rsp+0x1c]
  355. %define dst_endq dword [rsp+0x20]
  356. %define dst_incr_divd dword [rsp+0x24]
  357. mov srcq, r2mp
  358. %endif
  359. .loop:
  360. mov filter1d, filter_allocd
  361. imul filter1d, indexd
  362. %if ARCH_X86_64
  363. mov min_filter_count_x4q, min_filter_len_x4q
  364. lea filter1q, [filter_bankq+filter1q*%2]
  365. lea filter2q, [filter1q+filter_allocq*%2]
  366. %else ; x86-32
  367. mov min_filter_count_x4q, filter_bankq
  368. lea filter1q, [min_filter_count_x4q+filter1q*%2]
  369. mov min_filter_count_x4q, min_filter_length_x4q
  370. mov filter2q, filter1q
  371. add filter2q, filter_alloc_x4q
  372. %endif
  373. %ifidn %1, int16
  374. mova m0, m4
  375. mova m2, m4
  376. %else ; float/double
  377. xorps m0, m0, m0
  378. xorps m2, m2, m2
  379. %endif
  380. align 16
  381. .inner_loop:
  382. movu m1, [srcq+min_filter_count_x4q*1]
  383. %ifidn %1, int16
  384. %if cpuflag(xop)
  385. vpmadcswd m2, m1, [filter2q+min_filter_count_x4q*1], m2
  386. vpmadcswd m0, m1, [filter1q+min_filter_count_x4q*1], m0
  387. %else
  388. pmaddwd m3, m1, [filter2q+min_filter_count_x4q*1]
  389. pmaddwd m1, [filter1q+min_filter_count_x4q*1]
  390. paddd m2, m3
  391. paddd m0, m1
  392. %endif ; cpuflag
  393. %else ; float/double
  394. %if cpuflag(fma4) || cpuflag(fma3)
  395. fmaddp%4 m2, m1, [filter2q+min_filter_count_x4q*1], m2
  396. fmaddp%4 m0, m1, [filter1q+min_filter_count_x4q*1], m0
  397. %else
  398. mulp%4 m3, m1, [filter2q+min_filter_count_x4q*1]
  399. mulp%4 m1, m1, [filter1q+min_filter_count_x4q*1]
  400. addp%4 m2, m2, m3
  401. addp%4 m0, m0, m1
  402. %endif ; cpuflag
  403. %endif
  404. add min_filter_count_x4q, mmsize
  405. js .inner_loop
  406. %ifidn %1, int16
  407. %if mmsize == 16
  408. %if cpuflag(xop)
  409. vphadddq m2, m2
  410. vphadddq m0, m0
  411. %endif
  412. pshufd m3, m2, q0032
  413. pshufd m1, m0, q0032
  414. paddd m2, m3
  415. paddd m0, m1
  416. %endif
  417. %if notcpuflag(xop)
  418. PSHUFLW m3, m2, q0032
  419. PSHUFLW m1, m0, q0032
  420. paddd m2, m3
  421. paddd m0, m1
  422. %endif
  423. psubd m2, m0
  424. ; This is probably a really bad idea on atom and other machines with a
  425. ; long transfer latency between GPRs and XMMs (atom). However, it does
  426. ; make the clip a lot simpler...
  427. movd eax, m2
  428. add indexd, dst_incr_divd
  429. imul fracd
  430. idiv src_incrd
  431. movd m1, eax
  432. add fracd, dst_incr_modd
  433. paddd m0, m1
  434. psrad m0, 15
  435. packssdw m0, m0
  436. movd [dstq], m0
  437. ; note that for imul/idiv, I need to move filter to edx/eax for each:
  438. ; - 32bit: eax=r0[filter1], edx=r2[filter2]
  439. ; - win64: eax=r6[filter1], edx=r1[todo]
  440. ; - unix64: eax=r6[filter1], edx=r2[todo]
  441. %else ; float/double
  442. ; val += (v2 - val) * (FELEML) frac / c->src_incr;
  443. %if mmsize == 32
  444. vextractf128 xm1, m0, 0x1
  445. vextractf128 xm3, m2, 0x1
  446. addps xm0, xm1
  447. addps xm2, xm3
  448. %endif
  449. cvtsi2s%4 xm1, fracd
  450. subp%4 xm2, xm0
  451. mulp%4 xm1, xm4
  452. shufp%4 xm1, xm1, q0000
  453. %if cpuflag(fma4) || cpuflag(fma3)
  454. fmaddp%4 xm0, xm2, xm1, xm0
  455. %else
  456. mulp%4 xm2, xm1
  457. addp%4 xm0, xm2
  458. %endif ; cpuflag
  459. ; horizontal sum & store
  460. movhlps xm1, xm0
  461. %ifidn %1, float
  462. addps xm0, xm1
  463. shufps xm1, xm0, xm0, q0001
  464. %endif
  465. add fracd, dst_incr_modd
  466. addp%4 xm0, xm1
  467. add indexd, dst_incr_divd
  468. movs%4 [dstq], xm0
  469. %endif
  470. cmp fracd, src_incrd
  471. jl .skip
  472. sub fracd, src_incrd
  473. inc indexd
  474. %if UNIX64
  475. DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, index_incr, \
  476. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  477. dst_incr_div, src_incr, src, dst_end, filter_bank
  478. %elif WIN64
  479. DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, index_incr, \
  480. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  481. dst_incr_div, src_incr, dst, dst_end, filter_bank
  482. %else ; x86-32
  483. DEFINE_ARGS filter1, phase_shift, index_incr, frac, index, dst, src
  484. %endif
  485. .skip:
  486. %if ARCH_X86_32
  487. mov phase_shiftd, phase_shift_stackd
  488. %endif
  489. mov index_incrd, indexd
  490. add dstq, %2
  491. and indexd, phase_mask_stackd
  492. sar index_incrd, phase_shiftb
  493. lea srcq, [srcq+index_incrq*%2]
  494. cmp dstq, dst_endq
  495. jne .loop
  496. %if UNIX64
  497. DEFINE_ARGS ctx, dst, filter2, phase_shift, index, frac, index_incr, \
  498. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  499. dst_incr_div, src_incr, src, dst_end, filter_bank
  500. %elif WIN64
  501. DEFINE_ARGS ctx, filter2, src, phase_shift, index, frac, index_incr, \
  502. dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
  503. dst_incr_div, src_incr, dst, dst_end, filter_bank
  504. %else ; x86-32
  505. DEFINE_ARGS filter1, ctx, update_context, frac, index, dst, src
  506. %endif
  507. cmp dword update_context_stackd, 0
  508. jz .skip_store
  509. ; strictly speaking, the function should always return the consumed
  510. ; number of bytes; however, we only use the value if update_context
  511. ; is true, so let's just leave it uninitialized otherwise
  512. mov ctxq, ctx_stackq
  513. movifnidn rax, srcq
  514. mov [ctxq+ResampleContext.frac ], fracd
  515. sub rax, src_stackq
  516. mov [ctxq+ResampleContext.index], indexd
  517. shr rax, %3
  518. .skip_store:
  519. %if ARCH_X86_32
  520. ADD rsp, 0x28
  521. %endif
  522. RET
  523. %endmacro
  524. INIT_XMM sse
  525. RESAMPLE_FNS float, 4, 2, s, pf_1
  526. %if HAVE_AVX_EXTERNAL
  527. INIT_YMM avx
  528. RESAMPLE_FNS float, 4, 2, s, pf_1
  529. %endif
  530. %if HAVE_FMA3_EXTERNAL
  531. INIT_YMM fma3
  532. RESAMPLE_FNS float, 4, 2, s, pf_1
  533. %endif
  534. %if HAVE_FMA4_EXTERNAL
  535. INIT_XMM fma4
  536. RESAMPLE_FNS float, 4, 2, s, pf_1
  537. %endif
  538. %if ARCH_X86_32
  539. INIT_MMX mmxext
  540. RESAMPLE_FNS int16, 2, 1
  541. %endif
  542. INIT_XMM sse2
  543. RESAMPLE_FNS int16, 2, 1
  544. %if HAVE_XOP_EXTERNAL
  545. INIT_XMM xop
  546. RESAMPLE_FNS int16, 2, 1
  547. %endif
  548. INIT_XMM sse2
  549. RESAMPLE_FNS double, 8, 3, d, pdbl_1