resample.asm

;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;* Copyright (c) 2014 James Almer <jamrial@gmail.com>
;* Copyright (c) 2014 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

%if ARCH_X86_64
%define pointer resq
%else
%define pointer resd
%endif

struc ResampleContext
    .av_class:              pointer 1
    .filter_bank:           pointer 1
    .filter_length:         resd 1
    .filter_alloc:          resd 1
    .ideal_dst_incr:        resd 1
    .dst_incr:              resd 1
    .dst_incr_div:          resd 1
    .dst_incr_mod:          resd 1
    .index:                 resd 1
    .frac:                  resd 1
    .src_incr:              resd 1
    .compensation_distance: resd 1
    .phase_shift:           resd 1
    .phase_mask:            resd 1

    ; there are a few more fields here, but we only care about the first few
endstruc
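
; This struc mirrors the leading fields of the C-side ResampleContext (see
; the struct definition in libswresample); field order and sizes must stay
; in sync with the C definition for the offsets above to remain valid.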

SECTION_RODATA

pf_1:      dd 1.0
pdbl_1:    dq 1.0
pd_0x4000: dd 0x4000
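
; pf_1/pdbl_1 supply the 1.0 used to form 1.0/src_incr for the linear-
; interpolation weight; pd_0x4000 (1 << 14) is the rounding bias added to
; the int16 accumulators before the arithmetic shift right by 15.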

SECTION .text

%macro RESAMPLE_FNS 3-5 ; format [float, double or int16], bps, log2_bps, float op suffix [s or d], 1.0 constant
; int resample_common_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
cglobal resample_common_%1, 0, 15, 2, ctx, dst, src, phase_shift, index, frac, \
                                      dst_incr_mod, size, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      phase_mask, dst_end, filter_bank

    ; use the red zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x14]
%endif
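    ; (the "red zone" is the 128 bytes below rsp that the SysV x86-64 ABI
    ; reserves for leaf code, so it is usable without adjusting rsp; Win64
    ; has no red zone, hence update_ctx is reloaded from its home slot r4m)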

    ; load as many variables into registers as possible; for the rest, store
    ; them on the stack so that we have 'ctx' available as one extra register
    mov sized, r3d
    mov phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov update_context_stackd, r4d
%endif
    mov indexd, [ctxq+ResampleContext.index]
    mov fracd, [ctxq+ResampleContext.frac]
    mov dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov src_incrd, [ctxq+ResampleContext.src_incr]
    mov ctx_stackq, ctxq
    mov min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
    mov dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl min_filter_len_x4d, %3
    lea dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov ecx, [ctxq+ResampleContext.phase_shift]
    mov edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    mov R9d, [ctxq+ResampleContext.filter_alloc]
    mov ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                filter, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%endif

    neg min_filter_len_x4q
    sub filter_bankq, min_filter_len_x4q
    sub srcq, min_filter_len_x4q
    mov src_stackq, srcq
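
    ; counting trick: min_filter_len_x4 was negated and subtracted from the
    ; src and filter_bank pointers above, so the inner loop can run its
    ; counter from -filter_length*bps up to 0 and terminate on the sign
    ; flag ('js .inner_loop' below) without a separate compare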
%else ; x86-32
cglobal resample_common_%1, 1, 7, 2, ctx, phase_shift, dst, frac, \
                                     index, min_filter_length_x4, filter_bank

    ; push temp variables to the stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov dstq, r1mp
    mov r3, r3mp
    lea r3, [dstq+r3*%2]
    PUSH dword [ctxq+ResampleContext.dst_incr_div]
    PUSH dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH dword [ctxq+ResampleContext.filter_alloc]
    PUSH r3
    PUSH dword [ctxq+ResampleContext.phase_mask]
    PUSH dword [ctxq+ResampleContext.src_incr]
    mov min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov indexd, [ctxq+ResampleContext.index]
    shl min_filter_length_x4d, %3
    mov fracd, [ctxq+ResampleContext.frac]
    neg min_filter_length_x4q
    mov filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub r2mp, min_filter_length_x4q
    sub filter_bankq, min_filter_length_x4q
    PUSH min_filter_length_x4q
    PUSH filter_bankq
    mov phase_shiftd, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS src, phase_shift, dst, frac, index, min_filter_count_x4, filter

%define filter_bankq          dword [rsp+0x0]
%define min_filter_length_x4q dword [rsp+0x4]
%define src_incrd             dword [rsp+0x8]
%define phase_maskd           dword [rsp+0xc]
%define dst_endq              dword [rsp+0x10]
%define filter_allocd         dword [rsp+0x14]
%define dst_incr_modd         dword [rsp+0x18]
%define dst_incr_divd         dword [rsp+0x1c]

    mov srcq, r2mp
%endif
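
; For orientation: each iteration of .loop below computes one output sample,
; roughly matching this C sketch (a paraphrase of the scalar reference code;
; OUT() stands for the format-specific rounding/clipping store):
;
;     while (dst < dst_end) {
;         FELEM *filter = filter_bank + c->filter_alloc * index;
;         FELEM2 val = 0;
;         for (i = 0; i < c->filter_length; i++)
;             val += src[i] * (FELEM2)filter[i];
;         OUT(*dst++, val);
;         frac  += c->dst_incr_mod;
;         index += c->dst_incr_div;
;         if (frac >= c->src_incr) {
;             frac -= c->src_incr;
;             index++;
;         }
;         src   += index >> c->phase_shift;
;         index &= c->phase_mask;
;     }
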
.loop:
    mov filterd, filter_allocd
    imul filterd, indexd
%if ARCH_X86_64
    mov min_filter_count_x4q, min_filter_len_x4q
    lea filterq, [filter_bankq+filterq*%2]
%else ; x86-32
    mov min_filter_count_x4q, filter_bankq
    lea filterq, [min_filter_count_x4q+filterq*%2]
    mov min_filter_count_x4q, min_filter_length_x4q
%endif
%ifidn %1, int16
    movd m0, [pd_0x4000]
%else ; float/double
    xorps m0, m0, m0
%endif

align 16
.inner_loop:
    movu m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
    PMADCSWD m0, m1, [filterq+min_filter_count_x4q*1], m0, m1
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4 m0, m1, [filterq+min_filter_count_x4q*1], m0
%else
    mulp%4 m1, m1, [filterq+min_filter_count_x4q*1]
    addp%4 m0, m0, m1
%endif ; cpuflag
%endif
    add min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
    HADDD m0, m1
    psrad m0, 15
    add fracd, dst_incr_modd
    packssdw m0, m0
    add indexd, dst_incr_divd
    movd [dstq], m0
%else ; float/double
    ; horizontal sum & store
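    ; (for ymm, fold the upper 128-bit lane into the lower one first; then
    ; movhlps folds the upper half of the xmm; float needs one extra
    ; shuffle+add since two partial sums remain, double is done after one)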
%if mmsize == 32
    vextractf128 xm1, m0, 0x1
    addps xm0, xm1
%endif
    movhlps xm1, xm0
%ifidn %1, float
    addps xm0, xm1
    shufps xm1, xm0, xm0, q0001
%endif
    add fracd, dst_incr_modd
    addp%4 xm0, xm1
    add indexd, dst_incr_divd
    movs%4 [dstq], xm0
%endif
    cmp fracd, src_incrd
    jl .skip
    sub fracd, src_incrd
    inc indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, src, phase_shift, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, dst, src, filter_alloc, index, frac, dst_incr_mod, \
                index_incr, min_filter_count_x4, min_filter_len_x4, dst_incr_div, \
                src_incr, phase_mask, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS src, phase_shift, dst, frac, index, index_incr
%endif

.skip:
    mov index_incrd, indexd
    add dstq, %2
    and indexd, phase_maskd
    sar index_incrd, phase_shiftb
    lea srcq, [srcq+index_incrq*%2]
    cmp dstq, dst_endq
    jne .loop

%if ARCH_X86_64
    DEFINE_ARGS ctx, dst, src, phase_shift, index, frac
%else ; x86-32
    DEFINE_ARGS src, ctx, update_context, frac, index
%endif

    cmp dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the number of
    ; consumed source samples; however, we only use the value if
    ; update_context is true, so let's just leave it uninitialized otherwise
    mov ctxq, ctx_stackq
    movifnidn rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr rax, %3

.skip_store:
%if ARCH_X86_32
    ADD rsp, 0x20
%endif
    RET

; int resample_linear_$format(ResampleContext *ctx, $format *dst,
;                             const $format *src, int size, int update_ctx)
%if ARCH_X86_64 ; unix64 and win64
%if UNIX64
cglobal resample_linear_%1, 0, 15, 5, ctx, dst, phase_mask, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      src, dst_end, filter_bank

    mov srcq, r2mp
%else ; win64
cglobal resample_linear_%1, 0, 15, 5, ctx, phase_mask, src, phase_shift, index, frac, \
                                      size, dst_incr_mod, min_filter_count_x4, \
                                      min_filter_len_x4, dst_incr_div, src_incr, \
                                      dst, dst_end, filter_bank

    mov dstq, r1mp
%endif

    ; use the red zone for variable storage
%define ctx_stackq            [rsp-0x8]
%define src_stackq            [rsp-0x10]
%define phase_mask_stackd     [rsp-0x14]
%if WIN64
%define update_context_stackd r4m
%else ; unix64
%define update_context_stackd [rsp-0x18]
%endif

    ; load as many variables into registers as possible; for the rest, store
    ; them on the stack so that we have 'ctx' available as one extra register
    mov sized, r3d
    mov phase_maskd, [ctxq+ResampleContext.phase_mask]
%if UNIX64
    mov update_context_stackd, r4d
%endif
    mov indexd, [ctxq+ResampleContext.index]
    mov fracd, [ctxq+ResampleContext.frac]
    mov dst_incr_modd, [ctxq+ResampleContext.dst_incr_mod]
    mov filter_bankq, [ctxq+ResampleContext.filter_bank]
    mov src_incrd, [ctxq+ResampleContext.src_incr]
    mov ctx_stackq, ctxq
    mov phase_mask_stackd, phase_maskd
    mov min_filter_len_x4d, [ctxq+ResampleContext.filter_length]
%ifidn %1, int16
    movd m4, [pd_0x4000]
%else ; float/double
    cvtsi2s%4 xm0, src_incrd
    movs%4 xm4, [%5]
    divs%4 xm4, xm0
%endif
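    ; xm4 now holds 1.0/src_incr (float/double), used below to scale frac
    ; into the [0,1) linear-interpolation weight; in the int16 case m4
    ; instead carries the 0x4000 rounding bias for both accumulators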
    mov dst_incr_divd, [ctxq+ResampleContext.dst_incr_div]
    shl min_filter_len_x4d, %3
    lea dst_endq, [dstq+sizeq*%2]

%if UNIX64
    mov ecx, [ctxq+ResampleContext.phase_shift]
    mov edi, [ctxq+ResampleContext.filter_alloc]

    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    mov R9d, [ctxq+ResampleContext.filter_alloc]
    mov ecx, [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, filter1, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%endif

    neg min_filter_len_x4q
    sub filter_bankq, min_filter_len_x4q
    sub srcq, min_filter_len_x4q
    mov src_stackq, srcq
%else ; x86-32
cglobal resample_linear_%1, 1, 7, 5, ctx, min_filter_length_x4, filter2, \
                                     frac, index, dst, filter_bank

    ; push temp variables to the stack
%define ctx_stackq            r0mp
%define src_stackq            r2mp
%define update_context_stackd r4m

    mov dstq, r1mp
    mov r3, r3mp
    lea r3, [dstq+r3*%2]
    PUSH dword [ctxq+ResampleContext.dst_incr_div]
    PUSH r3
    mov r3, dword [ctxq+ResampleContext.filter_alloc]
    PUSH dword [ctxq+ResampleContext.dst_incr_mod]
    PUSH r3
    shl r3, %3
    PUSH r3
    mov r3, dword [ctxq+ResampleContext.src_incr]
    PUSH dword [ctxq+ResampleContext.phase_mask]
    PUSH r3d
%ifidn %1, int16
    movd m4, [pd_0x4000]
%else ; float/double
    cvtsi2s%4 xm0, r3d
    movs%4 xm4, [%5]
    divs%4 xm4, xm0
%endif
    mov min_filter_length_x4d, [ctxq+ResampleContext.filter_length]
    mov indexd, [ctxq+ResampleContext.index]
    shl min_filter_length_x4d, %3
    mov fracd, [ctxq+ResampleContext.frac]
    neg min_filter_length_x4q
    mov filter_bankq, [ctxq+ResampleContext.filter_bank]
    sub r2mp, min_filter_length_x4q
    sub filter_bankq, min_filter_length_x4q
    PUSH min_filter_length_x4q
    PUSH filter_bankq
    PUSH dword [ctxq+ResampleContext.phase_shift]

    DEFINE_ARGS filter1, min_filter_count_x4, filter2, frac, index, dst, src

%define phase_shift_stackd    dword [rsp+0x0]
%define filter_bankq          dword [rsp+0x4]
%define min_filter_length_x4q dword [rsp+0x8]
%define src_incrd             dword [rsp+0xc]
%define phase_mask_stackd     dword [rsp+0x10]
%define filter_alloc_x4q      dword [rsp+0x14]
%define filter_allocd         dword [rsp+0x18]
%define dst_incr_modd         dword [rsp+0x1c]
%define dst_endq              dword [rsp+0x20]
%define dst_incr_divd         dword [rsp+0x24]

    mov srcq, r2mp
%endif
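
; For orientation: the linear loop below evaluates two adjacent filter
; phases and interpolates between them, roughly matching this C sketch
; (again a paraphrase of the scalar reference; OUT() stands for the
; format-specific rounding/clipping store):
;
;     while (dst < dst_end) {
;         FELEM *filter1 = filter_bank + c->filter_alloc * index;
;         FELEM *filter2 = filter1 + c->filter_alloc;
;         FELEM2 val = 0, v2 = 0;
;         for (i = 0; i < c->filter_length; i++) {
;             val += src[i] * (FELEM2)filter1[i];
;             v2  += src[i] * (FELEM2)filter2[i];
;         }
;         val += (v2 - val) * (FELEML)frac / c->src_incr;
;         OUT(*dst++, val);
;         /* frac/index/src stepping is identical to resample_common */
;     }
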
.loop:
    mov filter1d, filter_allocd
    imul filter1d, indexd
%if ARCH_X86_64
    mov min_filter_count_x4q, min_filter_len_x4q
    lea filter1q, [filter_bankq+filter1q*%2]
    lea filter2q, [filter1q+filter_allocq*%2]
%else ; x86-32
    mov min_filter_count_x4q, filter_bankq
    lea filter1q, [min_filter_count_x4q+filter1q*%2]
    mov min_filter_count_x4q, min_filter_length_x4q
    mov filter2q, filter1q
    add filter2q, filter_alloc_x4q
%endif
%ifidn %1, int16
    mova m0, m4
    mova m2, m4
%else ; float/double
    xorps m0, m0, m0
    xorps m2, m2, m2
%endif

align 16
.inner_loop:
    movu m1, [srcq+min_filter_count_x4q*1]
%ifidn %1, int16
%if cpuflag(xop)
    vpmadcswd m2, m1, [filter2q+min_filter_count_x4q*1], m2
    vpmadcswd m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    pmaddwd m3, m1, [filter2q+min_filter_count_x4q*1]
    pmaddwd m1, [filter1q+min_filter_count_x4q*1]
    paddd m2, m3
    paddd m0, m1
%endif ; cpuflag
%else ; float/double
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4 m2, m1, [filter2q+min_filter_count_x4q*1], m2
    fmaddp%4 m0, m1, [filter1q+min_filter_count_x4q*1], m0
%else
    mulp%4 m3, m1, [filter2q+min_filter_count_x4q*1]
    mulp%4 m1, m1, [filter1q+min_filter_count_x4q*1]
    addp%4 m2, m2, m3
    addp%4 m0, m0, m1
%endif ; cpuflag
%endif
    add min_filter_count_x4q, mmsize
    js .inner_loop

%ifidn %1, int16
%if mmsize == 16
%if cpuflag(xop)
    vphadddq m2, m2
    vphadddq m0, m0
%endif
    pshufd m3, m2, q0032
    pshufd m1, m0, q0032
    paddd m2, m3
    paddd m0, m1
%endif
%if notcpuflag(xop)
    PSHUFLW m3, m2, q0032
    PSHUFLW m1, m0, q0032
    paddd m2, m3
    paddd m0, m1
%endif
    psubd m2, m0
    ; This is probably a really bad idea on Atom and other machines with a
    ; long transfer latency between GPRs and XMMs. However, it does make
    ; the clipping a lot simpler...
    movd eax, m2
    add indexd, dst_incr_divd
    imul fracd
    idiv src_incrd
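    ; edx:eax = (v2 - val) * frac, then eax = that / src_incr: the scalar
    ; fixed-point interpolation term from the C reference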
    movd m1, eax
    add fracd, dst_incr_modd
    paddd m0, m1
    psrad m0, 15
    packssdw m0, m0
    movd [dstq], m0

    ; note that for imul/idiv, I need to move filter to edx/eax for each:
    ; - 32bit: eax=r0[filter1], edx=r2[filter2]
    ; - win64: eax=r6[filter1], edx=r1[todo]
    ; - unix64: eax=r6[filter1], edx=r2[todo]
%else ; float/double
    ; val += (v2 - val) * (FELEML) frac / c->src_incr;
%if mmsize == 32
    vextractf128 xm1, m0, 0x1
    vextractf128 xm3, m2, 0x1
    addps xm0, xm1
    addps xm2, xm3
%endif
    cvtsi2s%4 xm1, fracd
    subp%4 xm2, xm0
    mulp%4 xm1, xm4
    shufp%4 xm1, xm1, q0000
%if cpuflag(fma4) || cpuflag(fma3)
    fmaddp%4 xm0, xm2, xm1, xm0
%else
    mulp%4 xm2, xm1
    addp%4 xm0, xm2
%endif ; cpuflag

    ; horizontal sum & store
    movhlps xm1, xm0
%ifidn %1, float
    addps xm0, xm1
    shufps xm1, xm0, xm0, q0001
%endif
    add fracd, dst_incr_modd
    addp%4 xm0, xm1
    add indexd, dst_incr_divd
    movs%4 [dstq], xm0
%endif
    cmp fracd, src_incrd
    jl .skip
    sub fracd, src_incrd
    inc indexd

%if UNIX64
    DEFINE_ARGS filter_alloc, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS phase_shift, filter2, src, filter_alloc, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, phase_shift, index_incr, frac, index, dst, src
%endif

.skip:
%if ARCH_X86_32
    mov phase_shiftd, phase_shift_stackd
%endif
    mov index_incrd, indexd
    add dstq, %2
    and indexd, phase_mask_stackd
    sar index_incrd, phase_shiftb
    lea srcq, [srcq+index_incrq*%2]
    cmp dstq, dst_endq
    jne .loop

%if UNIX64
    DEFINE_ARGS ctx, dst, filter2, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, src, dst_end, filter_bank
%elif WIN64
    DEFINE_ARGS ctx, filter2, src, phase_shift, index, frac, index_incr, \
                dst_incr_mod, min_filter_count_x4, min_filter_len_x4, \
                dst_incr_div, src_incr, dst, dst_end, filter_bank
%else ; x86-32
    DEFINE_ARGS filter1, ctx, update_context, frac, index, dst, src
%endif

    cmp dword update_context_stackd, 0
    jz .skip_store
    ; strictly speaking, the function should always return the number of
    ; consumed source samples; however, we only use the value if
    ; update_context is true, so let's just leave it uninitialized otherwise
    mov ctxq, ctx_stackq
    movifnidn rax, srcq
    mov [ctxq+ResampleContext.frac ], fracd
    sub rax, src_stackq
    mov [ctxq+ResampleContext.index], indexd
    shr rax, %3

.skip_store:
%if ARCH_X86_32
    ADD rsp, 0x28
%endif
    RET
%endmacro
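
; Instantiate the common and linear resamplers for each element type and
; instruction set; the arguments are (format, bps, log2_bps, float op
; suffix, 1.0 constant), matching the macro header above.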
INIT_XMM sse
RESAMPLE_FNS float, 4, 2, s, pf_1

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif
%if HAVE_FMA4_EXTERNAL
INIT_XMM fma4
RESAMPLE_FNS float, 4, 2, s, pf_1
%endif

%if ARCH_X86_32
INIT_MMX mmxext
RESAMPLE_FNS int16, 2, 1
%endif
INIT_XMM sse2
RESAMPLE_FNS int16, 2, 1
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
RESAMPLE_FNS int16, 2, 1
%endif

INIT_XMM sse2
RESAMPLE_FNS double, 8, 3, d, pdbl_1