dsputil_mmx.c

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/videodsp.h"
#include "constants.h"
#include "dsputil_x86.h"
#include "diracdsp_mmx.h"

#if HAVE_INLINE_ASM
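
/* Store an 8x8 block of int16_t coefficients as 8-bit pixels, clamping each
 * value to [0, 255] with packuswb. Four rows are written per asm block. */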
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;

    /* read the pixels */
    p   = block;
    pix = pixels;

    /* unrolled loop */
    __asm__ volatile (
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3),
           "r"(p)
        : "memory");

    pix += line_size * 4;
    p   += 32;

    // If an exact copy of the code above were placed here, the compiler
    // would generate some very strange code; thus the "r" constraints.
    __asm__ volatile (
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        :: "r"(pix), "r"((x86_reg)line_size), "r"((x86_reg)line_size * 3), "r"(p)
        : "memory");
}
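
/* Helper for ff_put_signed_pixels_clamped_mmx(): packs four rows of
 * coefficients with signed saturation (packsswb) and adds the 0x80 bias
 * (ff_pb_80) so that [-128, 127] maps to the [0, 255] pixel range. */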
#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t" \
    "movq 16 + "#off"(%2), %%mm2 \n\t" \
    "movq 32 + "#off"(%2), %%mm3 \n\t" \
    "movq 48 + "#off"(%2), %%mm4 \n\t" \
    "packsswb 8 + "#off"(%2), %%mm1 \n\t" \
    "packsswb 24 + "#off"(%2), %%mm2 \n\t" \
    "packsswb 40 + "#off"(%2), %%mm3 \n\t" \
    "packsswb 56 + "#off"(%2), %%mm4 \n\t" \
    "paddb %%mm0, %%mm1 \n\t" \
    "paddb %%mm0, %%mm2 \n\t" \
    "paddb %%mm0, %%mm3 \n\t" \
    "paddb %%mm0, %%mm4 \n\t" \
    "movq %%mm1, (%0) \n\t" \
    "movq %%mm2, (%0, %3) \n\t" \
    "movq %%mm3, (%0, %3, 2) \n\t" \
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                                      int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        : "+&r"(pixels), "=&r"(line_skip3)
        : "r"(block), "r"(line_skip)
        : "memory");
}
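
/* Add an 8x8 block of int16_t coefficients to the pixels already in memory:
 * pixels are zero-extended to 16 bits, summed with signed saturation
 * (paddsw) and repacked with unsigned saturation (packuswb), two rows per
 * iteration. */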
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
                               int line_size)
{
    const int16_t *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p   = block;
    pix = pixels;

    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile (
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            : "+m"(*pix), "+m"(*(pix + line_size))
            : "r"(p)
            : "memory");
        pix += line_size * 2;
        p   += 16;
    } while (--i);
}
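
/* Zero n consecutive 64-coefficient (128-byte) blocks with MMX stores; the
 * index register starts at -128 * n and counts up to zero. */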
#define CLEAR_BLOCKS(name, n)                          \
void name(int16_t *blocks)                             \
{                                                      \
    __asm__ volatile (                                 \
        "pxor %%mm7, %%mm7 \n\t"                       \
        "mov %1, %%"REG_a" \n\t"                       \
        "1: \n\t"                                      \
        "movq %%mm7, (%0, %%"REG_a") \n\t"             \
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"            \
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"           \
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"           \
        "add $32, %%"REG_a" \n\t"                      \
        "js 1b \n\t"                                   \
        :: "r"(((uint8_t *)blocks) + 128 * n),         \
           "i"(-128 * n)                               \
        : "%"REG_a                                     \
    );                                                 \
}

CLEAR_BLOCKS(ff_clear_blocks_mmx, 6)
CLEAR_BLOCKS(ff_clear_block_mmx, 1)

void ff_clear_block_sse(int16_t *block)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

void ff_clear_blocks_sse(int16_t *blocks)
{
    __asm__ volatile (
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        "js 1b \n"
        :: "r"(((uint8_t *)blocks) + 128 * 6),
           "i"(-128 * 6)
        : "%"REG_a
    );
}
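
/* dst[i] += src[i] for w bytes: 16 bytes per MMX iteration, with a scalar
 * loop handling the remaining tail. */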
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, int w)
{
    x86_reg i = 0;

    __asm__ volatile (
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        "js 1b \n\t"
        : "+r"(i)
        : "r"(src), "r"(dst), "r"((x86_reg)w - 15)
    );

    for ( ; i < w; i++)
        dst[i + 0] += src[i + 0];
}

#if HAVE_7REGS
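/* HuffYUV median prediction: dst[i] = diff[i] + mid_pred(l, t, l + t - tl),
 * where l is the previous output byte, t = top[i] and tl is the previous top
 * value. The median is computed branchlessly with cmov. */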
void ff_add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
                                        const uint8_t *diff, int w,
                                        int *left, int *left_top)
{
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;

    __asm__ volatile (
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3, %4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6, %4), %b0 \n"
        "mov %b0, (%5, %4) \n"
        "inc %4 \n"
        "jl 1b \n"
        : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        : "r"(dst + w), "r"(diff + w), "rm"(top + w)
    );

    *left     = l;
    *left_top = tl;
}
#endif

/* Draw the edges of width 'w' of an image of size width x height.
 * This MMX version can only handle w == 4, w == 8 or w == 16. */
void ff_draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
                       int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;

    /* left and right */
    ptr = buf;
    if (w == 8) {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
        );
    } else if (w == 16) {
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
        );
    } else {
        av_assert1(w == 4);
        __asm__ volatile (
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "movd %%mm0, -4(%0) \n\t"
            "movd -4(%0, %2), %%mm1 \n\t"
            "punpcklbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            "jb 1b \n\t"
            : "+r"(ptr)
            : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
        );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides & EDGE_TOP) {
        for (i = 0; i < h; i += 4) {
            ptr = buf - (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)buf - (x86_reg)ptr - w), "r"((x86_reg) -wrap),
                  "r"((x86_reg) -wrap * 3), "r"(ptr + width + 2 * w)
            );
        }
    }

    if (sides & EDGE_BOTTOM) {
        for (i = 0; i < h; i += 4) {
            ptr = last_line + (i + 1) * wrap - w;
            __asm__ volatile (
                "1: \n\t"
                "movq (%1, %0), %%mm0 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm0, (%0, %2) \n\t"
                "movq %%mm0, (%0, %2, 2) \n\t"
                "movq %%mm0, (%0, %3) \n\t"
                "add $8, %0 \n\t"
                "cmp %4, %0 \n\t"
                "jb 1b \n\t"
                : "+r"(ptr)
                : "r"((x86_reg)last_line - (x86_reg)ptr - w),
                  "r"((x86_reg)wrap), "r"((x86_reg)wrap * 3),
                  "r"(ptr + width + 2 * w)
            );
        }
    }
}

typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t linesize, int block_w, int block_h,
                                   int src_x, int src_y, int w, int h);
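
/* Global motion compensation for one 8 x h block: each output pixel is the
 * bilinear blend of its four source neighbours,
 *   dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy) +
 *          src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift),
 * with s = 1 << shift. Blocks crossing the picture border are first copied
 * through emu_edge_fn(); cases this MMX code cannot handle fall back to
 * ff_gmc_c(). */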
static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
                                 int stride, int h, int ox, int oy,
                                 int dxx, int dxy, int dyx, int dyy,
                                 int shift, int r, int width, int height,
                                 emulated_edge_mc_func *emu_edge_fn)
{
    const int w    = 8;
    const int ix   = ox >> (16 + shift);
    const int iy   = oy >> (16 + shift);
    const int oxs  = ox >> 4;
    const int oys  = oy >> 4;
    const int dxxs = dxx >> 4;
    const int dxys = dxy >> 4;
    const int dyxs = dyx >> 4;
    const int dyys = dyy >> 4;
    const uint16_t r4[4]   = { r, r, r, r };
    const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
    const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
    const uint64_t shift2  = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
    uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
    int x, y;

    const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
    const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
    const int dxh = dxy * (h - 1);
    const int dyw = dyx * (w - 1);
    int need_emu  = (unsigned)ix >= width  - w ||
                    (unsigned)iy >= height - h;

    if ( // non-constant fullpel offset (3% of blocks)
        ((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
         (oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx | dxy | dyx | dyy) & 15
        || (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
        // FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
                 shift, r, width, height);
        return;
    }

    src += ix + iy * stride;
    if (need_emu) {
        emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile (
        "movd %0, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1 << shift)
    );

    for (x = 0; x < w; x += 4) {
        uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
                            oxs - dxys + dxxs * (x + 1),
                            oxs - dxys + dxxs * (x + 2),
                            oxs - dxys + dxxs * (x + 3) };
        uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
                            oys - dyys + dyxs * (x + 1),
                            oys - dyys + dyxs * (x + 2),
                            oys - dyys + dyxs * (x + 3) };

        for (y = 0; y < h; y++) {
            __asm__ volatile (
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm5 \n\t"
                "paddw %2, %%mm4 \n\t"
                "paddw %3, %%mm5 \n\t"
                "movq %%mm4, %0 \n\t"
                "movq %%mm5, %1 \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile (
                "movq %%mm6, %%mm2 \n\t"
                "movq %%mm6, %%mm1 \n\t"
                "psubw %%mm4, %%mm2 \n\t"
                "psubw %%mm5, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx * dy
                "pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
                "pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
                "movd %4, %%mm5 \n\t"
                "movd %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
                "movd %2, %%mm5 \n\t"
                "movd %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
                "paddw %5, %%mm1 \n\t"
                "paddw %%mm3, %%mm2 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm2, %%mm0 \n\t"
                "psrlw %6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "=m"(dst[x + y * stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride + 1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4 - h * stride;
    }
}

#if CONFIG_VIDEODSP
#if HAVE_YASM
#if ARCH_X86_32
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif

void ff_gmc_sse(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#else
void ff_gmc_mmx(uint8_t *dst, uint8_t *src,
                int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy,
                int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc_8);
}
#endif
#endif

#if CONFIG_DIRAC_DECODER
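/* Dirac pixel ops: the MMX/SSE2 put/avg kernels require a height that is a
 * multiple of 4, so other heights fall back to the C versions; the 32-pixel
 * wide variants are built from two 16-pixel wide calls. */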
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)                                                              \
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)  \
{                                                                                                      \
    if (h & 3)                                                                                         \
        ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);                                       \
    else                                                                                               \
        OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);                                            \
}                                                                                                      \
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h) \
{                                                                                                      \
    if (h & 3)                                                                                         \
        ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);                                      \
    else                                                                                               \
        OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);                                           \
}                                                                                                      \
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h) \
{                                                                                                      \
    if (h & 3) {                                                                                       \
        ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);                                      \
    } else {                                                                                           \
        OPNAME ## _pixels16_ ## EXT(dst,      src[0],      stride, h);                                 \
        OPNAME ## _pixels16_ ## EXT(dst + 16, src[0] + 16, stride, h);                                 \
    }                                                                                                  \
}

#if HAVE_MMX_INLINE
PIXELS16(static, ff_avg, , , _mmxext)
DIRAC_PIXOP(put, ff_put, mmx)
DIRAC_PIXOP(avg, ff_avg, mmx)
#endif

#if HAVE_YASM
DIRAC_PIXOP(avg, ff_avg, mmxext)

void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_put_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_put_pixels16_sse2(dst, src[0], stride, h);
}

void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3)
        ff_avg_dirac_pixels16_c(dst, src, stride, h);
    else
        ff_avg_pixels16_sse2(dst, src[0], stride, h);
}

void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_put_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_put_pixels16_sse2(dst,      src[0],      stride, h);
        ff_put_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}

void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
    if (h & 3) {
        ff_avg_dirac_pixels32_c(dst, src, stride, h);
    } else {
        ff_avg_pixels16_sse2(dst,      src[0],      stride, h);
        ff_avg_pixels16_sse2(dst + 16, src[0] + 16, stride, h);
    }
}
#endif
#endif
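
/* Clip len floats from src into [min, max] and store them to dst, 16 floats
 * (four aligned 16-byte vectors) per iteration with maxps/minps; len is
 * expected to be a multiple of 16 and the buffers 16-byte aligned. */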
void ff_vector_clipf_sse(float *dst, const float *src,
                         float min, float max, int len)
{
    x86_reg i = (len - 16) * 4;

    __asm__ volatile (
        "movss %3, %%xmm4 \n\t"
        "movss %4, %%xmm5 \n\t"
        "shufps $0, %%xmm4, %%xmm4 \n\t"
        "shufps $0, %%xmm5, %%xmm5 \n\t"
        "1: \n\t"
        "movaps (%2, %0), %%xmm0 \n\t" // 3/1 on intel
        "movaps 16(%2, %0), %%xmm1 \n\t"
        "movaps 32(%2, %0), %%xmm2 \n\t"
        "movaps 48(%2, %0), %%xmm3 \n\t"
        "maxps %%xmm4, %%xmm0 \n\t"
        "maxps %%xmm4, %%xmm1 \n\t"
        "maxps %%xmm4, %%xmm2 \n\t"
        "maxps %%xmm4, %%xmm3 \n\t"
        "minps %%xmm5, %%xmm0 \n\t"
        "minps %%xmm5, %%xmm1 \n\t"
        "minps %%xmm5, %%xmm2 \n\t"
        "minps %%xmm5, %%xmm3 \n\t"
        "movaps %%xmm0, (%1, %0) \n\t"
        "movaps %%xmm1, 16(%1, %0) \n\t"
        "movaps %%xmm2, 32(%1, %0) \n\t"
        "movaps %%xmm3, 48(%1, %0) \n\t"
        "sub $64, %0 \n\t"
        "jge 1b \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}

#endif /* HAVE_INLINE_ASM */