me_cmp_init.c
/*
 * SIMD-optimized motion estimation
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h"

int ff_sum_abs_dctelem_mmx(int16_t *block);
int ff_sum_abs_dctelem_mmxext(int16_t *block);
int ff_sum_abs_dctelem_sse2(int16_t *block);
int ff_sum_abs_dctelem_ssse3(int16_t *block);
int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                ptrdiff_t stride, int h);
int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                 ptrdiff_t stride, int h);
int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h);
int ff_hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
int ff_hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h);
int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    ptrdiff_t stride, int h);
int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h);
int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h);
int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h);
int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h);
int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h);
int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                               ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             ptrdiff_t stride, int h);
int ff_vsad_intra8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h);
int ff_vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h);
int ff_vsad_intra16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h);
int ff_vsad8_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h);
int ff_vsad16_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                            ptrdiff_t stride, int h);
int ff_vsad16_approx_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h);
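
/* Prototypes for the 8x8 and 16x8 Hadamard-transformed difference metrics
 * implemented in external assembly, one pair per instruction set. */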
#define hadamard_func(cpu)                                                    \
    int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,           \
                                  uint8_t *src2, ptrdiff_t stride, int h);    \
    int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1,         \
                                    uint8_t *src2, ptrdiff_t stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

#if HAVE_X86ASM
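
/* Noise-preserving SSE: the plain SSE score is combined with the weighted
 * absolute difference in high-frequency "noise" energy between the two
 * blocks, so a candidate that changes the noise level of the source adds to
 * the cost. A NULL context falls back to ff_sse16_mmx and the default
 * nsse_weight of 8. */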
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h)
{
    int score1, score2;

    if (c)
        score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
    else
        score1 = ff_sse16_mmx(c, pix1, pix2, stride, h);
    score2 = ff_hf_noise16_mmx(pix1, stride, h) + ff_hf_noise8_mmx(pix1 + 8, stride, h)
           - ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2 + 8, stride, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h)
{
    int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
    int score2 = ff_hf_noise8_mmx(pix1, stride, h) -
                 ff_hf_noise8_mmx(pix2, stride, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

#endif /* HAVE_X86ASM */
#if HAVE_INLINE_ASM

static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                            ptrdiff_t stride, int h)
{
    int tmp;

    av_assert2(((uintptr_t) pix & 7) == 0);
    av_assert2((stride & 7) == 0);
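
    /* One SUM step handles two rows of 16 pixels: the absolute difference of
     * unsigned bytes is computed as (a -us b) | (b -us a), where -us is
     * saturating subtraction (psubusb), then widened to words against the
     * zero register mm7 and accumulated in mm6. */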
#define SUM(in0, in1, out0, out1)               \
    "movq (%0), %%mm2\n"                        \
    "movq 8(%0), %%mm3\n"                       \
    "add %2,%0\n"                               \
    "movq %%mm2, " #out0 "\n"                   \
    "movq %%mm3, " #out1 "\n"                   \
    "psubusb " #in0 ", %%mm2\n"                 \
    "psubusb " #in1 ", %%mm3\n"                 \
    "psubusb " #out0 ", " #in0 "\n"             \
    "psubusb " #out1 ", " #in1 "\n"             \
    "por %%mm2, " #in0 "\n"                     \
    "por %%mm3, " #in1 "\n"                     \
    "movq " #in0 ", %%mm2\n"                    \
    "movq " #in1 ", %%mm3\n"                    \
    "punpcklbw %%mm7, " #in0 "\n"               \
    "punpcklbw %%mm7, " #in1 "\n"               \
    "punpckhbw %%mm7, %%mm2\n"                  \
    "punpckhbw %%mm7, %%mm3\n"                  \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw %%mm3, %%mm2\n"                      \
    "paddw %%mm2, " #in0 "\n"                   \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pxor %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq 8(%0), %%mm1\n"
        "add %2, %0\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq %%mm0, %%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" (stride), "m" (h)
        : "%ecx");

    return tmp & 0xFFFF;
}
#undef SUM

static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h)
{
    int tmp;

    av_assert2(((uintptr_t)pix1 & 7) == 0);
    av_assert2(((uintptr_t)pix2 & 7) == 0);
    av_assert2((stride & 7) == 0);
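
    /* Same per-row accumulation as above, but on the difference pix1 - pix2.
     * The signed byte difference (psubb) is biased by XORing with 0x80 (mm7,
     * built below with pcmpeqw/psllw/packsswb), so the unsigned saturating
     * absolute-difference trick can be reused. */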
#define SUM(in0, in1, out0, out1)               \
    "movq (%0), %%mm2\n"                        \
    "movq (%1), " #out0 "\n"                    \
    "movq 8(%0), %%mm3\n"                       \
    "movq 8(%1), " #out1 "\n"                   \
    "add %3, %0\n"                              \
    "add %3, %1\n"                              \
    "psubb " #out0 ", %%mm2\n"                  \
    "psubb " #out1 ", %%mm3\n"                  \
    "pxor %%mm7, %%mm2\n"                       \
    "pxor %%mm7, %%mm3\n"                       \
    "movq %%mm2, " #out0 "\n"                   \
    "movq %%mm3, " #out1 "\n"                   \
    "psubusb " #in0 ", %%mm2\n"                 \
    "psubusb " #in1 ", %%mm3\n"                 \
    "psubusb " #out0 ", " #in0 "\n"             \
    "psubusb " #out1 ", " #in1 "\n"             \
    "por %%mm2, " #in0 "\n"                     \
    "por %%mm3, " #in1 "\n"                     \
    "movq " #in0 ", %%mm2\n"                    \
    "movq " #in1 ", %%mm3\n"                    \
    "punpcklbw %%mm7, " #in0 "\n"               \
    "punpcklbw %%mm7, " #in1 "\n"               \
    "punpckhbw %%mm7, %%mm2\n"                  \
    "punpckhbw %%mm7, %%mm3\n"                  \
    "paddw " #in1 ", " #in0 "\n"                \
    "paddw %%mm3, %%mm2\n"                      \
    "paddw %%mm2, " #in0 "\n"                   \
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4, %%ecx\n"
        "pxor %%mm6, %%mm6\n"
        "pcmpeqw %%mm7, %%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0), %%mm0\n"
        "movq (%1), %%mm2\n"
        "movq 8(%0), %%mm1\n"
        "movq 8(%1), %%mm3\n"
        "add %3, %0\n"
        "add %3, %1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6, %%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6, %%mm0\n"
        "movq %%mm0, %%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6, %%mm0\n"
        "movd %%mm0, %2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" (stride), "m" (h)
        : "%ecx");

    return tmp & 0x7FFF;
}
#undef SUM
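
/* Rounding constants for the averaging SAD variants: round_tab[1] is added
 * before the >>1 of the two-sample (x2/y2) average, round_tab[2] before the
 * >>2 of the four-sample (xy2) average. */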
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};
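
/* 8xh SAD of blk1 against blk2. The source operands are the block pointers
 * advanced by stride * h, and the negative offset in %eax counts up towards
 * zero ("js 1b"), two rows per iteration. mm7 must be zero and mm6 holds the
 * running word sums; the PIX_SAD wrappers below set these up. */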
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
                              ptrdiff_t stride, int h)
{
    x86_reg len = -stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm2 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm4 \n\t"
        "add %3, %%"FF_REG_a"           \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm5 \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"FF_REG_a"           \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
}
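
/* 8xh SAD against the rounded average of blk1a and blk1b; used for the x2 and
 * y2 half-pel cases via the thin wrappers further down. mm5 must hold
 * round_tab[1] on entry. */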
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                              ptrdiff_t stride, int h)
{
    x86_reg len = -stride * h;
    __asm__ volatile (
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm1 \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm2 \n\t"
        "movq (%2, %%"FF_REG_a"), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"FF_REG_a"), %%mm4 \n\t"
        "movq (%3, %%"FF_REG_a"), %%mm2 \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"FF_REG_a"           \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" (stride));
}
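
/* 8xh SAD against the rounded average of four neighbours (xy2 half-pel).
 * The widened horizontal sums of the current reference row are kept in
 * mm0/mm1 and reused as the "previous row" of the next iteration;
 * round_tab[2] provides the rounding for the >>2. */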
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
                              ptrdiff_t stride, int h)
{
    x86_reg len = -stride * h;
    __asm__ volatile (
        "movq  (%1, %%"FF_REG_a"), %%mm0\n\t"
        "movq 1(%1, %%"FF_REG_a"), %%mm2\n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq  (%2, %%"FF_REG_a"), %%mm2\n\t"
        "movq 1(%2, %%"FF_REG_a"), %%mm4\n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq %5, %%mm5                 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"FF_REG_a"), %%mm4 \n\t"
        "movq (%3, %%"FF_REG_a"), %%mm5 \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"FF_REG_a"           \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" (stride), "m" (round_tab[2]));
}
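
/* Reduce the four word sums accumulated in mm6 to a single scalar. */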
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile (
        "movq %%mm6, %%mm0  \n\t"
        "psrlq $32, %%mm6   \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "movq %%mm6, %%mm0  \n\t"
        "psrlq $16, %%mm6   \n\t"
        "paddw %%mm0, %%mm6 \n\t"
        "movd %%mm6, %0     \n\t"
        : "=r" (ret));
    return ret & 0xFFFF;
}

static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
                                ptrdiff_t stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}

static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
                                ptrdiff_t stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
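
/* Generate the MECmpContext entry points: each wrapper clears the accumulator
 * (mm6) and the zero register (mm7), loads round_tab[1] into mm5 where the
 * averaging helpers need it, runs the 8-pixel-wide helpers (twice, side by
 * side, for the 16-wide variants) and reduces the result with sum_mmx(). */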
#define PIX_SAD(suf)                                                    \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2,               \
                        uint8_t *blk1, ptrdiff_t stride, int h)         \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, ptrdiff_t stride, int h)      \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
                           uint8_t *blk1, ptrdiff_t stride, int h)      \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);                            \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, ptrdiff_t stride, int h)     \
{                                                                       \
    av_assert2(h == 8);                                                 \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
                         uint8_t *blk1, ptrdiff_t stride, int h)        \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        :);                                                             \
                                                                        \
    sad8_1_ ## suf(blk1,     blk2,     stride, h);                      \
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, ptrdiff_t stride, int h)     \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_x2a_ ## suf(blk1,     blk2,     stride, h);                    \
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                            uint8_t *blk1, ptrdiff_t stride, int h)     \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        "movq %0, %%mm5        \n\t"                                    \
        :: "m" (round_tab[1]));                                         \
                                                                        \
    sad8_y2a_ ## suf(blk1,     blk2,     stride, h);                    \
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \
                                                                        \
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
                             uint8_t *blk1, ptrdiff_t stride, int h)    \
{                                                                       \
    __asm__ volatile (                                                  \
        "pxor %%mm7, %%mm7     \n\t"                                    \
        "pxor %%mm6, %%mm6     \n\t"                                    \
        ::);                                                            \
                                                                        \
    sad8_4_ ## suf(blk1,     blk2,     stride, h);                      \
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
                                                                        \
    return sum_ ## suf();                                               \
}                                                                       \

PIX_SAD(mmx)

#endif /* HAVE_INLINE_ASM */
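
/* Function pointers are assigned in order of increasing capability, so when
 * several instruction sets are available the later (faster) assignment wins. */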
av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;

        c->vsad[4] = vsad_intra16_mmx;

        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmx;
        c->sse[0]            = ff_sse16_mmx;
        c->sse[1]            = ff_sse8_mmx;
#if HAVE_X86ASM
        c->nsse[0]           = nsse16_mmx;
        c->nsse[1]           = nsse8_mmx;
#endif
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmxext;

        c->sad[0] = ff_sad16_mmxext;
        c->sad[1] = ff_sad8_mmxext;

        c->pix_abs[0][0] = ff_sad16_mmxext;
        c->pix_abs[0][1] = ff_sad16_x2_mmxext;
        c->pix_abs[0][2] = ff_sad16_y2_mmxext;
        c->pix_abs[1][0] = ff_sad8_mmxext;
        c->pix_abs[1][1] = ff_sad8_x2_mmxext;
        c->pix_abs[1][2] = ff_sad8_y2_mmxext;

        c->vsad[4] = ff_vsad_intra16_mmxext;
        c->vsad[5] = ff_vsad_intra8_mmxext;

        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][3] = ff_sad16_approx_xy2_mmxext;
            c->pix_abs[1][3] = ff_sad8_approx_xy2_mmxext;

            c->vsad[0] = ff_vsad16_approx_mmxext;
            c->vsad[1] = ff_vsad8_approx_mmxext;
        }
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0]            = ff_sse16_sse2;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_sse2;
#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
            c->sad[0]        = ff_sad16_sse2;
            c->pix_abs[0][0] = ff_sad16_sse2;
            c->pix_abs[0][1] = ff_sad16_x2_sse2;
            c->pix_abs[0][2] = ff_sad16_y2_sse2;

            c->vsad[4]       = ff_vsad_intra16_sse2;
            if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
                c->pix_abs[0][3] = ff_sad16_approx_xy2_sse2;
                c->vsad[0]       = ff_vsad16_approx_sse2;
            }
        }
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_ssse3;
#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
#endif
    }
}