dsputilenc_mmx.c

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"

static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
    "mov $-128, %%"REG_a" \n\t"
    "pxor %%mm7, %%mm7 \n\t"
    ASMALIGN(4)
    "1: \n\t"
    "movq (%0), %%mm0 \n\t"
    "movq (%0, %2), %%mm2 \n\t"
    "movq %%mm0, %%mm1 \n\t"
    "movq %%mm2, %%mm3 \n\t"
    "punpcklbw %%mm7, %%mm0 \n\t"
    "punpckhbw %%mm7, %%mm1 \n\t"
    "punpcklbw %%mm7, %%mm2 \n\t"
    "punpckhbw %%mm7, %%mm3 \n\t"
    "movq %%mm0, (%1, %%"REG_a") \n\t"
    "movq %%mm1, 8(%1, %%"REG_a") \n\t"
    "movq %%mm2, 16(%1, %%"REG_a") \n\t"
    "movq %%mm3, 24(%1, %%"REG_a") \n\t"
    "add %3, %0 \n\t"
    "add $32, %%"REG_a" \n\t"
    "js 1b \n\t"
    : "+r" (pixels)
    : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
    : "%"REG_a
    );
}

static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
    "pxor %%xmm7, %%xmm7 \n\t"
    "movq (%0), %%xmm0 \n\t"
    "movq (%0, %2), %%xmm1 \n\t"
    "movq (%0, %2,2), %%xmm2 \n\t"
    "movq (%0, %3), %%xmm3 \n\t"
    "lea (%0,%2,4), %0 \n\t"
    "punpcklbw %%xmm7, %%xmm0 \n\t"
    "punpcklbw %%xmm7, %%xmm1 \n\t"
    "punpcklbw %%xmm7, %%xmm2 \n\t"
    "punpcklbw %%xmm7, %%xmm3 \n\t"
    "movdqa %%xmm0, (%1) \n\t"
    "movdqa %%xmm1, 16(%1) \n\t"
    "movdqa %%xmm2, 32(%1) \n\t"
    "movdqa %%xmm3, 48(%1) \n\t"
    "movq (%0), %%xmm0 \n\t"
    "movq (%0, %2), %%xmm1 \n\t"
    "movq (%0, %2,2), %%xmm2 \n\t"
    "movq (%0, %3), %%xmm3 \n\t"
    "punpcklbw %%xmm7, %%xmm0 \n\t"
    "punpcklbw %%xmm7, %%xmm1 \n\t"
    "punpcklbw %%xmm7, %%xmm2 \n\t"
    "punpcklbw %%xmm7, %%xmm3 \n\t"
    "movdqa %%xmm0, 64(%1) \n\t"
    "movdqa %%xmm1, 80(%1) \n\t"
    "movdqa %%xmm2, 96(%1) \n\t"
    "movdqa %%xmm3, 112(%1) \n\t"
    : "+r" (pixels)
    : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)
    );
}

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    __asm__ volatile(
    "pxor %%mm7, %%mm7 \n\t"
    "mov $-128, %%"REG_a" \n\t"
    ASMALIGN(4)
    "1: \n\t"
    "movq (%0), %%mm0 \n\t"
    "movq (%1), %%mm2 \n\t"
    "movq %%mm0, %%mm1 \n\t"
    "movq %%mm2, %%mm3 \n\t"
    "punpcklbw %%mm7, %%mm0 \n\t"
    "punpckhbw %%mm7, %%mm1 \n\t"
    "punpcklbw %%mm7, %%mm2 \n\t"
    "punpckhbw %%mm7, %%mm3 \n\t"
    "psubw %%mm2, %%mm0 \n\t"
    "psubw %%mm3, %%mm1 \n\t"
    "movq %%mm0, (%2, %%"REG_a") \n\t"
    "movq %%mm1, 8(%2, %%"REG_a") \n\t"
    "add %3, %0 \n\t"
    "add %3, %1 \n\t"
    "add $16, %%"REG_a" \n\t"
    "jnz 1b \n\t"
    : "+r" (s1), "+r" (s2)
    : "r" (block+64), "r" ((x86_reg)stride)
    : "%"REG_a
    );
}

static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    x86_reg index= -line_size*h;
    __asm__ volatile(
    "pxor %%mm7, %%mm7 \n\t"
    "pxor %%mm6, %%mm6 \n\t"
    "1: \n\t"
    "movq (%2, %1), %%mm0 \n\t"
    "movq (%2, %1), %%mm1 \n\t"
    "movq 8(%2, %1), %%mm2 \n\t"
    "movq 8(%2, %1), %%mm3 \n\t"
    "punpcklbw %%mm7, %%mm0 \n\t"
    "punpckhbw %%mm7, %%mm1 \n\t"
    "punpcklbw %%mm7, %%mm2 \n\t"
    "punpckhbw %%mm7, %%mm3 \n\t"
    "paddw %%mm0, %%mm1 \n\t"
    "paddw %%mm2, %%mm3 \n\t"
    "paddw %%mm1, %%mm3 \n\t"
    "paddw %%mm3, %%mm6 \n\t"
    "add %3, %1 \n\t"
    " js 1b \n\t"
    "movq %%mm6, %%mm5 \n\t"
    "psrlq $32, %%mm6 \n\t"
    "paddw %%mm5, %%mm6 \n\t"
    "movq %%mm6, %%mm5 \n\t"
    "psrlq $16, %%mm6 \n\t"
    "paddw %%mm5, %%mm6 \n\t"
    "movd %%mm6, %0 \n\t"
    "andl $0xFFFF, %0 \n\t"
    : "=&r" (sum), "+r" (index)
    : "r" (pix - index), "r" ((x86_reg)line_size)
    );
    return sum;
}
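
/* pix_sum16_mmx() above returns the plain sum of all 256 pixels of a 16x16
 * block. A rough scalar sketch of the same computation (for illustration
 * only; variable names are not from this file):
 *
 *     int sum = 0;
 *     for (int y = 0; y < 16; y++)
 *         for (int x = 0; x < 16; x++)
 *             sum += pix[y * line_size + x];
 *
 * The asm accumulates 16-bit partial sums in %mm6 and folds them with the two
 * shift/add steps at the end, which is why the result is masked to 16 bits. */
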
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    __asm__ volatile (
    "movl $16,%%ecx\n"
    "pxor %%mm0,%%mm0\n"
    "pxor %%mm7,%%mm7\n"
    "1:\n"
    "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
    "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
    "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
    "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
    "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
    "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
    "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
    "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
    "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
    "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */
    "pmaddwd %%mm3,%%mm3\n"
    "pmaddwd %%mm4,%%mm4\n"
    "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                    pix2^2+pix3^2+pix6^2+pix7^2) */
    "paddd %%mm3,%%mm4\n"
    "paddd %%mm2,%%mm7\n"
    "add %2, %0\n"
    "paddd %%mm4,%%mm7\n"
    "dec %%ecx\n"
    "jnz 1b\n"
    "movq %%mm7,%%mm1\n"
    "psrlq $32, %%mm7\n" /* shift hi dword to lo */
    "paddd %%mm7,%%mm1\n"
    "movd %%mm1,%1\n"
    : "+r" (pix), "=r"(tmp) : "r" ((x86_reg)line_size) : "%ecx" );
    return tmp;
}
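
/* pix_norm1_mmx() computes the sum of squares of a 16x16 block, roughly
 * (scalar sketch for illustration):
 *
 *     int sum = 0;
 *     for (int y = 0; y < 16; y++)
 *         for (int x = 0; x < 16; x++)
 *             sum += pix[y * line_size + x] * pix[y * line_size + x];
 *
 * pmaddwd squares pairs of 16-bit pixels and adds each pair into one 32-bit
 * lane, so only the final 32-bit fold (psrlq $32 + paddd) is needed. */
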
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
    "movl %4,%%ecx\n"
    "shr $1,%%ecx\n"
    "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
    "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
    "1:\n"
    "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
    "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
    "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
    "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
    /* todo: mm1-mm2, mm3-mm4 */
    /* algo: subtract mm1 from mm2 with saturation and vice versa */
    /* OR the results to get absolute difference */
    "movq %%mm1,%%mm5\n"
    "movq %%mm3,%%mm6\n"
    "psubusb %%mm2,%%mm1\n"
    "psubusb %%mm4,%%mm3\n"
    "psubusb %%mm5,%%mm2\n"
    "psubusb %%mm6,%%mm4\n"
    "por %%mm1,%%mm2\n"
    "por %%mm3,%%mm4\n"
    /* now convert to 16-bit vectors so we can square them */
    "movq %%mm2,%%mm1\n"
    "movq %%mm4,%%mm3\n"
    "punpckhbw %%mm0,%%mm2\n"
    "punpckhbw %%mm0,%%mm4\n"
    "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
    "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
    "pmaddwd %%mm2,%%mm2\n"
    "pmaddwd %%mm4,%%mm4\n"
    "pmaddwd %%mm1,%%mm1\n"
    "pmaddwd %%mm3,%%mm3\n"
    "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
    "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
    "paddd %%mm2,%%mm1\n"
    "paddd %%mm4,%%mm3\n"
    "paddd %%mm1,%%mm7\n"
    "paddd %%mm3,%%mm7\n"
    "decl %%ecx\n"
    "jnz 1b\n"
    "movq %%mm7,%%mm1\n"
    "psrlq $32, %%mm7\n" /* shift hi dword to lo */
    "paddd %%mm7,%%mm1\n"
    "movd %%mm1,%2\n"
    : "+r" (pix1), "+r" (pix2), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp;
}
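
/* sse8_mmx() above (and sse16_mmx/sse16_sse2 below) compute the sum of
 * squared errors between two blocks, roughly (scalar sketch for
 * illustration):
 *
 *     int sum = 0;
 *     for (int y = 0; y < h; y++)
 *         for (int x = 0; x < w; x++) {   // w = 8 or 16
 *             int d = pix1[y * line_size + x] - pix2[y * line_size + x];
 *             sum += d * d;
 *         }
 *
 * There is no packed byte subtraction that keeps the sign, so the asm uses
 * the classic trick |a-b| = (a -us b) | (b -us a), where "-us" is psubusb
 * (unsigned subtraction saturating at 0); squaring |a-b| equals squaring
 * (a-b), so the sign is not needed. */
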
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
    "movl %4,%%ecx\n"
    "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
    "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
    "1:\n"
    "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
    "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
    "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
    "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
    /* todo: mm1-mm2, mm3-mm4 */
    /* algo: subtract mm1 from mm2 with saturation and vice versa */
    /* OR the results to get absolute difference */
    "movq %%mm1,%%mm5\n"
    "movq %%mm3,%%mm6\n"
    "psubusb %%mm2,%%mm1\n"
    "psubusb %%mm4,%%mm3\n"
    "psubusb %%mm5,%%mm2\n"
    "psubusb %%mm6,%%mm4\n"
    "por %%mm1,%%mm2\n"
    "por %%mm3,%%mm4\n"
    /* now convert to 16-bit vectors so we can square them */
    "movq %%mm2,%%mm1\n"
    "movq %%mm4,%%mm3\n"
    "punpckhbw %%mm0,%%mm2\n"
    "punpckhbw %%mm0,%%mm4\n"
    "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
    "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
    "pmaddwd %%mm2,%%mm2\n"
    "pmaddwd %%mm4,%%mm4\n"
    "pmaddwd %%mm1,%%mm1\n"
    "pmaddwd %%mm3,%%mm3\n"
    "add %3,%0\n"
    "add %3,%1\n"
    "paddd %%mm2,%%mm1\n"
    "paddd %%mm4,%%mm3\n"
    "paddd %%mm1,%%mm7\n"
    "paddd %%mm3,%%mm7\n"
    "decl %%ecx\n"
    "jnz 1b\n"
    "movq %%mm7,%%mm1\n"
    "psrlq $32, %%mm7\n" /* shift hi dword to lo */
    "paddd %%mm7,%%mm1\n"
    "movd %%mm1,%2\n"
    : "+r" (pix1), "+r" (pix2), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp;
}

static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
    "shr $1,%2\n"
    "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
    "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
    "1:\n"
    "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
    "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
    "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
    "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
    /* todo: mm1-mm2, mm3-mm4 */
    /* algo: subtract mm1 from mm2 with saturation and vice versa */
    /* OR the results to get absolute difference */
    "movdqa %%xmm1,%%xmm5\n"
    "movdqa %%xmm3,%%xmm6\n"
    "psubusb %%xmm2,%%xmm1\n"
    "psubusb %%xmm4,%%xmm3\n"
    "psubusb %%xmm5,%%xmm2\n"
    "psubusb %%xmm6,%%xmm4\n"
    "por %%xmm1,%%xmm2\n"
    "por %%xmm3,%%xmm4\n"
    /* now convert to 16-bit vectors so we can square them */
    "movdqa %%xmm2,%%xmm1\n"
    "movdqa %%xmm4,%%xmm3\n"
    "punpckhbw %%xmm0,%%xmm2\n"
    "punpckhbw %%xmm0,%%xmm4\n"
    "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
    "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
    "pmaddwd %%xmm2,%%xmm2\n"
    "pmaddwd %%xmm4,%%xmm4\n"
    "pmaddwd %%xmm1,%%xmm1\n"
    "pmaddwd %%xmm3,%%xmm3\n"
    "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
    "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
    "paddd %%xmm2,%%xmm1\n"
    "paddd %%xmm4,%%xmm3\n"
    "paddd %%xmm1,%%xmm7\n"
    "paddd %%xmm3,%%xmm7\n"
    "decl %2\n"
    "jnz 1b\n"
    "movdqa %%xmm7,%%xmm1\n"
    "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
    "paddd %%xmm1,%%xmm7\n"
    "movdqa %%xmm7,%%xmm1\n"
    "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
    "paddd %%xmm1,%%xmm7\n"
    "movd %%xmm7,%3\n"
    : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
    : "r" ((x86_reg)line_size));
    return tmp;
}

static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    __asm__ volatile (
    "movl %3,%%ecx\n"
    "pxor %%mm7,%%mm7\n"
    "pxor %%mm6,%%mm6\n"
    "movq (%0),%%mm0\n"
    "movq %%mm0, %%mm1\n"
    "psllq $8, %%mm0\n"
    "psrlq $8, %%mm1\n"
    "psrlq $8, %%mm0\n"
    "movq %%mm0, %%mm2\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm0\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm2\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm0\n"
    "psubw %%mm3, %%mm2\n"
    "add %2,%0\n"
    "movq (%0),%%mm4\n"
    "movq %%mm4, %%mm1\n"
    "psllq $8, %%mm4\n"
    "psrlq $8, %%mm1\n"
    "psrlq $8, %%mm4\n"
    "movq %%mm4, %%mm5\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm4\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm5\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm4\n"
    "psubw %%mm3, %%mm5\n"
    "psubw %%mm4, %%mm0\n"
    "psubw %%mm5, %%mm2\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm0, %%mm3\n\t"
    "pcmpgtw %%mm2, %%mm1\n\t"
    "pxor %%mm3, %%mm0\n"
    "pxor %%mm1, %%mm2\n"
    "psubw %%mm3, %%mm0\n"
    "psubw %%mm1, %%mm2\n"
    "paddw %%mm0, %%mm2\n"
    "paddw %%mm2, %%mm6\n"
    "add %2,%0\n"
    "1:\n"
    "movq (%0),%%mm0\n"
    "movq %%mm0, %%mm1\n"
    "psllq $8, %%mm0\n"
    "psrlq $8, %%mm1\n"
    "psrlq $8, %%mm0\n"
    "movq %%mm0, %%mm2\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm0\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm2\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm0\n"
    "psubw %%mm3, %%mm2\n"
    "psubw %%mm0, %%mm4\n"
    "psubw %%mm2, %%mm5\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm4, %%mm3\n\t"
    "pcmpgtw %%mm5, %%mm1\n\t"
    "pxor %%mm3, %%mm4\n"
    "pxor %%mm1, %%mm5\n"
    "psubw %%mm3, %%mm4\n"
    "psubw %%mm1, %%mm5\n"
    "paddw %%mm4, %%mm5\n"
    "paddw %%mm5, %%mm6\n"
    "add %2,%0\n"
    "movq (%0),%%mm4\n"
    "movq %%mm4, %%mm1\n"
    "psllq $8, %%mm4\n"
    "psrlq $8, %%mm1\n"
    "psrlq $8, %%mm4\n"
    "movq %%mm4, %%mm5\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm4\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm5\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm4\n"
    "psubw %%mm3, %%mm5\n"
    "psubw %%mm4, %%mm0\n"
    "psubw %%mm5, %%mm2\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm0, %%mm3\n\t"
    "pcmpgtw %%mm2, %%mm1\n\t"
    "pxor %%mm3, %%mm0\n"
    "pxor %%mm1, %%mm2\n"
    "psubw %%mm3, %%mm0\n"
    "psubw %%mm1, %%mm2\n"
    "paddw %%mm0, %%mm2\n"
    "paddw %%mm2, %%mm6\n"
    "add %2,%0\n"
    "subl $2, %%ecx\n"
    " jnz 1b\n"
    "movq %%mm6, %%mm0\n"
    "punpcklwd %%mm7,%%mm0\n"
    "punpckhwd %%mm7,%%mm6\n"
    "paddd %%mm0, %%mm6\n"
    "movq %%mm6,%%mm0\n"
    "psrlq $32, %%mm6\n"
    "paddd %%mm6,%%mm0\n"
    "movd %%mm0,%1\n"
    : "+r" (pix1), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "g" (h-2)
    : "%ecx");
    return tmp;
}

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    __asm__ volatile (
    "movl %3,%%ecx\n"
    "pxor %%mm7,%%mm7\n"
    "pxor %%mm6,%%mm6\n"
    "movq (%0),%%mm0\n"
    "movq 1(%0),%%mm1\n"
    "movq %%mm0, %%mm2\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm0\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm2\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm0\n"
    "psubw %%mm3, %%mm2\n"
    "add %2,%0\n"
    "movq (%0),%%mm4\n"
    "movq 1(%0),%%mm1\n"
    "movq %%mm4, %%mm5\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm4\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm5\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm4\n"
    "psubw %%mm3, %%mm5\n"
    "psubw %%mm4, %%mm0\n"
    "psubw %%mm5, %%mm2\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm0, %%mm3\n\t"
    "pcmpgtw %%mm2, %%mm1\n\t"
    "pxor %%mm3, %%mm0\n"
    "pxor %%mm1, %%mm2\n"
    "psubw %%mm3, %%mm0\n"
    "psubw %%mm1, %%mm2\n"
    "paddw %%mm0, %%mm2\n"
    "paddw %%mm2, %%mm6\n"
    "add %2,%0\n"
    "1:\n"
    "movq (%0),%%mm0\n"
    "movq 1(%0),%%mm1\n"
    "movq %%mm0, %%mm2\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm0\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm2\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm0\n"
    "psubw %%mm3, %%mm2\n"
    "psubw %%mm0, %%mm4\n"
    "psubw %%mm2, %%mm5\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm4, %%mm3\n\t"
    "pcmpgtw %%mm5, %%mm1\n\t"
    "pxor %%mm3, %%mm4\n"
    "pxor %%mm1, %%mm5\n"
    "psubw %%mm3, %%mm4\n"
    "psubw %%mm1, %%mm5\n"
    "paddw %%mm4, %%mm5\n"
    "paddw %%mm5, %%mm6\n"
    "add %2,%0\n"
    "movq (%0),%%mm4\n"
    "movq 1(%0),%%mm1\n"
    "movq %%mm4, %%mm5\n"
    "movq %%mm1, %%mm3\n"
    "punpcklbw %%mm7,%%mm4\n"
    "punpcklbw %%mm7,%%mm1\n"
    "punpckhbw %%mm7,%%mm5\n"
    "punpckhbw %%mm7,%%mm3\n"
    "psubw %%mm1, %%mm4\n"
    "psubw %%mm3, %%mm5\n"
    "psubw %%mm4, %%mm0\n"
    "psubw %%mm5, %%mm2\n"
    "pxor %%mm3, %%mm3\n"
    "pxor %%mm1, %%mm1\n"
    "pcmpgtw %%mm0, %%mm3\n\t"
    "pcmpgtw %%mm2, %%mm1\n\t"
    "pxor %%mm3, %%mm0\n"
    "pxor %%mm1, %%mm2\n"
    "psubw %%mm3, %%mm0\n"
    "psubw %%mm1, %%mm2\n"
    "paddw %%mm0, %%mm2\n"
    "paddw %%mm2, %%mm6\n"
    "add %2,%0\n"
    "subl $2, %%ecx\n"
    " jnz 1b\n"
    "movq %%mm6, %%mm0\n"
    "punpcklwd %%mm7,%%mm0\n"
    "punpckhwd %%mm7,%%mm6\n"
    "paddd %%mm0, %%mm6\n"
    "movq %%mm6,%%mm0\n"
    "psrlq $32, %%mm6\n"
    "paddd %%mm6,%%mm0\n"
    "movd %%mm0,%1\n"
    : "+r" (pix1), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "g" (h-2)
    : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
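
/* hf_noise8_mmx()/hf_noise16_mmx() estimate the high-frequency "noise" of a
 * block: they accumulate the absolute vertical change of the horizontal
 * pixel gradients. Roughly, as a scalar sketch (illustrative only, edge
 * handling in the asm differs slightly at the block borders):
 *
 *     int score = 0;
 *     for (int y = 1; y < h; y++)
 *         for (int x = 0; x < w - 1; x++)   // w = 8 or 16
 *             score += FFABS((pix[ y   *line_size+x] - pix[ y   *line_size+x+1])
 *                          - (pix[(y-1)*line_size+x] - pix[(y-1)*line_size+x+1]));
 *
 * The sign handling uses the pcmpgtw/pxor/psubw sequence, the usual MMX idiom
 * for the absolute value of signed words. */
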
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;
    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}
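
/* The noise-preserving SSE comparison above is, in formula form,
 *
 *     nsse(a, b) = sse(a, b) + nsse_weight * |hf_noise(a) - hf_noise(b)|
 *
 * with a default weight of 8 when no context is available, so candidate
 * blocks whose high-frequency energy differs from the source are penalised
 * even when their plain SSE is low. */
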
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;
    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n"\
"movq 8(%0), %%mm3\n"\
"add %2,%0\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
    __asm__ volatile (
    "movl %3,%%ecx\n"
    "pxor %%mm6,%%mm6\n"
    "pxor %%mm7,%%mm7\n"
    "movq (%0),%%mm0\n"
    "movq 8(%0),%%mm1\n"
    "add %2,%0\n"
    "jmp 2f\n"
    "1:\n"
    SUM(%%mm4, %%mm5, %%mm0, %%mm1)
    "2:\n"
    SUM(%%mm0, %%mm1, %%mm4, %%mm5)
    "subl $2, %%ecx\n"
    "jnz 1b\n"
    "movq %%mm6,%%mm0\n"
    "psrlq $32, %%mm6\n"
    "paddw %%mm6,%%mm0\n"
    "movq %%mm0,%%mm6\n"
    "psrlq $16, %%mm0\n"
    "paddw %%mm6,%%mm0\n"
    "movd %%mm0,%1\n"
    : "+r" (pix), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;
    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n"\
"movq 8(%0), " #out1 "\n"\
"add %2,%0\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
    __asm__ volatile (
    "movl %3,%%ecx\n"
    "pxor %%mm6,%%mm6\n"
    "pxor %%mm7,%%mm7\n"
    "movq (%0),%%mm0\n"
    "movq 8(%0),%%mm1\n"
    "add %2,%0\n"
    "jmp 2f\n"
    "1:\n"
    SUM(%%mm4, %%mm5, %%mm0, %%mm1)
    "2:\n"
    SUM(%%mm0, %%mm1, %%mm4, %%mm5)
    "subl $2, %%ecx\n"
    "jnz 1b\n"
    "movd %%mm6,%1\n"
    : "+r" (pix), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0),%%mm2\n"\
"movq (%1)," #out0 "\n"\
"movq 8(%0),%%mm3\n"\
"movq 8(%1)," #out1 "\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb " #out0 ", %%mm2\n"\
"psubb " #out1 ", %%mm3\n"\
"pxor %%mm7, %%mm2\n"\
"pxor %%mm7, %%mm3\n"\
"movq %%mm2, " #out0 "\n"\
"movq %%mm3, " #out1 "\n"\
"psubusb " #in0 ", %%mm2\n"\
"psubusb " #in1 ", %%mm3\n"\
"psubusb " #out0 ", " #in0 "\n"\
"psubusb " #out1 ", " #in1 "\n"\
"por %%mm2, " #in0 "\n"\
"por %%mm3, " #in1 "\n"\
"movq " #in0 ", %%mm2\n"\
"movq " #in1 ", %%mm3\n"\
"punpcklbw %%mm7, " #in0 "\n"\
"punpcklbw %%mm7, " #in1 "\n"\
"punpckhbw %%mm7, %%mm2\n"\
"punpckhbw %%mm7, %%mm3\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw %%mm3, %%mm2\n"\
"paddw %%mm2, " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
    __asm__ volatile (
    "movl %4,%%ecx\n"
    "pxor %%mm6,%%mm6\n"
    "pcmpeqw %%mm7,%%mm7\n"
    "psllw $15, %%mm7\n"
    "packsswb %%mm7, %%mm7\n"
    "movq (%0),%%mm0\n"
    "movq (%1),%%mm2\n"
    "movq 8(%0),%%mm1\n"
    "movq 8(%1),%%mm3\n"
    "add %3,%0\n"
    "add %3,%1\n"
    "psubb %%mm2, %%mm0\n"
    "psubb %%mm3, %%mm1\n"
    "pxor %%mm7, %%mm0\n"
    "pxor %%mm7, %%mm1\n"
    "jmp 2f\n"
    "1:\n"
    SUM(%%mm4, %%mm5, %%mm0, %%mm1)
    "2:\n"
    SUM(%%mm0, %%mm1, %%mm4, %%mm5)
    "subl $2, %%ecx\n"
    "jnz 1b\n"
    "movq %%mm6,%%mm0\n"
    "psrlq $32, %%mm6\n"
    "paddw %%mm6,%%mm0\n"
    "movq %%mm0,%%mm6\n"
    "psrlq $16, %%mm0\n"
    "paddw %%mm6,%%mm0\n"
    "movd %%mm0,%2\n"
    : "+r" (pix1), "+r" (pix2), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);
#define SUM(in0, in1, out0, out1) \
"movq (%0)," #out0 "\n"\
"movq (%1),%%mm2\n"\
"movq 8(%0)," #out1 "\n"\
"movq 8(%1),%%mm3\n"\
"add %3,%0\n"\
"add %3,%1\n"\
"psubb %%mm2, " #out0 "\n"\
"psubb %%mm3, " #out1 "\n"\
"pxor %%mm7, " #out0 "\n"\
"pxor %%mm7, " #out1 "\n"\
"psadbw " #out0 ", " #in0 "\n"\
"psadbw " #out1 ", " #in1 "\n"\
"paddw " #in1 ", " #in0 "\n"\
"paddw " #in0 ", %%mm6\n"
    __asm__ volatile (
    "movl %4,%%ecx\n"
    "pxor %%mm6,%%mm6\n"
    "pcmpeqw %%mm7,%%mm7\n"
    "psllw $15, %%mm7\n"
    "packsswb %%mm7, %%mm7\n"
    "movq (%0),%%mm0\n"
    "movq (%1),%%mm2\n"
    "movq 8(%0),%%mm1\n"
    "movq 8(%1),%%mm3\n"
    "add %3,%0\n"
    "add %3,%1\n"
    "psubb %%mm2, %%mm0\n"
    "psubb %%mm3, %%mm1\n"
    "pxor %%mm7, %%mm0\n"
    "pxor %%mm7, %%mm1\n"
    "jmp 2f\n"
    "1:\n"
    SUM(%%mm4, %%mm5, %%mm0, %%mm1)
    "2:\n"
    SUM(%%mm0, %%mm1, %%mm4, %%mm5)
    "subl $2, %%ecx\n"
    "jnz 1b\n"
    "movd %%mm6,%2\n"
    : "+r" (pix1), "+r" (pix2), "=r"(tmp)
    : "r" ((x86_reg)line_size) , "m" (h)
    : "%ecx");
    return tmp;
}
#undef SUM
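
/* The vsad functions above are vertical SAD metrics; roughly, per pixel
 * column x = 0..15 and row y = 0..h-2 (scalar sketch for illustration):
 *
 *     // vsad_intra16: vertical activity of one block
 *     score += FFABS(pix[y*line_size + x] - pix[(y+1)*line_size + x]);
 *
 *     // vsad16: vertical activity of the difference signal pix1 - pix2
 *     score += FFABS( pix1[ y   *line_size+x] - pix2[ y   *line_size+x]
 *                   - pix1[(y+1)*line_size+x] + pix2[(y+1)*line_size+x]);
 *
 * In the inter versions the bytes are XORed with 0x80 so the signed per-pixel
 * difference can go through the unsigned psubusb/por absolute-value trick
 * (or psadbw in the MMX2 variants). */
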
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    if(w>=16)
        __asm__ volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
        );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    x86_reg i=0;
    uint8_t l, lt;
    __asm__ volatile(
    "1: \n\t"
    "movq -1(%1, %0), %%mm0 \n\t" // LT
    "movq (%1, %0), %%mm1 \n\t" // T
    "movq -1(%2, %0), %%mm2 \n\t" // L
    "movq (%2, %0), %%mm3 \n\t" // X
    "movq %%mm2, %%mm4 \n\t" // L
    "psubb %%mm0, %%mm2 \n\t"
    "paddb %%mm1, %%mm2 \n\t" // L + T - LT
    "movq %%mm4, %%mm5 \n\t" // L
    "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
    "pminub %%mm5, %%mm1 \n\t" // min(T, L)
    "pminub %%mm2, %%mm4 \n\t"
    "pmaxub %%mm1, %%mm4 \n\t"
    "psubb %%mm4, %%mm3 \n\t" // dst - pred
    "movq %%mm3, (%3, %0) \n\t"
    "add $8, %0 \n\t"
    "cmp %4, %0 \n\t"
    " jb 1b \n\t"
    : "+r" (i)
    : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    );
    l= *left;
    lt= *left_top;
    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
    *left_top= src1[w-1];
    *left = src2[w-1];
}
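
/* sub_hfyu_median_prediction_mmx2() implements the HuffYUV median predictor:
 * with L the byte to the left (src2[i-1]), T the byte above (src1[i]) and LT
 * the byte above-left (src1[i-1]), the per-byte step is roughly
 *
 *     const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
 *     dst[i] = src2[i] - pred;
 *     lt = src1[i];
 *     l  = src2[i];
 *
 * (scalar sketch only). The asm gets the same median via pmaxub/pminub;
 * element 0 is redone in C afterwards because it depends on *left/*left_top
 * rather than on in-row neighbours. */
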
#define DIFF_PIXELS_1(m,a,t,p1,p2)\
"mov"#m" "#p1", "#a" \n\t"\
"mov"#m" "#p2", "#t" \n\t"\
"punpcklbw "#a", "#t" \n\t"\
"punpcklbw "#a", "#a" \n\t"\
"psubw "#t", "#a" \n\t"\

#define DIFF_PIXELS_8(m0,m1,mm,p1,p2,stride,temp) {\
uint8_t *p1b=p1, *p2b=p2;\
__asm__ volatile(\
DIFF_PIXELS_1(m0, mm##0, mm##7, (%1), (%2))\
DIFF_PIXELS_1(m0, mm##1, mm##7, (%1,%3), (%2,%3))\
DIFF_PIXELS_1(m0, mm##2, mm##7, (%1,%3,2), (%2,%3,2))\
"add %4, %1 \n\t"\
"add %4, %2 \n\t"\
DIFF_PIXELS_1(m0, mm##3, mm##7, (%1), (%2))\
DIFF_PIXELS_1(m0, mm##4, mm##7, (%1,%3), (%2,%3))\
DIFF_PIXELS_1(m0, mm##5, mm##7, (%1,%3,2), (%2,%3,2))\
DIFF_PIXELS_1(m0, mm##6, mm##7, (%1,%4), (%2,%4))\
"mov"#m1" "#mm"0, %0 \n\t"\
DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\
"mov"#m1" %0, "#mm"0 \n\t"\
: "+m"(temp), "+r"(p1b), "+r"(p2b)\
: "r"((x86_reg)stride), "r"((x86_reg)stride*3)\
);\
}
//the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp)

#define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q, %%mm, p1, p2, stride, temp)
#define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp)

#define LBUTTERFLY2(a1,b1,a2,b2)\
"paddw " #b1 ", " #a1 " \n\t"\
"paddw " #b2 ", " #a2 " \n\t"\
"paddw " #b1 ", " #b1 " \n\t"\
"paddw " #b2 ", " #b2 " \n\t"\
"psubw " #a1 ", " #b1 " \n\t"\
"psubw " #a2 ", " #b2 " \n\t"

#define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\
LBUTTERFLY2(m0, m1, m2, m3)\
LBUTTERFLY2(m4, m5, m6, m7)\
LBUTTERFLY2(m0, m2, m1, m3)\
LBUTTERFLY2(m4, m6, m5, m7)\
LBUTTERFLY2(m0, m4, m1, m5)\
LBUTTERFLY2(m2, m6, m3, m7)\

#define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7)
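
/* Each LBUTTERFLY2 step maps a register pair (a,b) to (a+b, b-a); in scalar
 * form one pair is roughly:
 *
 *     int s = a + b;   // "paddw b, a"
 *     int d = b - a;   // "paddw b, b" then "psubw a, b" gives 2*b - (a+b)
 *     a = s; b = d;
 *
 * Three passes over eight registers (HADAMARD8) therefore perform an 8-point
 * Walsh-Hadamard transform up to ordering and sign. hadamard8_diff below
 * applies it to the rows and then, after a transpose, to the columns of the
 * difference block and sums the absolute coefficients (a SATD metric). */
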
#define MMABS_MMX(a,z)\
"pxor " #z ", " #z " \n\t"\
"pcmpgtw " #a ", " #z " \n\t"\
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"

#define MMABS_MMX2(a,z)\
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
"pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
MMABS(a,z)\
"paddusw " #a ", " #sum " \n\t"

#define MMABS_SUM_8x8_NOSPILL\
MMABS(%%xmm0, %%xmm8)\
MMABS(%%xmm1, %%xmm9)\
MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\
MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\
MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\
MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\
MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\
MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\
"paddusw %%xmm1, %%xmm0 \n\t"

#if ARCH_X86_64
#define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL
#else
#define MMABS_SUM_8x8_SSE2\
"movdqa %%xmm7, (%1) \n\t"\
MMABS(%%xmm0, %%xmm7)\
MMABS(%%xmm1, %%xmm7)\
MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\
MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\
MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\
MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\
MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\
"movdqa (%1), %%xmm2 \n\t"\
MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\
"paddusw %%xmm1, %%xmm0 \n\t"
#endif

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */

#define HSUM_MMX(a, t, dst)\
"movq "#a", "#t" \n\t"\
"psrlq $32, "#a" \n\t"\
"paddusw "#t", "#a" \n\t"\
"movq "#a", "#t" \n\t"\
"psrlq $16, "#a" \n\t"\
"paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\

#define HSUM_MMX2(a, t, dst)\
"pshufw $0x0E, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"pshufw $0x01, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
"movhlps "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"pshuflw $0x0E, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"pshuflw $0x01, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\

#define HADAMARD8_DIFF_MMX(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
DECLARE_ALIGNED_8(uint64_t, temp[13]);\
int sum;\
\
assert(h==8);\
\
DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\
\
__asm__ volatile(\
HADAMARD48\
\
"movq %%mm7, 96(%1) \n\t"\
\
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
STORE4(8, 0(%1), %%mm0, %%mm3, %%mm7, %%mm2)\
\
"movq 96(%1), %%mm7 \n\t"\
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
STORE4(8, 64(%1), %%mm4, %%mm7, %%mm0, %%mm6)\
\
: "=r" (sum)\
: "r"(temp)\
);\
\
DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\
\
__asm__ volatile(\
HADAMARD48\
\
"movq %%mm7, 96(%1) \n\t"\
\
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
STORE4(8, 32(%1), %%mm0, %%mm3, %%mm7, %%mm2)\
\
"movq 96(%1), %%mm7 \n\t"\
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
"movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\
"movq %%mm6, %%mm7 \n\t"\
"movq %%mm0, %%mm6 \n\t"\
\
LOAD4(8, 64(%1), %%mm0, %%mm1, %%mm2, %%mm3)\
\
HADAMARD48\
"movq %%mm7, 64(%1) \n\t"\
MMABS(%%mm0, %%mm7)\
MMABS(%%mm1, %%mm7)\
MMABS_SUM(%%mm2, %%mm7, %%mm0)\
MMABS_SUM(%%mm3, %%mm7, %%mm1)\
MMABS_SUM(%%mm4, %%mm7, %%mm0)\
MMABS_SUM(%%mm5, %%mm7, %%mm1)\
MMABS_SUM(%%mm6, %%mm7, %%mm0)\
"movq 64(%1), %%mm2 \n\t"\
MMABS_SUM(%%mm2, %%mm7, %%mm1)\
"paddusw %%mm1, %%mm0 \n\t"\
"movq %%mm0, 64(%1) \n\t"\
\
LOAD4(8, 0(%1), %%mm0, %%mm1, %%mm2, %%mm3)\
LOAD4(8, 32(%1), %%mm4, %%mm5, %%mm6, %%mm7)\
\
HADAMARD48\
"movq %%mm7, (%1) \n\t"\
MMABS(%%mm0, %%mm7)\
MMABS(%%mm1, %%mm7)\
MMABS_SUM(%%mm2, %%mm7, %%mm0)\
MMABS_SUM(%%mm3, %%mm7, %%mm1)\
MMABS_SUM(%%mm4, %%mm7, %%mm0)\
MMABS_SUM(%%mm5, %%mm7, %%mm1)\
MMABS_SUM(%%mm6, %%mm7, %%mm0)\
"movq (%1), %%mm2 \n\t"\
MMABS_SUM(%%mm2, %%mm7, %%mm1)\
"paddusw 64(%1), %%mm0 \n\t"\
"paddusw %%mm1, %%mm0 \n\t"\
\
HSUM(%%mm0, %%mm1, %0)\
\
: "=r" (sum)\
: "r"(temp)\
);\
return sum&0xFFFF;\
}\
WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)

#define HADAMARD8_DIFF_SSE2(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
DECLARE_ALIGNED_16(uint64_t, temp[4]);\
int sum;\
\
assert(h==8);\
\
DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\
\
__asm__ volatile(\
HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\
TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\
HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\
MMABS_SUM_8x8\
HSUM_SSE2(%%xmm0, %%xmm1, %0)\
: "=r" (sum)\
: "r"(temp)\
);\
return sum&0xFFFF;\
}\
WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)

#define MMABS(a,z) MMABS_MMX(a,z)
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
HADAMARD8_DIFF_MMX(mmx)
#undef MMABS
#undef HSUM

#define MMABS(a,z) MMABS_MMX2(a,z)
#define MMABS_SUM_8x8 MMABS_SUM_8x8_SSE2
#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
HADAMARD8_DIFF_MMX(mmx2)
HADAMARD8_DIFF_SSE2(sse2)
#undef MMABS
#undef MMABS_SUM_8x8
#undef HSUM

#if HAVE_SSSE3
#define MMABS(a,z) MMABS_SSSE3(a,z)
#define MMABS_SUM_8x8 MMABS_SUM_8x8_NOSPILL
HADAMARD8_DIFF_SSE2(ssse3)
#undef MMABS
#undef MMABS_SUM_8x8
#endif

#define DCT_SAD4(m,mm,o)\
"mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
"mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
"mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
"mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
MMABS_SUM(mm##2, mm##6, mm##0)\
MMABS_SUM(mm##3, mm##7, mm##1)\
MMABS_SUM(mm##4, mm##6, mm##0)\
MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
"pxor %%mm0, %%mm0 \n\t"\
"pxor %%mm1, %%mm1 \n\t"\
DCT_SAD4(q, %%mm, 0)\
DCT_SAD4(q, %%mm, 8)\
DCT_SAD4(q, %%mm, 64)\
DCT_SAD4(q, %%mm, 72)\
"paddusw %%mm1, %%mm0 \n\t"\
HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
"pxor %%xmm0, %%xmm0 \n\t"\
"pxor %%xmm1, %%xmm1 \n\t"\
DCT_SAD4(dqa, %%xmm, 0)\
DCT_SAD4(dqa, %%xmm, 64)\
"paddusw %%xmm1, %%xmm0 \n\t"\
HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
int sum;\
__asm__ volatile(\
DCT_SAD\
:"=r"(sum)\
:"r"(block)\
);\
return sum&0xFFFF;\
}

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z) MMABS_MMX2(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3
#define MMABS(a,z) MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    x86_reg i=size;
    __asm__ volatile(
    "pxor %%mm4, %%mm4 \n"
    "1: \n"
    "sub $8, %0 \n"
    "movq (%2,%0), %%mm2 \n"
    "movq (%3,%0,2), %%mm0 \n"
    "movq 8(%3,%0,2), %%mm1 \n"
    "punpckhbw %%mm2, %%mm3 \n"
    "punpcklbw %%mm2, %%mm2 \n"
    "psraw $8, %%mm3 \n"
    "psraw $8, %%mm2 \n"
    "psubw %%mm3, %%mm1 \n"
    "psubw %%mm2, %%mm0 \n"
    "pmaddwd %%mm1, %%mm1 \n"
    "pmaddwd %%mm0, %%mm0 \n"
    "paddd %%mm1, %%mm4 \n"
    "paddd %%mm0, %%mm4 \n"
    "jg 1b \n"
    "movq %%mm4, %%mm3 \n"
    "psrlq $32, %%mm3 \n"
    "paddd %%mm3, %%mm4 \n"
    "movd %%mm4, %1 \n"
    :"+r"(i), "=r"(sum)
    :"r"(pix1), "r"(pix2)
    );
    return sum;
}
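
/* ssd_int8_vs_int16_mmx() returns the sum of squared differences between an
 * int8_t array and an int16_t array of the same length, roughly (scalar
 * sketch for illustration):
 *
 *     int sum = 0;
 *     for (int i = 0; i < size; i++) {
 *         int d = pix1[i] - pix2[i];
 *         sum += d * d;
 *     }
 *
 * The punpck*bw + psraw $8 pair simply sign-extends the int8_t values to
 * 16 bits before pmaddwd squares and pair-wise adds them. */
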
#define PHADDD(a, t)\
"movq "#a", "#t" \n\t"\
"psrlq $32, "#a" \n\t"\
"paddd "#t", "#a" \n\t"

/*
 pmulhw:   dst[0-15] = (src[0-15]*dst[0-15])[16-31]
 pmulhrw:  dst[0-15] = (src[0-15]*dst[0-15] + 0x8000)[16-31]
 pmulhrsw: dst[0-15] = (src[0-15]*dst[0-15] + 0x4000)[15-30]
 */
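
/* In C terms, for each 16-bit lane (a = src, b = dst):
 *
 *     pmulhw:   dst = (int16_t)(((int32_t)a * b) >> 16);
 *     pmulhrw:  dst = (int16_t)(((int32_t)a * b + 0x8000) >> 16);   // 3DNow!
 *     pmulhrsw: dst = (int16_t)(((int32_t)a * b + 0x4000) >> 15);   // SSSE3
 *
 * which is why the plain MMX PMULHRW variant below adds its own paddw/psraw
 * rounding step while the 3DNow! and SSSE3 variants do not (the difference
 * in output scale is absorbed by SCALE_OFFSET). */
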
#define PMULHRW(x, y, s, o)\
"pmulhw " #s ", "#x " \n\t"\
"pmulhw " #s ", "#y " \n\t"\
"paddw " #o ", "#x " \n\t"\
"paddw " #o ", "#y " \n\t"\
"psraw $1, "#x " \n\t"\
"psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
"pmulhrw " #s ", "#x " \n\t"\
"pmulhrw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
"pshufw $0x0E, "#a", "#t" \n\t"\
"paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
"pmulhrsw " #s ", "#x " \n\t"\
"pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif //HAVE_SSSE3

/* FLAC specific */
void ff_flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
                                   double *autoc);

void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & FF_MM_MMX) {
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & FF_MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & FF_MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }

        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;
        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;
        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = (mm_flags & FF_MM_SSE2) ? sse16_sse2 : sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;
        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;
        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & FF_MM_MMXEXT) {
            c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmx2;
            }
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
        }

        if(mm_flags & FF_MM_SSE2){
            c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;
            c->hadamard8_diff[0]= hadamard8_diff16_sse2;
            c->hadamard8_diff[1]= hadamard8_diff_sse2;
            if (CONFIG_FLAC_ENCODER)
                c->flac_compute_autocorr = ff_flac_compute_autocorr_sse2;
        }

#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            }
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
            c->hadamard8_diff[0]= hadamard8_diff16_ssse3;
            c->hadamard8_diff[1]= hadamard8_diff_ssse3;
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            }
            c->add_8x8basis= add_8x8basis_3dnow;
        }
    }

    dsputil_init_pix_mmx(c, avctx);
}