/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "dsputil.h"
#include "dsputil_mmx.h"
#include "mpegvideo.h"
#include "x86_cpu.h"

static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ASMALIGN(4)
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
        : "%"REG_a
    );
}
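
/* Not part of the original file: an illustrative plain-C sketch (compiled
 * out via #if 0) of what get_pixels_mmx computes -- it widens an 8x8 block
 * of bytes into 64 16-bit DCT coefficients. The _ref name is hypothetical. */
#if 0
static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = pixels[j]; /* zero-extend byte to 16 bits */
        pixels += line_size;
    }
}
#endif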

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ASMALIGN(4)
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
        : "%"REG_a
    );
}
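
/* Not part of the original file: an illustrative plain-C sketch (compiled
 * out via #if 0) of diff_pixels_mmx -- the 8x8 residual s1 - s2 widened to
 * 16-bit DCT input. The _ref name is hypothetical. */
#if 0
static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}
#endif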

static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    x86_reg index= -line_size*h;

    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
    );
    return sum;
}
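
/* Not part of the original file: plain-C sketch of pix_sum16_mmx. The sum
 * of a 16x16 block of bytes is at most 255*256 = 65280, so the 16-bit lane
 * accumulation cannot overflow and the final "andl $0xFFFF" suffices. */
#if 0
static int pix_sum16_ref(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x];
        pix += line_size;
    }
    return sum;
}
#endif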

static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
        "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
        "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
        "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
        "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"
        "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                        pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"
        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size)
        : "%ecx" );
    return tmp;
}
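
/* Not part of the original file: plain-C sketch of pix_norm1_mmx, the sum
 * of squared pixel values over a 16x16 block. */
#if 0
static int pix_norm1_ref(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x] * pix[x];
        pix += line_size;
    }
    return sum;
}
#endif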

static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n" /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"
        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"
        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"
        "decl %%ecx\n"
        "jnz 1b\n"
        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n" /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
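
/* Not part of the original file: a plain-C sketch of the sum of squared
 * errors that sse8_mmx, sse16_mmx and the SSE2 variant below all compute
 * (w is 8 or 16). The psubusb/psubusb/por pairs above are the usual MMX
 * trick for a per-byte absolute difference, which is then squared and
 * horizontally summed via pmaddwd. */
#if 0
static int sse_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int w, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif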

static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "shr $1,%2\n"
        "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
        "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
        "1:\n"
        "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
        "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
        "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
        "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movdqa %%xmm1,%%xmm5\n"
        "movdqa %%xmm3,%%xmm6\n"
        "psubusb %%xmm2,%%xmm1\n"
        "psubusb %%xmm4,%%xmm3\n"
        "psubusb %%xmm5,%%xmm2\n"
        "psubusb %%xmm6,%%xmm4\n"
        "por %%xmm1,%%xmm2\n"
        "por %%xmm3,%%xmm4\n"
        /* now convert to 16-bit vectors so we can square them */
        "movdqa %%xmm2,%%xmm1\n"
        "movdqa %%xmm4,%%xmm3\n"
        "punpckhbw %%xmm0,%%xmm2\n"
        "punpckhbw %%xmm0,%%xmm4\n"
        "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
        "pmaddwd %%xmm2,%%xmm2\n"
        "pmaddwd %%xmm4,%%xmm4\n"
        "pmaddwd %%xmm1,%%xmm1\n"
        "pmaddwd %%xmm3,%%xmm3\n"
        "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
        "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
        "paddd %%xmm2,%%xmm1\n"
        "paddd %%xmm4,%%xmm3\n"
        "paddd %%xmm1,%%xmm7\n"
        "paddd %%xmm3,%%xmm7\n"
        "decl %2\n"
        "jnz 1b\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movdqa %%xmm7,%%xmm1\n"
        "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
        "paddd %%xmm1,%%xmm7\n"
        "movd %%xmm7,%3\n"
        : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
        : "r" ((x86_reg)line_size));
    return tmp;
}

static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp;
}
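
/* Not part of the original file: a rough plain-C equivalent of the hf_noise
 * functions. Each row is reduced to its horizontal first differences and
 * the absolute vertical change of those differences is summed -- a crude
 * measure of high-frequency "noise". hf_noise8_mmx corresponds to w=8
 * (seven diffs per row); hf_noise16_mmx below covers w=16 by combining an
 * 8-lane loop with a recursive call on the right half of the block. */
#if 0
static int hf_noise_ref(uint8_t *pix, int line_size, int w, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < w - 1; x++) {
            int d0 = pix[x]             - pix[x + 1];
            int d1 = pix[x + line_size] - pix[x + line_size + 1];
            sum += FFABS(d0 - d1);
        }
        pix += line_size;
    }
    return sum;
}
#endif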

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
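
/* Noise-preserving sum of squared errors: plain SSE plus a penalty for how
 * much the high-frequency "noise" measured by hf_noise* differs between the
 * two blocks, scaled by avctx->nsse_weight (8 when no context is given). */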
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
    else  return score1 + FFABS(score2)*8;
}

static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((long)pix) & 7) == 0); /* cast to long so the check also compiles cleanly on 64-bit */
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
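
/* Not part of the original file: scalar sketch of vsad_intra16_mmx -- the
 * summed absolute difference between vertically adjacent rows of a 16-wide
 * block. The MMX version accumulates in 16-bit lanes, hence the final
 * "& 0xFFFF". */
#if 0
static int vsad_intra16_ref(uint8_t *pix, int line_size, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++)
            sum += FFABS(pix[x] - pix[x + line_size]);
        pix += line_size;
    }
    return sum;
}
#endif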

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((long)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "subl $2, %%ecx\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%1\n"
        : "+r" (pix), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((long)pix1) & 7) == 0);
    assert( (((long)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
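
/* Not part of the original file: scalar sketch of vsad16_mmx, the vertical
 * SAD of the residual pix1-pix2 (the asm takes the byte differences modulo
 * 256). The pcmpeqw/psllw/packsswb prologue above builds 0x80 in every byte
 * of %%mm7; xoring the wrapped differences with it maps them to offset
 * binary, so the unsigned psubusb/por absolute-value trick works on what
 * are really signed values. */
#if 0
static int vsad16_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++) {
            int d0 = pix1[x]             - pix2[x];
            int d1 = pix1[x + line_size] - pix2[x + line_size];
            sum += FFABS(d0 - d1);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif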

static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((long)pix1) & 7) == 0);
    assert( (((long)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "subl $2, %%ecx\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "1:\n"
        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)
        "subl $2, %%ecx\n"
        "jnz 1b\n"
        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" ((x86_reg)line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    asm volatile(
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%3, %0) \n\t"
        "movq 8(%2, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
        "psubb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%3, %0) \n\t"
        "add $16, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    x86_reg i=0;
    uint8_t l, lt;

    asm volatile(
        "1: \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((x86_reg)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left = src2[w-1];
}
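
/* Not part of the original file: scalar sketch of the median predictor used
 * above, mirroring the dst[0] fixup already present in the C tail. For each
 * byte, pred = mid_pred(L, T, L+T-LT) with L = src2[i-1], T = src1[i],
 * LT = src1[i-1], all arithmetic modulo 256; dst[0] needs *left/*left_top. */
#if 0
static void sub_hfyu_median_prediction_ref(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
    int i;
    for (i = 1; i < w; i++) {
        int l  = src2[i - 1]; /* left neighbour     */
        int t  = src1[i];     /* top neighbour      */
        int lt = src1[i - 1]; /* top-left neighbour */
        dst[i] = src2[i] - mid_pred(l, t, (l + t - lt) & 0xFF);
    }
}
#endif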

#define DIFF_PIXELS_1(m,a,t,p1,p2)\
    "mov"#m" "#p1", "#a" \n\t"\
    "mov"#m" "#p2", "#t" \n\t"\
    "punpcklbw "#a", "#t" \n\t"\
    "punpcklbw "#a", "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define DIFF_PIXELS_8(m0,m1,mm,p1,p2,stride,temp) {\
    uint8_t *p1b=p1, *p2b=p2;\
    asm volatile(\
        DIFF_PIXELS_1(m0, mm##0, mm##7, (%1), (%2))\
        DIFF_PIXELS_1(m0, mm##1, mm##7, (%1,%3), (%2,%3))\
        DIFF_PIXELS_1(m0, mm##2, mm##7, (%1,%3,2), (%2,%3,2))\
        "add %4, %1 \n\t"\
        "add %4, %2 \n\t"\
        DIFF_PIXELS_1(m0, mm##3, mm##7, (%1), (%2))\
        DIFF_PIXELS_1(m0, mm##4, mm##7, (%1,%3), (%2,%3))\
        DIFF_PIXELS_1(m0, mm##5, mm##7, (%1,%3,2), (%2,%3,2))\
        DIFF_PIXELS_1(m0, mm##6, mm##7, (%1,%4), (%2,%4))\
        "mov"#m1" "#mm"0, %0 \n\t"\
        DIFF_PIXELS_1(m0, mm##7, mm##0, (%1,%3,4), (%2,%3,4))\
        "mov"#m1" %0, "#mm"0 \n\t"\
        : "+m"(temp), "+r"(p1b), "+r"(p2b)\
        : "r"((x86_reg)stride), "r"((x86_reg)stride*3)\
    );\
}
//the "+m"(temp) is needed as gcc 2.95 sometimes fails to compile "=m"(temp)

#define DIFF_PIXELS_4x8(p1,p2,stride,temp) DIFF_PIXELS_8(d, q,   %%mm,  p1, p2, stride, temp)
#define DIFF_PIXELS_8x8(p1,p2,stride,temp) DIFF_PIXELS_8(q, dqa, %%xmm, p1, p2, stride, temp)

#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 " \n\t"\
    "paddw " #b2 ", " #a2 " \n\t"\
    "paddw " #b1 ", " #b1 " \n\t"\
    "paddw " #b2 ", " #b2 " \n\t"\
    "psubw " #a1 ", " #b1 " \n\t"\
    "psubw " #a2 ", " #b2 " \n\t"

#define HADAMARD8(m0, m1, m2, m3, m4, m5, m6, m7)\
    LBUTTERFLY2(m0, m1, m2, m3)\
    LBUTTERFLY2(m4, m5, m6, m7)\
    LBUTTERFLY2(m0, m2, m1, m3)\
    LBUTTERFLY2(m4, m6, m5, m7)\
    LBUTTERFLY2(m0, m4, m1, m5)\
    LBUTTERFLY2(m2, m6, m3, m7)

#define HADAMARD48 HADAMARD8(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm6, %%mm7)
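
/* Each LBUTTERFLY2 leaves a = a+b and b = b-a in the paired 16-bit lanes, so
 * the three butterfly stages of HADAMARD8 apply an (unnormalized, up to
 * per-coefficient sign) 8-point Walsh-Hadamard transform to every lane in
 * parallel -- 4 lanes per register with MMX, 8 with SSE2. The sign flips do
 * not matter because only absolute values are summed afterwards. */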

#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum " \n\t"

#define MMABS_SUM_8x8_NOSPILL\
    MMABS(%%xmm0, %%xmm8)\
    MMABS(%%xmm1, %%xmm9)\
    MMABS_SUM(%%xmm2, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm3, %%xmm9, %%xmm1)\
    MMABS_SUM(%%xmm4, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm5, %%xmm9, %%xmm1)\
    MMABS_SUM(%%xmm6, %%xmm8, %%xmm0)\
    MMABS_SUM(%%xmm7, %%xmm9, %%xmm1)\
    "paddusw %%xmm1, %%xmm0 \n\t"

#ifdef ARCH_X86_64
#define MMABS_SUM_8x8_SSE2 MMABS_SUM_8x8_NOSPILL
#else
#define MMABS_SUM_8x8_SSE2\
    "movdqa %%xmm7, (%1) \n\t"\
    MMABS(%%xmm0, %%xmm7)\
    MMABS(%%xmm1, %%xmm7)\
    MMABS_SUM(%%xmm2, %%xmm7, %%xmm0)\
    MMABS_SUM(%%xmm3, %%xmm7, %%xmm1)\
    MMABS_SUM(%%xmm4, %%xmm7, %%xmm0)\
    MMABS_SUM(%%xmm5, %%xmm7, %%xmm1)\
    MMABS_SUM(%%xmm6, %%xmm7, %%xmm0)\
    "movdqa (%1), %%xmm2 \n\t"\
    MMABS_SUM(%%xmm2, %%xmm7, %%xmm1)\
    "paddusw %%xmm1, %%xmm0 \n\t"
#endif

#define LOAD4(o, a, b, c, d)\
    "movq "#o"(%1), "#a" \n\t"\
    "movq "#o"+8(%1), "#b" \n\t"\
    "movq "#o"+16(%1), "#c" \n\t"\
    "movq "#o"+24(%1), "#d" \n\t"

#define STORE4(o, a, b, c, d)\
    "movq "#a", "#o"(%1) \n\t"\
    "movq "#b", "#o"+8(%1) \n\t"\
    "movq "#c", "#o"+16(%1) \n\t"\
    "movq "#d", "#o"+24(%1) \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define HSUM_MMX2(a, t, dst)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define HADAMARD8_DIFF_MMX(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
    DECLARE_ALIGNED_8(uint64_t, temp[13]);\
    int sum;\
\
    assert(h==8);\
\
    DIFF_PIXELS_4x8(src1, src2, stride, temp[0]);\
\
    asm volatile(\
        HADAMARD48\
\
        "movq %%mm7, 96(%1) \n\t"\
\
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)\
\
        "movq 96(%1), %%mm7 \n\t"\
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)\
\
        : "=r" (sum)\
        : "r"(temp)\
    );\
\
    DIFF_PIXELS_4x8(src1+4, src2+4, stride, temp[4]);\
\
    asm volatile(\
        HADAMARD48\
\
        "movq %%mm7, 96(%1) \n\t"\
\
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)\
        STORE4(32, %%mm0, %%mm3, %%mm7, %%mm2)\
\
        "movq 96(%1), %%mm7 \n\t"\
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)\
        "movq %%mm7, %%mm5 \n\t"/*FIXME remove*/\
        "movq %%mm6, %%mm7 \n\t"\
        "movq %%mm0, %%mm6 \n\t"\
\
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)\
\
        HADAMARD48\
        "movq %%mm7, 64(%1) \n\t"\
        MMABS(%%mm0, %%mm7)\
        MMABS(%%mm1, %%mm7)\
        MMABS_SUM(%%mm2, %%mm7, %%mm0)\
        MMABS_SUM(%%mm3, %%mm7, %%mm1)\
        MMABS_SUM(%%mm4, %%mm7, %%mm0)\
        MMABS_SUM(%%mm5, %%mm7, %%mm1)\
        MMABS_SUM(%%mm6, %%mm7, %%mm0)\
        "movq 64(%1), %%mm2 \n\t"\
        MMABS_SUM(%%mm2, %%mm7, %%mm1)\
        "paddusw %%mm1, %%mm0 \n\t"\
        "movq %%mm0, 64(%1) \n\t"\
\
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)\
        LOAD4(32, %%mm4, %%mm5, %%mm6, %%mm7)\
\
        HADAMARD48\
        "movq %%mm7, (%1) \n\t"\
        MMABS(%%mm0, %%mm7)\
        MMABS(%%mm1, %%mm7)\
        MMABS_SUM(%%mm2, %%mm7, %%mm0)\
        MMABS_SUM(%%mm3, %%mm7, %%mm1)\
        MMABS_SUM(%%mm4, %%mm7, %%mm0)\
        MMABS_SUM(%%mm5, %%mm7, %%mm1)\
        MMABS_SUM(%%mm6, %%mm7, %%mm0)\
        "movq (%1), %%mm2 \n\t"\
        MMABS_SUM(%%mm2, %%mm7, %%mm1)\
        "paddusw 64(%1), %%mm0 \n\t"\
        "paddusw %%mm1, %%mm0 \n\t"\
\
        HSUM(%%mm0, %%mm1, %0)\
\
        : "=r" (sum)\
        : "r"(temp)\
    );\
    return sum&0xFFFF;\
}\
WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)

#define HADAMARD8_DIFF_SSE2(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
    DECLARE_ALIGNED_16(uint64_t, temp[4]);\
    int sum;\
\
    assert(h==8);\
\
    DIFF_PIXELS_8x8(src1, src2, stride, temp[0]);\
\
    asm volatile(\
        HADAMARD8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)\
        TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%1))\
        HADAMARD8(%%xmm0, %%xmm5, %%xmm7, %%xmm3, %%xmm6, %%xmm4, %%xmm2, %%xmm1)\
        MMABS_SUM_8x8\
        HSUM_SSE2(%%xmm0, %%xmm1, %0)\
        : "=r" (sum)\
        : "r"(temp)\
    );\
    return sum&0xFFFF;\
}\
WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
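
/* Not part of the original file: a plain-C sketch of what the
 * hadamard8_diff_* functions compute -- the sum of absolute values of the
 * 2-D Walsh-Hadamard transform of the difference block (SATD). The helper
 * names are hypothetical and the butterfly ordering differs from the asm,
 * which only changes per-coefficient signs, not the absolute-value sum;
 * the real functions clip with "sum&0xFFFF" because the MMX accumulation
 * saturates at 16 bits. */
#if 0
static void wht8_1d_ref(int *p, int step)
{
    int len, i, j;
    for (len = 1; len < 8; len <<= 1) /* three butterfly stages */
        for (i = 0; i < 8; i += len << 1)
            for (j = i; j < i + len; j++) {
                int a = p[ j        * step];
                int b = p[(j + len) * step];
                p[ j        * step] = a + b;
                p[(j + len) * step] = a - b;
            }
}

static int hadamard8_diff_ref(uint8_t *src1, uint8_t *src2, int stride)
{
    int d[64], i, x, y, sum = 0;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++)
            d[8*y + x] = src1[y*stride + x] - src2[y*stride + x];
    for (i = 0; i < 8; i++) wht8_1d_ref(d + 8*i, 1); /* transform rows    */
    for (i = 0; i < 8; i++) wht8_1d_ref(d + i,   8); /* transform columns */
    for (i = 0; i < 64; i++) sum += FFABS(d[i]);
    return sum;
}
#endif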

#define MMABS(a,z) MMABS_MMX(a,z)
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
HADAMARD8_DIFF_MMX(mmx)
#undef MMABS
#undef HSUM

#define MMABS(a,z) MMABS_MMX2(a,z)
#define MMABS_SUM_8x8 MMABS_SUM_8x8_SSE2
#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
HADAMARD8_DIFF_MMX(mmx2)
HADAMARD8_DIFF_SSE2(sse2)
#undef MMABS
#undef MMABS_SUM_8x8
#undef HSUM

#ifdef HAVE_SSSE3
#define MMABS(a,z) MMABS_SSSE3(a,z)
#define MMABS_SUM_8x8 MMABS_SUM_8x8_NOSPILL
HADAMARD8_DIFF_SSE2(ssse3)
#undef MMABS
#undef MMABS_SUM_8x8
#endif

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
    int sum;\
    asm volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z) MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
#define MMABS(a,z) MMABS_MMX2(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#ifdef HAVE_SSSE3
#define MMABS(a,z) MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD
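
/* Not part of the original file: what every sum_abs_dctelem_* variant above
 * computes; the "sum&0xFFFF" in the real functions reflects the saturating
 * paddusw accumulation. */
#if 0
static int sum_abs_dctelem_ref(DCTELEM *block)
{
    int i, sum = 0;
    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}
#endif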

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    x86_reg i=size;
    asm volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n" /* old mm3 lands in the low bytes and is shifted out below */
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n" /* MMX ops leave EFLAGS untouched, so this tests the "sub" above */
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        :"+r"(i), "=r"(sum)
        :"r"(pix1), "r"(pix2)
    );
    return sum;
}
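
/* Not part of the original file: scalar sketch of ssd_int8_vs_int16_mmx,
 * the squared distance between an int8 and an int16 vector. */
#if 0
static int ssd_int8_vs_int16_ref(const int8_t *pix1, const int16_t *pix2, int size)
{
    int i, sum = 0;
    for (i = 0; i < size; i++) {
        int d = pix2[i] - pix1[i];
        sum += d * d;
    }
    return sum;
}
#endif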

#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"

/*
  pmulhw:   dst[0-15]=(src[0-15]*dst[0-15])[16-31]
  pmulhrw:  dst[0-15]=(src[0-15]*dst[0-15] + 0x8000)[16-31]
  pmulhrsw: dst[0-15]=(src[0-15]*dst[0-15] + 0x4000)[15-30]
*/
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_mmx_qns.h"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns.h"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#ifdef HAVE_SSSE3
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_mmx_qns.h"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif //HAVE_SSSE3

/* FLAC specific */
void ff_flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
                                   double *autoc);

void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & MM_MMX) {
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }

        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum = pix_sum16_mmx;
        c->diff_bytes= diff_bytes_mmx;
        c->sum_abs_dctelem= sum_abs_dctelem_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->vsad[0] = vsad16_mmx2;
            }

            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
        }

        if(mm_flags & MM_SSE2){
            c->sum_abs_dctelem= sum_abs_dctelem_sse2;
            c->hadamard8_diff[0]= hadamard8_diff16_sse2;
            c->hadamard8_diff[1]= hadamard8_diff_sse2;
            if (ENABLE_FLAC_ENCODER)
                c->flac_compute_autocorr = ff_flac_compute_autocorr_sse2;
        }

#ifdef HAVE_SSSE3
        if(mm_flags & MM_SSSE3){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_ssse3;
            }
            c->add_8x8basis= add_8x8basis_ssse3;
            c->sum_abs_dctelem= sum_abs_dctelem_ssse3;
            c->hadamard8_diff[0]= hadamard8_diff16_ssse3;
            c->hadamard8_diff[1]= hadamard8_diff_ssse3;
        }
#endif

        if(mm_flags & MM_3DNOW){
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->try_8x8basis= try_8x8basis_3dnow;
            }
            c->add_8x8basis= add_8x8basis_3dnow;
        }
    }

    dsputil_init_pix_mmx(c, avctx);
}