cavsdsp.c
/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "constants.h"
#include "dsputil_x86.h"
#include "config.h"

#if HAVE_MMX_INLINE

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b )        \
    "paddw "#b", "#a"      \n\t" \
    "paddw "#b", "#b"      \n\t" \
    "psubw "#a", "#b"      \n\t"
/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4   \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5   \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2   \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7   \n\t" /* mm7 = src3 */
        "movq   %%mm4, %%mm0   \n\t"
        "movq   %%mm5, %%mm3   \n\t"
        "movq   %%mm2, %%mm6   \n\t"
        "movq   %%mm7, %%mm1   \n\t"
        "paddw  %%mm4, %%mm4   \n\t" /* mm4 = 2*src7 */
        "paddw  %%mm3, %%mm3   \n\t" /* mm3 = 2*src1 */
        "paddw  %%mm6, %%mm6   \n\t" /* mm6 = 2*src5 */
        "paddw  %%mm1, %%mm1   \n\t" /* mm1 = 2*src3 */
        "paddw  %%mm4, %%mm0   \n\t" /* mm0 = 3*src7 */
        "paddw  %%mm3, %%mm5   \n\t" /* mm5 = 3*src1 */
        "paddw  %%mm6, %%mm2   \n\t" /* mm2 = 3*src5 */
        "paddw  %%mm1, %%mm7   \n\t" /* mm7 = 3*src3 */
        "psubw  %%mm4, %%mm5   \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw  %%mm6, %%mm7   \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw  %%mm2, %%mm1   \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw  %%mm0, %%mm3   \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */
        "movq   %%mm5, %%mm4   \n\t"
        "movq   %%mm7, %%mm6   \n\t"
        "movq   %%mm3, %%mm0   \n\t"
        "movq   %%mm1, %%mm2   \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )    /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw  %%mm3, %%mm7   \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw  %%mm1, %%mm5   \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw  %%mm7, %%mm7   \n\t"
        "paddw  %%mm5, %%mm5   \n\t"
        "paddw  %%mm6, %%mm7   \n\t" /* mm7 = b4 */
        "paddw  %%mm4, %%mm5   \n\t" /* mm5 = b5 */
        SUMSUB_BA( %%mm1, %%mm3 )    /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw  %%mm1, %%mm4   \n\t" /* mm4 = a0 - a2 - a3 */
        "movq   %%mm4, %%mm1   \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw  %%mm6, %%mm3   \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw  %%mm1, %%mm1   \n\t"
        "paddw  %%mm3, %%mm3   \n\t"
        "psubw  %%mm2, %%mm1   \n\t" /* mm1 = b7 */
        "paddw  %%mm0, %%mm3   \n\t" /* mm3 = b6 */
        "movq  32(%0), %%mm2   \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6   \n\t" /* mm6 = src6 */
        "movq   %%mm2, %%mm4   \n\t"
        "movq   %%mm6, %%mm0   \n\t"
        "psllw     $2, %%mm4   \n\t" /* mm4 = 4*src2 */
        "psllw     $2, %%mm6   \n\t" /* mm6 = 4*src6 */
        "paddw  %%mm4, %%mm2   \n\t" /* mm2 = 5*src2 */
        "paddw  %%mm6, %%mm0   \n\t" /* mm0 = 5*src6 */
        "paddw  %%mm2, %%mm2   \n\t"
        "paddw  %%mm0, %%mm0   \n\t"
        "psubw  %%mm0, %%mm4   \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw  %%mm2, %%mm6   \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */
        "movq    (%0), %%mm2   \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0   \n\t" /* mm0 = src4 */
        SUMSUB_BA( %%mm0, %%mm2 )    /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw     $3, %%mm0   \n\t"
        "psllw     $3, %%mm2   \n\t"
        "paddw     %1, %%mm0   \n\t" /* add rounding bias */
        "paddw     %1, %%mm2   \n\t" /* add rounding bias */
        SUMSUB_BA( %%mm6, %%mm0 )    /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA( %%mm4, %%mm2 )    /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA( %%mm7, %%mm6 )    /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA( %%mm5, %%mm4 )    /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA( %%mm3, %%mm2 )    /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA( %%mm1, %%mm0 )    /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}
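/* For reference, one 1-D pass in scalar form, reconstructed from the
 * register comments above (a sketch, not part of the original file; `bias`
 * is the per-lane rounding constant, 4 or 64, the caller applies the final
 * >>3 or >>7 shift, and the MMX version truncates each result to 16 bits): */
#if 0
static void cavs_idct8_1d_ref(int16_t *src, int col, int bias)
{
    const int a0 = 3 * src[8*1 + col] - 2 * src[8*7 + col];
    const int a1 = 3 * src[8*3 + col] + 2 * src[8*5 + col];
    const int a2 = 2 * src[8*3 + col] - 3 * src[8*5 + col];
    const int a3 = 2 * src[8*1 + col] + 3 * src[8*7 + col];

    const int b4 = 2 * (a0 + a1 + a3) + a1;
    const int b5 = 2 * (a0 - a1 + a2) + a0;
    const int b6 = 2 * (a3 - a2 - a1) + a3;
    const int b7 = 2 * (a0 - a2 - a3) - a2;

    const int a7 =  4 * src[8*2 + col] - 10 * src[8*6 + col];
    const int a6 = 10 * src[8*2 + col] +  4 * src[8*6 + col];
    const int a4 = 8 * (src[8*0 + col] + src[8*4 + col]) + bias;
    const int a5 = 8 * (src[8*0 + col] - src[8*4 + col]) + bias;

    src[8*0 + col] = (a4 + a6) + b4;   /* dst0 */
    src[8*1 + col] = (a5 + a7) + b5;   /* dst1 */
    src[8*2 + col] = (a5 - a7) + b6;   /* dst2 */
    src[8*3 + col] = (a4 - a6) + b7;   /* dst3 */
    src[8*4 + col] = (a4 - a6) - b7;   /* dst4 */
    src[8*5 + col] = (a5 - a7) - b6;   /* dst5 */
    src[8*6 + col] = (a5 + a7) - b5;   /* dst6 */
    src[8*7 + col] = (a4 + a6) - b4;   /* dst7 */
}
#endif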
#define SBUTTERFLY(a,b,t,n,m)\
    "mov" #m " " #a ", " #t "       \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "   \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "   \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd,q)  /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd,q)  /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq,q)  /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq,q)  /* t=cgko c=dhlp */
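/* TRANSPOSE4 transposes a 4x4 matrix of 16-bit words held in four MMX
 * registers: with input rows a=abcd, b=efgh, c=ijkl, d=mnop, the transposed
 * rows end up in a=aeim, d=bfjn, t=cgko, c=dhlp. Note the permuted output
 * register order, which the stores in cavs_idct8_add_mmx() below account
 * for. */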
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for (i = 0; i < 2; i++) {
        DECLARE_ALIGNED(8, uint64_t, tmp);

        cavs_idct8_1d(block + 4 * i, ff_pw_4.a);

        __asm__ volatile(
            "psraw     $3, %%mm7   \n\t"
            "psraw     $3, %%mm6   \n\t"
            "psraw     $3, %%mm5   \n\t"
            "psraw     $3, %%mm4   \n\t"
            "psraw     $3, %%mm3   \n\t"
            "psraw     $3, %%mm2   \n\t"
            "psraw     $3, %%mm1   \n\t"
            "psraw     $3, %%mm0   \n\t"
            "movq   %%mm7, %0      \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1)  \n\t"
            "movq   %%mm6, 24(%1)  \n\t"
            "movq   %%mm7, 40(%1)  \n\t"
            "movq   %%mm4, 56(%1)  \n\t"
            "movq   %0,    %%mm7   \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1)  \n\t"
            "movq   %%mm1, 16(%1)  \n\t"
            "movq   %%mm0, 32(%1)  \n\t"
            "movq   %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2 + 32 * i)
            : "memory"
        );
    }

    for (i = 0; i < 2; i++) {
        cavs_idct8_1d(b2 + 4 * i, ff_pw_64.a);

        __asm__ volatile(
            "psraw     $7, %%mm7   \n\t"
            "psraw     $7, %%mm6   \n\t"
            "psraw     $7, %%mm5   \n\t"
            "psraw     $7, %%mm4   \n\t"
            "psraw     $7, %%mm3   \n\t"
            "psraw     $7, %%mm2   \n\t"
            "psraw     $7, %%mm1   \n\t"
            "psraw     $7, %%mm0   \n\t"
            "movq   %%mm7,    (%0) \n\t"
            "movq   %%mm5,  16(%0) \n\t"
            "movq   %%mm3,  32(%0) \n\t"
            "movq   %%mm1,  48(%0) \n\t"
            "movq   %%mm0,  64(%0) \n\t"
            "movq   %%mm2,  80(%0) \n\t"
            "movq   %%mm4,  96(%0) \n\t"
            "movq   %%mm6, 112(%0) \n\t"
            :: "r"(b2 + 4 * i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}
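/* The routine above runs the 1-D transform twice over each half of the
 * block: a first pass with bias 4 and >>3, whose transposed 4x4 tiles are
 * written into b2, then a second pass on b2 with bias 64 and >>7, after
 * which ff_add_pixels_clamped_mmx() adds the residual to dst with unsigned
 * saturation. */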
#endif /* HAVE_MMX_INLINE */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/
/* vertical filter [-1 -2 96 42 -7  0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"                \n\t"\
    "movq "#C", %%mm6               \n\t"\
    "pmullw %5, %%mm6               \n\t"\
    "movq "#D", %%mm7               \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm7   \n\t"\
    "psllw $3, "#E"                 \n\t"\
    "psubw "#E", %%mm6              \n\t"\
    "psraw $3, "#E"                 \n\t"\
    "paddw %%mm7, %%mm6             \n\t"\
    "paddw "#E", %%mm6              \n\t"\
    "paddw "#B", "#B"               \n\t"\
    "pxor %%mm7, %%mm7              \n\t"\
    "add %2, %0                     \n\t"\
    "punpcklbw %%mm7, "#F"          \n\t"\
    "psubw "#B", %%mm6              \n\t"\
    "psraw $1, "#B"                 \n\t"\
    "psubw "#A", %%mm6              \n\t"\
    "paddw %4, %%mm6                \n\t"\
    "psraw $7, %%mm6                \n\t"\
    "packuswb %%mm6, %%mm6          \n\t"\
    OP(%%mm6, (%1), A, d)                \
    "add %3, %1                     \n\t"

/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"                \n\t"\
    "movq "#C", %%mm6               \n\t"\
    "paddw "#D", %%mm6              \n\t"\
    "pmullw %5, %%mm6               \n\t"\
    "add %2, %0                     \n\t"\
    "punpcklbw %%mm7, "#F"          \n\t"\
    "psubw "#B", %%mm6              \n\t"\
    "psubw "#E", %%mm6              \n\t"\
    "paddw %4, %%mm6                \n\t"\
    "psraw $3, %%mm6                \n\t"\
    "packuswb %%mm6, %%mm6          \n\t"\
    OP(%%mm6, (%1), A, d)                \
    "add %3, %1                     \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"                \n\t"\
    "movq "#C", %%mm6               \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm6   \n\t"\
    "movq "#D", %%mm7               \n\t"\
    "pmullw %5, %%mm7               \n\t"\
    "psllw $3, "#B"                 \n\t"\
    "psubw "#B", %%mm6              \n\t"\
    "psraw $3, "#B"                 \n\t"\
    "paddw %%mm7, %%mm6             \n\t"\
    "paddw "#B", %%mm6              \n\t"\
    "paddw "#E", "#E"               \n\t"\
    "pxor %%mm7, %%mm7              \n\t"\
    "add %2, %0                     \n\t"\
    "punpcklbw %%mm7, "#F"          \n\t"\
    "psubw "#E", %%mm6              \n\t"\
    "psraw $1, "#E"                 \n\t"\
    "psubw "#F", %%mm6              \n\t"\
    "paddw %4, %%mm6                \n\t"\
    "psraw $7, %%mm6                \n\t"\
    "packuswb %%mm6, %%mm6          \n\t"\
    OP(%%mm6, (%1), A, d)                \
    "add %3, %1                     \n\t"
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w = 2;\
\
    src -= 2 * srcStride;\
\
    while (w--) {\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7          \n\t"\
            "movd (%0), %%mm0           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm1           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm2           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm3           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm4           \n\t"\
            "add %2, %0                 \n\t"\
            "punpcklbw %%mm7, %%mm0     \n\t"\
            "punpcklbw %%mm7, %%mm1     \n\t"\
            "punpcklbw %%mm7, %%mm2     \n\t"\
            "punpcklbw %%mm7, %%mm3     \n\t"\
            "punpcklbw %%mm7, %%mm4     \n\t"\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
            : "memory"\
        );\
        if (h == 16) {\
            __asm__ volatile(\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
                VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
                VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
                VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
                VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
                : "memory"\
            );\
        }\
        src += 4 - (h + 5) * srcStride;\
        dst += 4 -  h      * dstStride;\
    }
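/* QPEL_CAVSVNUM processes the block as two 4-pixel-wide columns (w = 2).
 * It preloads five source rows into mm0..mm4; each VOP invocation then
 * fetches one more row into F and emits one output row, so the six filter
 * taps rotate through mm0..mm5 without reloading. For h == 16 a second asm
 * block continues the rotation for the remaining eight rows. */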
#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h = 8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq  -1(%0), %%mm2        \n\t"\
        "movq   2(%0), %%mm4        \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq %6, %%mm5             \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1), %%mm5, q)        \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV1, OP, ff_pw_64, ff_pw_96, ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV2, OP, ff_pw_4, ff_pw_5, ff_pw_5)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV3, OP, ff_pw_64, ff_pw_96, ff_pw_42)\
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst, src, dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst    , src    , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst, src, dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst    , src    , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst, src, dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst    , src    , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst + 8, src + 8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst    , src    , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst + 8, src + 8, dstStride, srcStride);\
    src += 8 * srcStride;\
    dst += 8 * dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst    , src    , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst + 8, src + 8, dstStride, srcStride);\
}
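/* Scalar sketch of the horizontal filter above ([-1 5 5 -1]/8 with rounding;
 * an illustration, not part of the original file): */
#if 0
static uint8_t cavs_qpel_h_ref(const uint8_t *src)
{
    return av_clip_uint8((-src[-1] + 5 * src[0] + 5 * src[1] - src[2] + 4) >> 3);
}
#endif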
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}
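/* The mcXY suffix encodes the quarter-pel offset (X horizontally, Y
 * vertically): mc20 is the half-pel horizontal case, and mc01/mc02/mc03 are
 * the three purely vertical cases. Only these positions (plus the full-pel
 * mc00 copies below) get SIMD versions in this file; the mixed positions
 * are left to the generic C implementations. */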
#define PUT_OP(a, b, temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a, b, temp, size) \
    "mov" #size " " #b ", " #temp "   \n\t"\
    "pavgusb " #temp ", " #a "        \n\t"\
    "mov" #size " " #a ", " #b "      \n\t"

#define AVG_MMXEXT_OP(a, b, temp, size) \
    "mov" #size " " #b ", " #temp "   \n\t"\
    "pavgb " #temp ", " #a "          \n\t"\
    "mov" #size " " #a ", " #b "      \n\t"
#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */
#if HAVE_MMX_INLINE
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}

static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;

    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_TRANSPOSE_IDCT_PERM;
}
#endif /* HAVE_MMX_INLINE */
#define DSPFUNC(PFX, IDX, NUM, EXT)                                                       \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT;
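/* DSPFUNC fills the subpel dispatch table, indexed as x + 4*y for quarter-pel
 * position (x,y): entries 2, 4, 8 and 12 correspond to (2,0), (0,1), (0,2)
 * and (0,3), matching the mc20/mc01/mc02/mc03 functions above. */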
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP,        mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_,  8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_,  8, mmxext)
CAVS_MC(avg_, 16, mmxext)

static av_cold void cavsdsp_init_mmxext(CAVSDSPContext *c,
                                        AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, mmxext);
    DSPFUNC(put, 1,  8, mmxext);
    DSPFUNC(avg, 0, 16, mmxext);
    DSPFUNC(avg, 1,  8, mmxext);
}
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_, PUT_OP,       3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1,  8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1,  8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */
av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
#if HAVE_MMX_INLINE
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX)
        cavsdsp_init_mmx(c, avctx);
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        cavsdsp_init_mmxext(c, avctx);
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_AMD3DNOW_INLINE
    if (mm_flags & AV_CPU_FLAG_3DNOW)
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
}
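/* This initializer is presumably invoked from the generic CAVS DSP setup
 * (ff_cavsdsp_init() in libavcodec/cavsdsp.c) after the C function pointers
 * have been installed, so each CPU-flag branch only overrides the entries
 * it accelerates. */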