dsputil_init.c

/*
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_x86.h"
#include "idct_xvid.h"
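
/* Prototypes for the hand-written assembly versions wired up below; the
 * implementations live in the .asm files of this directory and are only
 * available when the build has yasm support (HAVE_YASM). */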
void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                     uint8_t *src2, int dstStride,
                                     int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                              int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
                               int dstStride, int src1Stride, int h);
void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1,
                                      uint8_t *src2, int dstStride,
                                      int src1Stride, int h);

void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride,
                                                 int h);
void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride, int h);
void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride,
                                                int h);
void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                          int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                 int dstStride, int srcStride);
void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                         int dstStride, int srcStride);
void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src,
                                                int dstStride, int srcStride);

#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext
#define ff_put_no_rnd_pixels8_mmxext  ff_put_pixels8_mmxext

void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);

int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
                                      int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
                                    int order);
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
                                               const int16_t *v3,
                                               int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3,
                                             int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
                                              const int16_t *v3,
                                              int order, int mul);

void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
                                        const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
                                  const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
                                const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
                                 const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
                                      const int16_t *window, unsigned int len);

void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);

void ff_add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top,
                                          const uint8_t *diff, int w,
                                          int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src,
                                       int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,
                                      int w, int left);

void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src,
                                   int32_t min, int32_t max, unsigned int len);

#if HAVE_YASM

PIXELS16(static, ff_avg, , , _mmxext)
PIXELS16(static, ff_put, , , _mmxext)
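
/* QPEL_OP() expands to the 16 quarter-pel motion-compensation functions for
 * one rounding mode (RND) and one operation (OPNAME: put or avg).  The mcXY
 * suffix encodes the fractional sample position, X horizontal and Y vertical
 * in quarter pels: mc00 is a plain block copy, mc20 the horizontal half-pel
 * case, mc22 the 2D half-pel case, and so on.  The on-stack "half"/"temp"
 * buffers hold intermediate separably low-pass filtered planes, which the
 * pixels*_l2 helpers then average with the source or with each other. */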

#define QPEL_OP(OPNAME, RND, MMX)                                       \
static void OPNAME ## qpel8_mc00_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);              \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride,    \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8,        \
                                                   stride, 8);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride,     \
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half,                 \
                                        stride, stride, 8);             \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src,            \
                                                   stride, stride);     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t temp[8];                                                   \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src,           \
                                                   8, stride);          \
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\
                                        stride, 8);                     \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8,           \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV,             \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH  = ((uint8_t*)half) + 64;                     \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV,         \
                                        stride, 8, 8);                  \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH,              \
                                        8, stride, 9);                  \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[8 + 9];                                               \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8,       \
                                        stride, 9);                     \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src,    \
                                         ptrdiff_t stride)              \
{                                                                       \
    uint64_t half[9];                                                   \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8,       \
                                                   stride, 9);          \
    ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH,          \
                                                   stride, 8);          \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc00_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);            \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride, 16);\
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16,      \
                                                    stride, 16);        \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half,            \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride,        \
                                         stride, 16);                   \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src,           \
                                                    stride, stride);    \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t temp[32];                                                  \
    uint8_t * const half = (uint8_t*)temp;                              \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16,      \
                                                    stride);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + stride, half,       \
                                         stride, stride, 16);           \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV,            \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[16 * 2 + 17 * 2];                                     \
    uint8_t * const halfH  = ((uint8_t*)half) + 256;                    \
    uint8_t * const halfHV = ((uint8_t*)half);                          \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH,      \
                                                    16, 16);            \
    ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV,       \
                                         stride, 16, 16);               \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16,         \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16,     \
                                         stride, 17);                   \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}                                                                       \
                                                                        \
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src,   \
                                          ptrdiff_t stride)             \
{                                                                       \
    uint64_t half[17 * 2];                                              \
    uint8_t * const halfH = ((uint8_t*)half);                           \
    ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16,     \
                                                    stride, 17);        \
    ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH,         \
                                                    stride, 16);        \
}

QPEL_OP(put_,        _,        mmxext)
QPEL_OP(avg_,        _,        mmxext)
QPEL_OP(put_no_rnd_, _no_rnd_, mmxext)

#endif /* HAVE_YASM */
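
/* SET_QPEL_FUNCS() fills one row of a qpel dispatch table: the second index
 * is the quarter-pel position [y * 4 + x], matching the mcXY naming above,
 * and IDX selects the block size (0 = 16x16, 1 = 8x8).  For illustration,
 * one expanded assignment from SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, )
 * reads:
 *
 *     c->put_qpel_pixels_tab[0][1] = put_qpel16_mc10_mmxext;
 */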
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX)                              \
    do {                                                                         \
        c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
    } while (0)
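
/* Convention used by the per-ISA init functions below: the HAVE_*_INLINE
 * guards cover optimizations written as inline assembly and compiled by the
 * C compiler, while the HAVE_*_EXTERNAL guards cover pointers to the
 * standalone yasm-built assembly declared at the top of this file. */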

static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_MMX_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
    c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;

    if (!high_bit_depth) {
        c->clear_block  = ff_clear_block_mmx;
        c->clear_blocks = ff_clear_blocks_mmx;
        c->draw_edges   = ff_draw_edges_mmx;
    }

#if CONFIG_VIDEODSP && (ARCH_X86_32 || !HAVE_YASM)
    c->gmc = ff_gmc_mmx;
#endif

    c->add_bytes = ff_add_bytes_mmx;
#endif /* HAVE_MMX_INLINE */

#if HAVE_MMX_EXTERNAL
    if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
        c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
    }

    c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif /* HAVE_MMX_EXTERNAL */
}

static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx,
                                        int mm_flags)
{
#if HAVE_MMXEXT_EXTERNAL
    SET_QPEL_FUNCS(avg_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(avg_qpel,        1,  8, mmxext, );

    SET_QPEL_FUNCS(put_qpel,        0, 16, mmxext, );
    SET_QPEL_FUNCS(put_qpel,        1,  8, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, );
    SET_QPEL_FUNCS(put_no_rnd_qpel, 1,  8, mmxext, );

    /* slower than cmov version on AMD */
    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmxext;

    c->scalarproduct_int16          = ff_scalarproduct_int16_mmxext;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_mmxext;
    } else {
        c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
    }
#endif /* HAVE_MMXEXT_EXTERNAL */
}

static av_cold void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx,
                                     int mm_flags)
{
#if HAVE_SSE_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)) {
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = ff_clear_block_sse;
            c->clear_blocks = ff_clear_blocks_sse;
        }
    }

    c->vector_clipf = ff_vector_clipf_sse;
#endif /* HAVE_SSE_INLINE */

#if HAVE_YASM
#if HAVE_INLINE_ASM && CONFIG_VIDEODSP
    c->gmc = ff_gmc_sse;
#endif
#endif /* HAVE_YASM */
}

static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE2_INLINE
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
        c->idct_put              = ff_idct_xvid_sse2_put;
        c->idct_add              = ff_idct_xvid_sse2_add;
        c->idct                  = ff_idct_xvid_sse2;
        c->idct_permutation_type = FF_SSE2_IDCT_PERM;
    }
#endif /* HAVE_SSE2_INLINE */

#if HAVE_SSE2_EXTERNAL
    c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;

    if (mm_flags & AV_CPU_FLAG_ATOM) {
        c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
    } else {
        c->vector_clip_int32 = ff_vector_clip_int32_sse2;
    }

    if (avctx->flags & CODEC_FLAG_BITEXACT) {
        c->apply_window_int16 = ff_apply_window_int16_sse2;
    } else if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
        c->apply_window_int16 = ff_apply_window_int16_round_sse2;
    }

    c->bswap_buf = ff_bswap32_buf_sse2;
#endif /* HAVE_SSE2_EXTERNAL */
}

static av_cold void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
                                       int mm_flags)
{
#if HAVE_SSSE3_EXTERNAL
    c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
    if (mm_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
        c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;

    if (mm_flags & AV_CPU_FLAG_ATOM)
        c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
    else
        c->apply_window_int16 = ff_apply_window_int16_ssse3;

    if (!(mm_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
        c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;

    c->bswap_buf = ff_bswap32_buf_ssse3;
#endif /* HAVE_SSSE3_EXTERNAL */
}

static av_cold void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
                                      int mm_flags)
{
#if HAVE_SSE4_EXTERNAL
    c->vector_clip_int32 = ff_vector_clip_int32_sse4;
#endif /* HAVE_SSE4_EXTERNAL */
}
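
/* Entry point: query the CPU feature flags once, then apply the per-ISA
 * init functions in ascending order.  Later calls overwrite the function
 * pointers installed by earlier ones, so the context ends up with the
 * fastest implementation the host CPU supports. */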
av_cold void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_CMOV)
        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_cmov;
#endif

    if (mm_flags & AV_CPU_FLAG_MMX) {
#if HAVE_INLINE_ASM
        const int idct_algo = avctx->idct_algo;

        if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
            if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
                c->idct_put              = ff_simple_idct_put_mmx;
                c->idct_add              = ff_simple_idct_add_mmx;
                c->idct                  = ff_simple_idct_mmx;
                c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
            } else if (idct_algo == FF_IDCT_XVIDMMX) {
                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->idct_put              = ff_idct_xvid_sse2_put;
                    c->idct_add              = ff_idct_xvid_sse2_add;
                    c->idct                  = ff_idct_xvid_sse2;
                    c->idct_permutation_type = FF_SSE2_IDCT_PERM;
                } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                    c->idct_put = ff_idct_xvid_mmxext_put;
                    c->idct_add = ff_idct_xvid_mmxext_add;
                    c->idct     = ff_idct_xvid_mmxext;
                } else {
                    c->idct_put = ff_idct_xvid_mmx_put;
                    c->idct_add = ff_idct_xvid_mmx_add;
                    c->idct     = ff_idct_xvid_mmx;
                }
            }
        }
#endif /* HAVE_INLINE_ASM */

        dsputil_init_mmx(c, avctx, mm_flags);
    }

    if (mm_flags & AV_CPU_FLAG_MMXEXT)
        dsputil_init_mmxext(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE)
        dsputil_init_sse(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE2)
        dsputil_init_sse2(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSSE3)
        dsputil_init_ssse3(c, avctx, mm_flags);

    if (mm_flags & AV_CPU_FLAG_SSE4)
        dsputil_init_sse4(c, avctx, mm_flags);

    if (CONFIG_ENCODERS)
        ff_dsputilenc_init_mmx(c, avctx);
}
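
/* Typical call site, as a sketch: the generic DSP initializer in
 * libavcodec/dsputil.c invokes this once per codec context on x86:
 *
 *     if (ARCH_X86)
 *         ff_dsputil_init_mmx(c, avctx);
 */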