/*
 * Copyright (c) 2016 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdbool.h>
#include <string.h>

#include "config_components.h"

#include "libavcodec/vp8dsp.h"

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem_internal.h"

#include "checkasm.h"

#define PIXEL_STRIDE 16

#define randomize_buffers(src, dst, stride, coef)             \
    do {                                                      \
        int x, y;                                             \
        for (y = 0; y < 4; y++) {                             \
            AV_WN32A((src) + y * (stride), rnd());            \
            AV_WN32A((dst) + y * (stride), rnd());            \
            for (x = 0; x < 4; x++)                           \
                (coef)[y * 4 + x] = (src)[y * (stride) + x] - \
                                    (dst)[y * (stride) + x];  \
        }                                                     \
    } while (0)
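
/* Forward 4x4 DCT, the encoder-side counterpart of the inverse transform
 * tested below; used to turn random pixel differences into coefficients
 * that the IDCT functions can plausibly receive. */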
static void dct4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        const int a1 = (coef[i * 4 + 0] + coef[i * 4 + 3]) * 8;
        const int b1 = (coef[i * 4 + 1] + coef[i * 4 + 2]) * 8;
        const int c1 = (coef[i * 4 + 1] - coef[i * 4 + 2]) * 8;
        const int d1 = (coef[i * 4 + 0] - coef[i * 4 + 3]) * 8;
        coef[i * 4 + 0] =  a1 + b1;
        coef[i * 4 + 1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12;
        coef[i * 4 + 2] =  a1 - b1;
        coef[i * 4 + 3] = (d1 * 2217 - c1 * 5352 +  7500) >> 12;
    }
    for (i = 0; i < 4; i++) {
        const int a1 = coef[i + 0 * 4] + coef[i + 3 * 4];
        const int b1 = coef[i + 1 * 4] + coef[i + 2 * 4];
        const int c1 = coef[i + 1 * 4] - coef[i + 2 * 4];
        const int d1 = coef[i + 0 * 4] - coef[i + 3 * 4];
        coef[i + 0 * 4] =  (a1 + b1 + 7) >> 4;
        coef[i + 1 * 4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + !!d1;
        coef[i + 2 * 4] =  (a1 - b1 + 7) >> 4;
        coef[i + 3 * 4] =  (d1 * 2217 - c1 * 5352 + 51000) >> 16;
    }
}
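
/* Forward 4x4 Walsh-Hadamard transform, the encoder-side counterpart of
 * vp8_luma_dc_wht; used to generate a plausible luma DC block. */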
static void wht4x4(int16_t *coef)
{
    int i;
    for (i = 0; i < 4; i++) {
        int a1 = coef[0 * 4 + i];
        int b1 = coef[1 * 4 + i];
        int c1 = coef[2 * 4 + i];
        int d1 = coef[3 * 4 + i];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1  = (a1 - d1) >> 1;
        b1  = e1 - b1;
        c1  = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[0 * 4 + i] = a1;
        coef[1 * 4 + i] = c1;
        coef[2 * 4 + i] = d1;
        coef[3 * 4 + i] = b1;
    }
    for (i = 0; i < 4; i++) {
        int a1 = coef[i * 4 + 0];
        int b1 = coef[i * 4 + 1];
        int c1 = coef[i * 4 + 2];
        int d1 = coef[i * 4 + 3];
        int e1;
        a1 += b1;
        d1 -= c1;
        e1  = (a1 - d1) >> 1;
        b1  = e1 - b1;
        c1  = e1 - c1;
        a1 -= c1;
        d1 += b1;
        coef[i * 4 + 0] = a1 * 2;
        coef[i * 4 + 1] = c1 * 2;
        coef[i * 4 + 2] = d1 * 2;
        coef[i * 4 + 3] = b1 * 2;
    }
}
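
/* Check vp8_idct_add and vp8_idct_dc_add: generate coefficients with the
 * forward DCT above, run the reference and optimized versions on identical
 * inputs, and compare both the output pixels and the (possibly clobbered)
 * coefficient buffer. */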
static void check_idct(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4 * 4]);
    int dc;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t *block, ptrdiff_t stride);

    randomize_buffers(src, dst, 4, coef);

    dct4x4(coef);

    for (dc = 0; dc <= 1; dc++) {
        void (*idct)(uint8_t *, int16_t *, ptrdiff_t) = dc ? d->vp8_idct_dc_add : d->vp8_idct_add;

        if (check_func(idct, "vp%d_idct_%sadd", 8 - is_vp7, dc ? "dc_" : "")) {
            if (dc) {
                memset(subcoef0, 0, 4 * 4 * sizeof(int16_t));
                subcoef0[0] = coef[0];
            } else {
                memcpy(subcoef0, coef, 4 * 4 * sizeof(int16_t));
            }

            memcpy(dst0, dst, 4 * 4);
            memcpy(dst1, dst, 4 * 4);

            memcpy(subcoef1, subcoef0, 4 * 4 * sizeof(int16_t));

            // Note, this uses a pixel stride of 4, even though the real
            // decoder uses a stride that is a multiple of 16. If
            // optimizations want to take advantage of that, this test needs
            // to be updated to make it more like the h264dsp tests.
            call_ref(dst0, subcoef0, 4);
            call_new(dst1, subcoef1, 4);
            if (memcmp(dst0, dst1, 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * sizeof(int16_t)))
                fail();

            bench_new(dst1, subcoef1, 4);
        }
    }
}
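
/* Check vp8_idct_dc_add4y and vp8_idct_dc_add4uv, which add four DC-only
 * blocks at once: a row of four 4x4 blocks for luma, a 2x2 arrangement of
 * 4x4 blocks for chroma. */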
static void check_idct_dc4(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(uint8_t, src,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst,  [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [4 * 4 * 4]);
    LOCAL_ALIGNED_16(int16_t, coef, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef0, [4], [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, subcoef1, [4], [4 * 4]);
    int i, chroma;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);

    for (chroma = 0; chroma <= 1; chroma++) {
        void (*idct4dc)(uint8_t *, int16_t[4][16], ptrdiff_t) = chroma ? d->vp8_idct_dc_add4uv : d->vp8_idct_dc_add4y;
        if (check_func(idct4dc, "vp%d_idct_dc_add4%s", 8 - is_vp7, chroma ? "uv" : "y")) {
            ptrdiff_t stride = chroma ? 8 : 16;
            int w = chroma ? 2 : 4;
            for (i = 0; i < 4; i++) {
                int blockx = 4 * (i % w);
                int blocky = 4 * (i / w);
                randomize_buffers(src + stride * blocky + blockx,
                                  dst + stride * blocky + blockx,
                                  stride, coef[i]);
                dct4x4(coef[i]);
                memset(&coef[i][1], 0, 15 * sizeof(int16_t));
            }

            memcpy(dst0, dst, 4 * 4 * 4);
            memcpy(dst1, dst, 4 * 4 * 4);
            memcpy(subcoef0, coef, 4 * 4 * 4 * sizeof(int16_t));
            memcpy(subcoef1, coef, 4 * 4 * 4 * sizeof(int16_t));
            call_ref(dst0, subcoef0, stride);
            call_new(dst1, subcoef1, stride);
            if (memcmp(dst0, dst1, 4 * 4 * 4) ||
                memcmp(subcoef0, subcoef1, 4 * 4 * 4 * sizeof(int16_t)))
                fail();
            bench_new(dst1, subcoef1, stride);
        }
    }
}
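
/* Check vp8_luma_dc_wht and vp8_luma_dc_wht_dc, which inverse-transform the
 * 4x4 block of luma DC values and scatter the results into the DC position
 * of each of the 16 luma coefficient blocks. */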
static void check_luma_dc_wht(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(int16_t, dc,  [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc0, [4 * 4]);
    LOCAL_ALIGNED_16(int16_t, dc1, [4 * 4]);
    int16_t block[4][4][16];
    LOCAL_ALIGNED_16(int16_t, block0, [4], [4][16]);
    LOCAL_ALIGNED_16(int16_t, block1, [4], [4][16]);
    int dc_only;
    int blockx, blocky;
    declare_func_emms(AV_CPU_FLAG_MMX, void, int16_t block[4][4][16], int16_t dc[16]);

    for (blocky = 0; blocky < 4; blocky++) {
        for (blockx = 0; blockx < 4; blockx++) {
            uint8_t src[16], dst[16];
            randomize_buffers(src, dst, 4, block[blocky][blockx]);

            dct4x4(block[blocky][blockx]);
            dc[blocky * 4 + blockx] = block[blocky][blockx][0];
            block[blocky][blockx][0] = rnd();
        }
    }
    wht4x4(dc);

    for (dc_only = 0; dc_only <= 1; dc_only++) {
        void (*idct)(int16_t [4][4][16], int16_t [16]) = dc_only ? d->vp8_luma_dc_wht_dc : d->vp8_luma_dc_wht;

        if (check_func(idct, "vp%d_luma_dc_wht%s", 8 - is_vp7, dc_only ? "_dc" : "")) {
            if (dc_only) {
                memset(dc0, 0, 16 * sizeof(int16_t));
                dc0[0] = dc[0];
            } else {
                memcpy(dc0, dc, 16 * sizeof(int16_t));
            }
            memcpy(dc1, dc0, 16 * sizeof(int16_t));
            memcpy(block0, block, 4 * 4 * 16 * sizeof(int16_t));
            memcpy(block1, block, 4 * 4 * 16 * sizeof(int16_t));
            call_ref(block0, dc0);
            call_new(block1, dc1);
            if (memcmp(block0, block1, 4 * 4 * 16 * sizeof(int16_t)) ||
                memcmp(dc0, dc1, 16 * sizeof(int16_t)))
                fail();
            bench_new(block1, dc1);
        }
    }
}

#define SRC_BUF_STRIDE 32
#define SRC_BUF_SIZE (((size << (size < 16)) + 5) * SRC_BUF_STRIDE)
// The mc subpixel interpolation filter needs the 2 previous pixels in either
// direction, the +1 is to make sure the actual load addresses always are
// unaligned.
#define src (buf + 2 * SRC_BUF_STRIDE + 2 + 1)

#undef randomize_buffers
#define randomize_buffers()                         \
    do {                                            \
        int k;                                      \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) {     \
            AV_WN32A(buf + k, rnd());               \
        }                                           \
    } while (0)
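
/* Check the motion compensation functions: the plain copies ("pixels"), the
 * subpel filters ("epel", with a 4- or 6-tap filter in each direction) and
 * the bilinear variants, over all block sizes and all horizontal/vertical
 * subpel combinations. */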
static void check_mc(VP8DSPContext *d)
{
    LOCAL_ALIGNED_16(uint8_t, buf,  [32 * 32]);
    LOCAL_ALIGNED_16(uint8_t, dst0, [16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, dst1, [16 * 16]);
    int type, k, dx, dy;
    declare_func_emms(AV_CPU_FLAG_MMX, void, uint8_t *, ptrdiff_t,
                      const uint8_t *, ptrdiff_t, int, int, int);

    for (type = 0; type < 2; type++) {
        for (k = 1; k < 8; k++) {
            int hsize  = k / 3;
            int size   = 16 >> hsize;
            int height = (size << 1) >> (k % 3);
            for (dy = 0; dy < 3; dy++) {
                for (dx = 0; dx < 3; dx++) {
                    char str[100];
                    vp8_mc_func func = (type ? d->put_vp8_bilinear_pixels_tab
                                             : d->put_vp8_epel_pixels_tab)[hsize][dy][dx];
                    if (dx || dy) {
                        if (type == 0) {
                            static const char *dx_names[] = { "", "h4", "h6" };
                            static const char *dy_names[] = { "", "v4", "v6" };
                            snprintf(str, sizeof(str), "epel%d_%s%s", size, dx_names[dx], dy_names[dy]);
                        } else {
                            snprintf(str, sizeof(str), "bilin%d_%s%s", size, dx ? "h" : "", dy ? "v" : "");
                        }
                    } else {
                        snprintf(str, sizeof(str), "pixels%d", size);
                    }
                    if (check_func(func, "vp8_put_%s", str)) {
                        int mx, my;
                        int i;
                        if (type == 0) {
                            mx = dx == 2 ? 2 + 2 * (rnd() % 3) : dx == 1 ? 1 + 2 * (rnd() % 4) : 0;
                            my = dy == 2 ? 2 + 2 * (rnd() % 3) : dy == 1 ? 1 + 2 * (rnd() % 4) : 0;
                        } else {
                            mx = dx ? 1 + (rnd() % 7) : 0;
                            my = dy ? 1 + (rnd() % 7) : 0;
                        }
                        randomize_buffers();
                        for (i = -2; i <= 3; i++) {
                            int val = (i == -1 || i == 2) ? 0 : 0xff;
                            // Set pixels in the first row and column to the maximum pattern,
                            // to test for potential overflows in the filter.
                            src[i]                  = val;
                            src[i * SRC_BUF_STRIDE] = val;
                        }
                        call_ref(dst0, size, src, SRC_BUF_STRIDE, height, mx, my);
                        call_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                        if (memcmp(dst0, dst1, size * height))
                            fail();
                        bench_new(dst1, size, src, SRC_BUF_STRIDE, height, mx, my);
                    }
                }
            }
        }
    }
}

#undef randomize_buffers

#define setpx(a, b, c) buf[(a) + (b) * jstride] = av_clip_uint8(c)
// Set the pixel to c +/- [0,d]
#define setdx(a, b, c, d) setpx(a, b, c - (d) + (rnd() % ((d) * 2 + 1)))
// Set the pixel to c +/- [d,d+e] (making sure it won't be clipped)
#define setdx2(a, b, o, c, d, e) setpx(a, b, o = c + ((d) + (rnd() % (e))) * (c >= 128 ? -1 : 1))
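
/* Fill one 8-pixel edge (at lineoff, along direction dir) with values whose
 * differences stay within the filter's E and I limits, so the loop filter
 * actually triggers; force_hev controls whether the high-edge-variance
 * threshold is exceeded as well. */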
static void randomize_loopfilter_buffers(int lineoff, int str,
                                         int dir, int flim_E, int flim_I,
                                         int hev_thresh, uint8_t *buf,
                                         int force_hev)
{
    uint32_t mask = 0xff;
    int off = dir ? lineoff : lineoff * str;
    int istride = dir ? 1 : str;
    int jstride = dir ? str : 1;
    int i;
    for (i = 0; i < 8; i += 2) {
        // Row 0 will trigger hev for q0/q1, row 2 will trigger hev for p0/p1,
        // rows 4 and 6 will not trigger hev.
        // force_hev 1 will make sure all rows trigger hev, while force_hev -1
        // makes none of them trigger it.
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx, 0, q0 = rnd() & mask);
        if ((i == 0 && force_hev >= 0) || force_hev > 0)
            setdx2(idx, 1, q1, q0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx, 1, q1 = q0, hev_thresh);
        setdx(idx, 2, q2 = q1, flim_I);
        setdx(idx, 3, q2, flim_I);
        setdx(idx, -1, p0 = q0, flim_E >> 2);
        if ((i == 2 && force_hev >= 0) || force_hev > 0)
            setdx2(idx, -2, p1, p0, hev_thresh + 1, flim_I - hev_thresh - 1);
        else
            setdx(idx, -2, p1 = p0, hev_thresh);
        setdx(idx, -3, p2 = p1, flim_I);
        setdx(idx, -4, p2, flim_I);
    }
}

// Fill the buffer with random pixels
static void fill_loopfilter_buffers(uint8_t *buf, ptrdiff_t stride, int w, int h)
{
    int x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            buf[y * stride + x] = rnd() & 0xff;
}

#define randomize_buffers(buf, lineoff, str, force_hev) \
    randomize_loopfilter_buffers(lineoff, str, dir, flim_E, flim_I, hev_thresh, buf, force_hev)
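
/* Check the full and "_inner" 16-pixel luma loop filters, horizontally and
 * vertically, with hev forced on, forced off and mixed; the whole 16x16
 * buffer is compared afterwards to catch writes outside the filtered edge. */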
static void check_loopfilter_16y(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func(void, uint8_t *, ptrdiff_t, int, int, int);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, ptrdiff_t, int, int, int) = NULL;

            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d->vp8_h_loop_filter16y;       break;
            case (1 << 1) | 0: func = d->vp8_v_loop_filter16y;       break;
            case (0 << 1) | 1: func = d->vp8_h_loop_filter16y_inner; break;
            case (1 << 1) | 1: func = d->vp8_v_loop_filter16y_inner; break;
            }
            if (check_func(func, "vp%d_loop_filter16y%s_%s", 8 - is_vp7,
                           edge ? "_inner" : "", dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                    randomize_buffers(buf0, 0, 16, force_hev);
                    randomize_buffers(buf0, 8, 16, force_hev);
                    memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
                    call_ref(buf0, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
                randomize_buffers(buf0, 0, 16, 0);
                randomize_buffers(buf0, 8, 16, 0);
                bench_new(buf0, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}
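
/* Same as check_loopfilter_16y, but for the chroma loop filters, which
 * process the U and V planes in a single call. */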
static void check_loopfilter_8uv(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(uint8_t, base0u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base0v, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1u, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1v, [32 + 16 * 16]);
    int dir, edge, force_hev;
    int flim_E = 20, flim_I = 10, hev_thresh = 7;
    declare_func(void, uint8_t *, uint8_t *, ptrdiff_t, int, int, int);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0u = base0u + midoff_aligned;
        uint8_t *buf0v = base0v + midoff_aligned;
        uint8_t *buf1u = base1u + midoff_aligned;
        uint8_t *buf1v = base1v + midoff_aligned;
        for (edge = 0; edge < 2; edge++) {
            void (*func)(uint8_t *, uint8_t *, ptrdiff_t, int, int, int) = NULL;

            switch (dir << 1 | edge) {
            case (0 << 1) | 0: func = d->vp8_h_loop_filter8uv;       break;
            case (1 << 1) | 0: func = d->vp8_v_loop_filter8uv;       break;
            case (0 << 1) | 1: func = d->vp8_h_loop_filter8uv_inner; break;
            case (1 << 1) | 1: func = d->vp8_v_loop_filter8uv_inner; break;
            }
            if (check_func(func, "vp%d_loop_filter8uv%s_%s", 8 - is_vp7,
                           edge ? "_inner" : "", dir ? "v" : "h")) {
                for (force_hev = -1; force_hev <= 1; force_hev++) {
                    fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                    fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                    randomize_buffers(buf0u, 0, 16, force_hev);
                    randomize_buffers(buf0v, 0, 16, force_hev);
                    memcpy(buf1u - midoff, buf0u - midoff, 16 * 16);
                    memcpy(buf1v - midoff, buf0v - midoff, 16 * 16);

                    call_ref(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
                    call_new(buf1u, buf1v, 16, flim_E, flim_I, hev_thresh);
                    if (memcmp(buf0u - midoff, buf1u - midoff, 16 * 16) ||
                        memcmp(buf0v - midoff, buf1v - midoff, 16 * 16))
                        fail();
                }
                fill_loopfilter_buffers(buf0u - midoff, 16, 16, 16);
                fill_loopfilter_buffers(buf0v - midoff, 16, 16, 16);
                randomize_buffers(buf0u, 0, 16, 0);
                randomize_buffers(buf0v, 0, 16, 0);
                bench_new(buf0u, buf0v, 16, flim_E, flim_I, hev_thresh);
            }
        }
    }
}
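
/* Check the simple loop filter variant, which takes only the edge limit
 * flim_E; flim_I and hev_thresh are still needed to build the test edges. */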
static void check_loopfilter_simple(VP8DSPContext *d, bool is_vp7)
{
    LOCAL_ALIGNED_16(uint8_t, base0, [32 + 16 * 16]);
    LOCAL_ALIGNED_16(uint8_t, base1, [32 + 16 * 16]);
    int dir;
    int flim_E = 20, flim_I = 30, hev_thresh = 0;
    declare_func(void, uint8_t *, ptrdiff_t, int);

    for (dir = 0; dir < 2; dir++) {
        int midoff = dir ? 4 * 16 : 4;
        int midoff_aligned = dir ? 4 * 16 : 16;
        uint8_t *buf0 = base0 + midoff_aligned;
        uint8_t *buf1 = base1 + midoff_aligned;
        void (*func)(uint8_t *, ptrdiff_t, int) = dir ? d->vp8_v_loop_filter_simple : d->vp8_h_loop_filter_simple;
        if (check_func(func, "vp%d_loop_filter_simple_%s", 8 - is_vp7, dir ? "v" : "h")) {
            fill_loopfilter_buffers(buf0 - midoff, 16, 16, 16);
            randomize_buffers(buf0, 0, 16, -1);
            randomize_buffers(buf0, 8, 16, -1);
            memcpy(buf1 - midoff, buf0 - midoff, 16 * 16);
            call_ref(buf0, 16, flim_E);
            call_new(buf1, 16, flim_E);
            if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16))
                fail();
            bench_new(buf0, 16, flim_E);
        }
    }
}
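
/* Run the tests shared between VP7 and VP8 with the dsp context initialized
 * for the selected codec; the MC functions are common to both and are
 * checked separately in checkasm_check_vp8dsp(). */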
static void checkasm_check_vp78dsp(VP8DSPContext *d, bool is_vp7)
{
#if CONFIG_VP7_DECODER
    if (is_vp7)
        ff_vp7dsp_init(d);
    else
#endif
        ff_vp8dsp_init(d);

    check_idct(d, is_vp7);
    check_idct_dc4(d, is_vp7);
    check_luma_dc_wht(d, is_vp7);
    report("idct");

    check_loopfilter_16y(d, is_vp7);
    check_loopfilter_8uv(d, is_vp7);
    check_loopfilter_simple(d, is_vp7);
    report("loopfilter");
}

void checkasm_check_vp8dsp(void)
{
    VP8DSPContext d;

    ff_vp78dsp_init(&d);
    check_mc(&d);
    report("mc");

    checkasm_check_vp78dsp(&d, false);
#if CONFIG_VP7_DECODER
    checkasm_check_vp78dsp(&d, true);
#endif
}