/* tests/checkasm/hevc_pel.c — checkasm tests for the HEVC qpel/epel DSP functions */
  1. /*
  2. * Copyright (c) 2015 Henrik Gramner
  3. * Copyright (c) 2021 Josh Dekker
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along
  18. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  20. */
  21. #include <string.h>
  22. #include "checkasm.h"
  23. #include "libavcodec/hevc/dsp.h"
  24. #include "libavutil/common.h"
  25. #include "libavutil/internal.h"
  26. #include "libavutil/intreadwrite.h"
/* Masks applied to random 32-bit words so source pixels stay within the
 * valid range for each bit depth; indexed by (bit_depth - 8). */
static const uint32_t pixel_mask[] = { 0xffffffff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Masks for the int16_t intermediate (src2/ref) buffers used by the bi
 * variants; indexed by (bit_depth - 8). */
static const uint32_t pixel_mask16[] = { 0x00ff00ff, 0x01ff01ff, 0x03ff03ff, 0x07ff07ff, 0x0fff0fff };
/* Block width/height for each HEVC dsp size index; index 0 is unused (-1). */
static const int sizes[] = { -1, 4, 6, 8, 12, 16, 24, 32, 48, 64 };
/* Parameter sets for the weighted-prediction tests; each list is
 * terminated by a negative sentinel. */
static const int weights[] = { 0, 128, 255, -1 };
static const int denoms[] = {0, 7, 12, -1 };
static const int offsets[] = {0, 255, -1 };

/* Bytes per pixel for the bit_depth variable in the enclosing scope. */
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
#define BUF_SIZE (2 * MAX_PB_SIZE * (2 * 4 + MAX_PB_SIZE))

/* Compare two pixel buffers, picking the element type (uint8_t vs
 * uint16_t) that matches the bit_depth in the enclosing scope. */
#define checkasm_check_pixel(buf1, stride1, buf2, stride2, ...) \
    ((bit_depth > 8) ? \
        checkasm_check(uint16_t, (const uint16_t*)buf1, stride1, \
                                 (const uint16_t*)buf2, stride2, \
                                 __VA_ARGS__) : \
        checkasm_check(uint8_t,  (const uint8_t*) buf1, stride1, \
                                 (const uint8_t*) buf2, stride2, \
                                 __VA_ARGS__))
/* Fill buf0 and buf1 with the same random pixel data (masked to the
 * current bit depth) and dst0/dst1 with the same random bytes, so a
 * reference/new mismatch or stray write is detected by the checks. */
#define randomize_buffers() \
do { \
    uint32_t mask = pixel_mask[bit_depth - 8]; \
    int k; \
    for (k = 0; k < BUF_SIZE + SRC_EXTRA; k += 4) { \
        uint32_t r = rnd() & mask; \
        AV_WN32A(buf0 + k, r); \
        AV_WN32A(buf1 + k, r); \
        if (k >= BUF_SIZE) \
            continue; \
        r = rnd(); \
        AV_WN32A(dst0 + k, r); \
        AV_WN32A(dst1 + k, r); \
    } \
} while (0)

/* As randomize_buffers(), plus fill the int16_t ref0/ref1 buffers
 * (the src2 argument of the bi variants) with identical random values
 * masked by pixel_mask16. Note ref0/ref1 are int16_t pointers, so the
 * k += 2 step together with 32-bit stores covers every element. */
#define randomize_buffers_ref() \
randomize_buffers(); \
do { \
    uint32_t mask = pixel_mask16[bit_depth - 8]; \
    int k; \
    for (k = 0; k < BUF_SIZE; k += 2) { \
        uint32_t r = rnd() & mask; \
        AV_WN32A(ref0 + k, r); \
        AV_WN32A(ref1 + k, r); \
    } \
} while (0)

#define src0 (buf0 + 2 * 4 * MAX_PB_SIZE) /* hevc qpel functions read data from negative src pointer offsets */
#define src1 (buf1 + 2 * 4 * MAX_PB_SIZE)

/* FIXME: Does the need for SRC_EXTRA for these tests indicate a bug? */
#define SRC_EXTRA 8
  73. static void checkasm_check_hevc_qpel(void)
  74. {
  75. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
  76. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
  77. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  78. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  79. HEVCDSPContext h;
  80. int size, bit_depth, i, j;
  81. declare_func(void, int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
  82. int height, intptr_t mx, intptr_t my, int width);
  83. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  84. ff_hevc_dsp_init(&h, bit_depth);
  85. for (i = 0; i < 2; i++) {
  86. for (j = 0; j < 2; j++) {
  87. for (size = 1; size < 10; size++) {
  88. const char *type;
  89. switch ((j << 1) | i) {
  90. case 0: type = "pel_pixels"; break; // 0 0
  91. case 1: type = "qpel_h"; break; // 0 1
  92. case 2: type = "qpel_v"; break; // 1 0
  93. case 3: type = "qpel_hv"; break; // 1 1
  94. }
  95. if (check_func(h.put_hevc_qpel[size][j][i],
  96. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  97. int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
  98. randomize_buffers();
  99. call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  100. call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  101. checkasm_check(int16_t, dstw0, MAX_PB_SIZE * sizeof(int16_t),
  102. dstw1, MAX_PB_SIZE * sizeof(int16_t),
  103. size[sizes], size[sizes], "dst");
  104. bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  105. }
  106. }
  107. }
  108. }
  109. }
  110. report("qpel");
  111. }
  112. static void checkasm_check_hevc_qpel_uni(void)
  113. {
  114. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
  115. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
  116. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  117. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  118. HEVCDSPContext h;
  119. int size, bit_depth, i, j;
  120. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  121. int height, intptr_t mx, intptr_t my, int width);
  122. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  123. ff_hevc_dsp_init(&h, bit_depth);
  124. for (i = 0; i < 2; i++) {
  125. for (j = 0; j < 2; j++) {
  126. for (size = 1; size < 10; size++) {
  127. const char *type;
  128. switch ((j << 1) | i) {
  129. case 0: type = "pel_uni_pixels"; break; // 0 0
  130. case 1: type = "qpel_uni_h"; break; // 0 1
  131. case 2: type = "qpel_uni_v"; break; // 1 0
  132. case 3: type = "qpel_uni_hv"; break; // 1 1
  133. }
  134. if (check_func(h.put_hevc_qpel_uni[size][j][i],
  135. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  136. randomize_buffers();
  137. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  138. src0, sizes[size] * SIZEOF_PIXEL,
  139. sizes[size], i, j, sizes[size]);
  140. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  141. src1, sizes[size] * SIZEOF_PIXEL,
  142. sizes[size], i, j, sizes[size]);
  143. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  144. dst1, sizes[size] * SIZEOF_PIXEL,
  145. size[sizes], size[sizes], "dst");
  146. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  147. src1, sizes[size] * SIZEOF_PIXEL,
  148. sizes[size], i, j, sizes[size]);
  149. }
  150. }
  151. }
  152. }
  153. }
  154. report("qpel_uni");
  155. }
  156. static void checkasm_check_hevc_qpel_uni_w(void)
  157. {
  158. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
  159. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
  160. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  161. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  162. HEVCDSPContext h;
  163. int size, bit_depth, i, j;
  164. const int *denom, *wx, *ox;
  165. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  166. int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
  167. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  168. ff_hevc_dsp_init(&h, bit_depth);
  169. for (i = 0; i < 2; i++) {
  170. for (j = 0; j < 2; j++) {
  171. for (size = 1; size < 10; size++) {
  172. const char *type;
  173. switch ((j << 1) | i) {
  174. case 0: type = "pel_uni_w_pixels"; break; // 0 0
  175. case 1: type = "qpel_uni_w_h"; break; // 0 1
  176. case 2: type = "qpel_uni_w_v"; break; // 1 0
  177. case 3: type = "qpel_uni_w_hv"; break; // 1 1
  178. }
  179. if (check_func(h.put_hevc_qpel_uni_w[size][j][i],
  180. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  181. for (denom = denoms; *denom >= 0; denom++) {
  182. for (wx = weights; *wx >= 0; wx++) {
  183. for (ox = offsets; *ox >= 0; ox++) {
  184. randomize_buffers();
  185. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  186. src0, sizes[size] * SIZEOF_PIXEL,
  187. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  188. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  189. src1, sizes[size] * SIZEOF_PIXEL,
  190. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  191. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  192. dst1, sizes[size] * SIZEOF_PIXEL,
  193. size[sizes], size[sizes], "dst");
  194. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  195. src1, sizes[size] * SIZEOF_PIXEL,
  196. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  197. }
  198. }
  199. }
  200. }
  201. }
  202. }
  203. }
  204. }
  205. report("qpel_uni_w");
  206. }
  207. static void checkasm_check_hevc_qpel_bi(void)
  208. {
  209. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
  210. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
  211. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  212. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  213. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  214. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  215. HEVCDSPContext h;
  216. int size, bit_depth, i, j;
  217. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  218. const int16_t *src2,
  219. int height, intptr_t mx, intptr_t my, int width);
  220. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  221. ff_hevc_dsp_init(&h, bit_depth);
  222. for (i = 0; i < 2; i++) {
  223. for (j = 0; j < 2; j++) {
  224. for (size = 1; size < 10; size++) {
  225. const char *type;
  226. switch ((j << 1) | i) {
  227. case 0: type = "pel_bi_pixels"; break; // 0 0
  228. case 1: type = "qpel_bi_h"; break; // 0 1
  229. case 2: type = "qpel_bi_v"; break; // 1 0
  230. case 3: type = "qpel_bi_hv"; break; // 1 1
  231. }
  232. if (check_func(h.put_hevc_qpel_bi[size][j][i],
  233. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  234. randomize_buffers_ref();
  235. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  236. src0, sizes[size] * SIZEOF_PIXEL,
  237. ref0, sizes[size], i, j, sizes[size]);
  238. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  239. src1, sizes[size] * SIZEOF_PIXEL,
  240. ref1, sizes[size], i, j, sizes[size]);
  241. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  242. dst1, sizes[size] * SIZEOF_PIXEL,
  243. size[sizes], size[sizes], "dst");
  244. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  245. src1, sizes[size] * SIZEOF_PIXEL,
  246. ref1, sizes[size], i, j, sizes[size]);
  247. }
  248. }
  249. }
  250. }
  251. }
  252. report("qpel_bi");
  253. }
  254. static void checkasm_check_hevc_qpel_bi_w(void)
  255. {
  256. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE + SRC_EXTRA]);
  257. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE + SRC_EXTRA]);
  258. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  259. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  260. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  261. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  262. HEVCDSPContext h;
  263. int size, bit_depth, i, j;
  264. const int *denom, *wx, *ox;
  265. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  266. const int16_t *src2,
  267. int height, int denom, int wx0, int wx1,
  268. int ox0, int ox1, intptr_t mx, intptr_t my, int width);
  269. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  270. ff_hevc_dsp_init(&h, bit_depth);
  271. for (i = 0; i < 2; i++) {
  272. for (j = 0; j < 2; j++) {
  273. for (size = 1; size < 10; size++) {
  274. const char *type;
  275. switch ((j << 1) | i) {
  276. case 0: type = "pel_bi_w_pixels"; break; // 0 0
  277. case 1: type = "qpel_bi_w_h"; break; // 0 1
  278. case 2: type = "qpel_bi_w_v"; break; // 1 0
  279. case 3: type = "qpel_bi_w_hv"; break; // 1 1
  280. }
  281. if (check_func(h.put_hevc_qpel_bi_w[size][j][i],
  282. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  283. for (denom = denoms; *denom >= 0; denom++) {
  284. for (wx = weights; *wx >= 0; wx++) {
  285. for (ox = offsets; *ox >= 0; ox++) {
  286. randomize_buffers_ref();
  287. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  288. src0, sizes[size] * SIZEOF_PIXEL,
  289. ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  290. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  291. src1, sizes[size] * SIZEOF_PIXEL,
  292. ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  293. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  294. dst1, sizes[size] * SIZEOF_PIXEL,
  295. size[sizes], size[sizes], "dst");
  296. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  297. src1, sizes[size] * SIZEOF_PIXEL,
  298. ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  299. }
  300. }
  301. }
  302. }
  303. }
  304. }
  305. }
  306. }
  307. report("qpel_bi_w");
  308. }
/* The epel tests below do not need the extra source padding used by the
 * qpel tests above, so redefine SRC_EXTRA to 0 for the rest of the file. */
#undef SRC_EXTRA
#define SRC_EXTRA 0
  311. static void checkasm_check_hevc_epel(void)
  312. {
  313. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  314. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  315. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  316. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  317. HEVCDSPContext h;
  318. int size, bit_depth, i, j;
  319. declare_func(void, int16_t *dst, const uint8_t *src, ptrdiff_t srcstride,
  320. int height, intptr_t mx, intptr_t my, int width);
  321. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  322. ff_hevc_dsp_init(&h, bit_depth);
  323. for (i = 0; i < 2; i++) {
  324. for (j = 0; j < 2; j++) {
  325. for (size = 1; size < 10; size++) {
  326. const char *type;
  327. switch ((j << 1) | i) {
  328. case 0: type = "pel_pixels"; break; // 0 0
  329. case 1: type = "epel_h"; break; // 0 1
  330. case 2: type = "epel_v"; break; // 1 0
  331. case 3: type = "epel_hv"; break; // 1 1
  332. }
  333. if (check_func(h.put_hevc_epel[size][j][i],
  334. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  335. int16_t *dstw0 = (int16_t *) dst0, *dstw1 = (int16_t *) dst1;
  336. randomize_buffers();
  337. call_ref(dstw0, src0, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  338. call_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  339. checkasm_check(int16_t, dstw0, MAX_PB_SIZE * sizeof(int16_t),
  340. dstw1, MAX_PB_SIZE * sizeof(int16_t),
  341. size[sizes], size[sizes], "dst");
  342. bench_new(dstw1, src1, sizes[size] * SIZEOF_PIXEL, sizes[size], i, j, sizes[size]);
  343. }
  344. }
  345. }
  346. }
  347. }
  348. report("epel");
  349. }
  350. static void checkasm_check_hevc_epel_uni(void)
  351. {
  352. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  353. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  354. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  355. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  356. HEVCDSPContext h;
  357. int size, bit_depth, i, j;
  358. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  359. int height, intptr_t mx, intptr_t my, int width);
  360. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  361. ff_hevc_dsp_init(&h, bit_depth);
  362. for (i = 0; i < 2; i++) {
  363. for (j = 0; j < 2; j++) {
  364. for (size = 1; size < 10; size++) {
  365. const char *type;
  366. switch ((j << 1) | i) {
  367. case 0: type = "pel_uni_pixels"; break; // 0 0
  368. case 1: type = "epel_uni_h"; break; // 0 1
  369. case 2: type = "epel_uni_v"; break; // 1 0
  370. case 3: type = "epel_uni_hv"; break; // 1 1
  371. }
  372. if (check_func(h.put_hevc_epel_uni[size][j][i],
  373. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  374. randomize_buffers();
  375. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  376. src0, sizes[size] * SIZEOF_PIXEL,
  377. sizes[size], i, j, sizes[size]);
  378. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  379. src1, sizes[size] * SIZEOF_PIXEL,
  380. sizes[size], i, j, sizes[size]);
  381. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  382. dst1, sizes[size] * SIZEOF_PIXEL,
  383. size[sizes], size[sizes], "dst");
  384. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  385. src1, sizes[size] * SIZEOF_PIXEL,
  386. sizes[size], i, j, sizes[size]);
  387. }
  388. }
  389. }
  390. }
  391. }
  392. report("epel_uni");
  393. }
  394. static void checkasm_check_hevc_epel_uni_w(void)
  395. {
  396. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  397. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  398. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  399. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  400. HEVCDSPContext h;
  401. int size, bit_depth, i, j;
  402. const int *denom, *wx, *ox;
  403. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  404. int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width);
  405. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  406. ff_hevc_dsp_init(&h, bit_depth);
  407. for (i = 0; i < 2; i++) {
  408. for (j = 0; j < 2; j++) {
  409. for (size = 1; size < 10; size++) {
  410. const char *type;
  411. switch ((j << 1) | i) {
  412. case 0: type = "pel_uni_w_pixels"; break; // 0 0
  413. case 1: type = "epel_uni_w_h"; break; // 0 1
  414. case 2: type = "epel_uni_w_v"; break; // 1 0
  415. case 3: type = "epel_uni_w_hv"; break; // 1 1
  416. }
  417. if (check_func(h.put_hevc_epel_uni_w[size][j][i],
  418. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  419. for (denom = denoms; *denom >= 0; denom++) {
  420. for (wx = weights; *wx >= 0; wx++) {
  421. for (ox = offsets; *ox >= 0; ox++) {
  422. randomize_buffers();
  423. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  424. src0, sizes[size] * SIZEOF_PIXEL,
  425. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  426. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  427. src1, sizes[size] * SIZEOF_PIXEL,
  428. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  429. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  430. dst1, sizes[size] * SIZEOF_PIXEL,
  431. size[sizes], size[sizes], "dst");
  432. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  433. src1, sizes[size] * SIZEOF_PIXEL,
  434. sizes[size], *denom, *wx, *ox, i, j, sizes[size]);
  435. }
  436. }
  437. }
  438. }
  439. }
  440. }
  441. }
  442. }
  443. report("epel_uni_w");
  444. }
  445. static void checkasm_check_hevc_epel_bi(void)
  446. {
  447. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  448. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  449. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  450. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  451. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  452. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  453. HEVCDSPContext h;
  454. int size, bit_depth, i, j;
  455. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  456. const int16_t *src2,
  457. int height, intptr_t mx, intptr_t my, int width);
  458. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  459. ff_hevc_dsp_init(&h, bit_depth);
  460. for (i = 0; i < 2; i++) {
  461. for (j = 0; j < 2; j++) {
  462. for (size = 1; size < 10; size++) {
  463. const char *type;
  464. switch ((j << 1) | i) {
  465. case 0: type = "pel_bi_pixels"; break; // 0 0
  466. case 1: type = "epel_bi_h"; break; // 0 1
  467. case 2: type = "epel_bi_v"; break; // 1 0
  468. case 3: type = "epel_bi_hv"; break; // 1 1
  469. }
  470. if (check_func(h.put_hevc_epel_bi[size][j][i],
  471. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  472. randomize_buffers_ref();
  473. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  474. src0, sizes[size] * SIZEOF_PIXEL,
  475. ref0, sizes[size], i, j, sizes[size]);
  476. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  477. src1, sizes[size] * SIZEOF_PIXEL,
  478. ref1, sizes[size], i, j, sizes[size]);
  479. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  480. dst1, sizes[size] * SIZEOF_PIXEL,
  481. size[sizes], size[sizes], "dst");
  482. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  483. src1, sizes[size] * SIZEOF_PIXEL,
  484. ref1, sizes[size], i, j, sizes[size]);
  485. }
  486. }
  487. }
  488. }
  489. }
  490. report("epel_bi");
  491. }
  492. static void checkasm_check_hevc_epel_bi_w(void)
  493. {
  494. LOCAL_ALIGNED_32(uint8_t, buf0, [BUF_SIZE]);
  495. LOCAL_ALIGNED_32(uint8_t, buf1, [BUF_SIZE]);
  496. LOCAL_ALIGNED_32(uint8_t, dst0, [BUF_SIZE]);
  497. LOCAL_ALIGNED_32(uint8_t, dst1, [BUF_SIZE]);
  498. LOCAL_ALIGNED_32(int16_t, ref0, [BUF_SIZE]);
  499. LOCAL_ALIGNED_32(int16_t, ref1, [BUF_SIZE]);
  500. HEVCDSPContext h;
  501. int size, bit_depth, i, j;
  502. const int *denom, *wx, *ox;
  503. declare_func(void, uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, ptrdiff_t srcstride,
  504. const int16_t *src2,
  505. int height, int denom, int wx0, int wx1,
  506. int ox0, int ox1, intptr_t mx, intptr_t my, int width);
  507. for (bit_depth = 8; bit_depth <= 12; bit_depth++) {
  508. ff_hevc_dsp_init(&h, bit_depth);
  509. for (i = 0; i < 2; i++) {
  510. for (j = 0; j < 2; j++) {
  511. for (size = 1; size < 10; size++) {
  512. const char *type;
  513. switch ((j << 1) | i) {
  514. case 0: type = "pel_bi_w_pixels"; break; // 0 0
  515. case 1: type = "epel_bi_w_h"; break; // 0 1
  516. case 2: type = "epel_bi_w_v"; break; // 1 0
  517. case 3: type = "epel_bi_w_hv"; break; // 1 1
  518. }
  519. if (check_func(h.put_hevc_epel_bi_w[size][j][i],
  520. "put_hevc_%s%d_%d", type, sizes[size], bit_depth)) {
  521. for (denom = denoms; *denom >= 0; denom++) {
  522. for (wx = weights; *wx >= 0; wx++) {
  523. for (ox = offsets; *ox >= 0; ox++) {
  524. randomize_buffers_ref();
  525. call_ref(dst0, sizes[size] * SIZEOF_PIXEL,
  526. src0, sizes[size] * SIZEOF_PIXEL,
  527. ref0, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  528. call_new(dst1, sizes[size] * SIZEOF_PIXEL,
  529. src1, sizes[size] * SIZEOF_PIXEL,
  530. ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  531. checkasm_check_pixel(dst0, sizes[size] * SIZEOF_PIXEL,
  532. dst1, sizes[size] * SIZEOF_PIXEL,
  533. size[sizes], size[sizes], "dst");
  534. bench_new(dst1, sizes[size] * SIZEOF_PIXEL,
  535. src1, sizes[size] * SIZEOF_PIXEL,
  536. ref1, sizes[size], *denom, *wx, *wx, *ox, *ox, i, j, sizes[size]);
  537. }
  538. }
  539. }
  540. }
  541. }
  542. }
  543. }
  544. }
  545. report("epel_bi_w");
  546. }
/* Entry point registered with checkasm: runs every HEVC qpel/epel
 * test group defined in this file, in a fixed order (the order matters
 * because each group consumes values from checkasm's seeded RNG). */
void checkasm_check_hevc_pel(void)
{
    checkasm_check_hevc_qpel();
    checkasm_check_hevc_qpel_uni();
    checkasm_check_hevc_qpel_uni_w();
    checkasm_check_hevc_qpel_bi();
    checkasm_check_hevc_qpel_bi_w();
    checkasm_check_hevc_epel();
    checkasm_check_hevc_epel_uni();
    checkasm_check_hevc_epel_uni_w();
    checkasm_check_hevc_epel_bi();
    checkasm_check_hevc_epel_bi_w();
}