dsputil_alpha.c

/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/dsputil.h"
#include "asm.h"

void ff_simple_idct_axp(DCTELEM *block);
void ff_simple_idct_put_axp(uint8_t *dest, int line_size, DCTELEM *block);
void ff_simple_idct_add_axp(uint8_t *dest, int line_size, DCTELEM *block);

void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
                        int line_size, int h);
void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);
void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);
void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                 int line_size);
void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
                                 int line_size);

void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride);
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_mvi_asm(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
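
/* The prototypes above refer to routines implemented elsewhere in the
   Alpha port; dsputil_init_alpha() below fills in the two function
   pointers. */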

#if 0
/* These functions were the base for the optimized assembler routines,
   and remain here for documentation purposes. */
static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                                   int line_size)
{
    int i = 8;
    uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
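    /* maxsw4/minsw4 clamp each signed 16-bit lane to [0, 255]; pkwb then
       packs the four clamped words into four bytes, much like MMX
       packuswb does in a single step. */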
    do {
        uint64_t shorts0, shorts1;

        shorts0 = ldq(block);
        shorts0 = maxsw4(shorts0, 0);
        shorts0 = minsw4(shorts0, clampmask);
        stl(pkwb(shorts0), pixels);

        shorts1 = ldq(block + 4);
        shorts1 = maxsw4(shorts1, 0);
        shorts1 = minsw4(shorts1, clampmask);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block += 8;
    } while (--i);
}

void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                            int line_size)
{
    int h = 8;
    /* Keep this function a leaf function by generating the constants
       manually (mainly for the hack value ;-). */
    uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
    uint64_t signmask  = zap(-1, 0x33);
    signmask ^= signmask >> 1; /* 0x8000800080008000 */

    do {
        uint64_t shorts0, pix0, signs0;
        uint64_t shorts1, pix1, signs1;

        shorts0 = ldq(block);
        shorts1 = ldq(block + 4);
        pix0    = unpkbw(ldl(pixels));
        /* Signed subword add (MMX paddw). */
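        /* Clearing the per-lane sign bits first means the 15-bit adds
           cannot carry into the neighbouring 16-bit lane; XORing the
           saved sign bits back afterwards reproduces ordinary wrapping
           16-bit addition within each lane. */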
        signs0   = shorts0 & signmask;
        shorts0 &= ~signmask;
        shorts0 += pix0;
        shorts0 ^= signs0;
        /* Clamp. */
        shorts0 = maxsw4(shorts0, 0);
        shorts0 = minsw4(shorts0, clampmask);

        /* Next 4. */
        pix1     = unpkbw(ldl(pixels + 4));
        signs1   = shorts1 & signmask;
        shorts1 &= ~signmask;
        shorts1 += pix1;
        shorts1 ^= signs1;
        shorts1 = maxsw4(shorts1, 0);
        shorts1 = minsw4(shorts1, clampmask);

        stl(pkwb(shorts0), pixels);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block += 8;
    } while (--h);
}
#endif
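
/* Zero the six 64-coefficient DCT blocks with eight 64-bit stores per
   iteration; n counts the remaining bytes. */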
static void clear_blocks_axp(DCTELEM *blocks) {
    uint64_t *p = (uint64_t *) blocks;
    int n = sizeof(DCTELEM) * 6 * 64;

    do {
        p[0] = 0;
        p[1] = 0;
        p[2] = 0;
        p[3] = 0;
        p[4] = 0;
        p[5] = 0;
        p[6] = 0;
        p[7] = 0;
        p += 8;
        n -= 8 * 8;
    } while (n);
}

static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
{
    return (a & b) + (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}

static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
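
/* Per-byte averages without carries between lanes: a + b equals
   (a ^ b) + 2 * (a & b), so (a & b) + ((a ^ b) >> 1) is the truncating
   average and (a | b) - ((a ^ b) >> 1) the rounding one; masking with
   BYTE_VEC(0xfe) before the shift keeps each byte's low bit from
   leaking into its neighbour. */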

#if 0
/* The XY2 routines basically utilize this scheme, but reuse parts in
   each iteration. */
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
#endif
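
/* avg4 splits each byte into its top six bits (pre-shifted so the
   four-way sum stays inside the byte lane) and its bottom two bits,
   whose sum plus the rounder supplies the missing carry.  The OP_*
   macros below use the same idea for the four half-pel cases: OP is a
   plain copy, OP_X2 averages each byte with its right neighbour (pix2
   is the row shifted by one pixel, with pixels[8] inserted at the top),
   OP_Y2 averages with the following line and reuses the previous load,
   and OP_XY2 carries the per-line partial sums pix_l/pix_h from one
   iteration to the next. */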

#define OP(LOAD, STORE)                         \
    do {                                        \
        STORE(LOAD(pixels), block);             \
        pixels += line_size;                    \
        block += line_size;                     \
    } while (--h)

#define OP_X2(LOAD, STORE)                                      \
    do {                                                        \
        uint64_t pix1, pix2;                                    \
                                                                \
        pix1 = LOAD(pixels);                                    \
        pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);        \
        STORE(AVG2(pix1, pix2), block);                         \
        pixels += line_size;                                    \
        block += line_size;                                     \
    } while (--h)

#define OP_Y2(LOAD, STORE)                      \
    do {                                        \
        uint64_t pix = LOAD(pixels);            \
        do {                                    \
            uint64_t next_pix;                  \
                                                \
            pixels += line_size;                \
            next_pix = LOAD(pixels);            \
            STORE(AVG2(pix, next_pix), block);  \
            block += line_size;                 \
            pix = next_pix;                     \
        } while (--h);                          \
    } while (0)

#define OP_XY2(LOAD, STORE)                                                 \
    do {                                                                    \
        uint64_t pix1 = LOAD(pixels);                                       \
        uint64_t pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);           \
        uint64_t pix_l = (pix1 & BYTE_VEC(0x03))                            \
                       + (pix2 & BYTE_VEC(0x03));                           \
        uint64_t pix_h = ((pix1 & ~BYTE_VEC(0x03)) >> 2)                    \
                       + ((pix2 & ~BYTE_VEC(0x03)) >> 2);                   \
                                                                            \
        do {                                                                \
            uint64_t npix1, npix2;                                          \
            uint64_t npix_l, npix_h;                                        \
            uint64_t avg;                                                   \
                                                                            \
            pixels += line_size;                                            \
            npix1 = LOAD(pixels);                                           \
            npix2 = npix1 >> 8 | ((uint64_t) pixels[8] << 56);              \
            npix_l = (npix1 & BYTE_VEC(0x03))                               \
                   + (npix2 & BYTE_VEC(0x03));                              \
            npix_h = ((npix1 & ~BYTE_VEC(0x03)) >> 2)                       \
                   + ((npix2 & ~BYTE_VEC(0x03)) >> 2);                      \
            avg = (((pix_l + npix_l + AVG4_ROUNDER) >> 2) & BYTE_VEC(0x03)) \
                  + pix_h + npix_h;                                         \
            STORE(avg, block);                                              \
                                                                            \
            block += line_size;                                             \
            pix_l = npix_l;                                                 \
            pix_h = npix_h;                                                 \
        } while (--h);                                                      \
    } while (0)
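
/* MAKE_OP emits an 8-pixel-wide routine that picks the unaligned load
   (uldq) or the aligned one (ldq) depending on the source pointer, plus
   a 16-pixel-wide wrapper built from two 8-pixel calls. */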

#define MAKE_OP(OPNAME, SUFF, OPKIND, STORE)                                 \
static void OPNAME ## _pixels ## SUFF ## _axp                               \
        (uint8_t *restrict block, const uint8_t *restrict pixels,           \
         int line_size, int h)                                              \
{                                                                            \
    if ((size_t) pixels & 0x7) {                                             \
        OPKIND(uldq, STORE);                                                 \
    } else {                                                                 \
        OPKIND(ldq, STORE);                                                  \
    }                                                                        \
}                                                                            \
                                                                             \
static void OPNAME ## _pixels16 ## SUFF ## _axp                              \
        (uint8_t *restrict block, const uint8_t *restrict pixels,            \
         int line_size, int h)                                               \
{                                                                            \
    OPNAME ## _pixels ## SUFF ## _axp(block,     pixels,     line_size, h);  \
    OPNAME ## _pixels ## SUFF ## _axp(block + 8, pixels + 8, line_size, h);  \
}

#define PIXOP(OPNAME, STORE)                    \
    MAKE_OP(OPNAME,     , OP,     STORE)        \
    MAKE_OP(OPNAME, _x2,  OP_X2,  STORE)        \
    MAKE_OP(OPNAME, _y2,  OP_Y2,  STORE)        \
    MAKE_OP(OPNAME, _xy2, OP_XY2, STORE)
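
/* Each PIXOP expansion below generates one family of put/avg variants:
   STORE decides whether the result simply overwrites the destination
   or is averaged into it, while AVG2 and AVG4_ROUNDER select rounding
   or truncating behaviour.  The OP_* macros themselves only use AVG2
   and AVG4_ROUNDER; AVG4 is left for the compiled-out avg4() above. */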

/* Rounding primitives. */
#define AVG2 avg2
#define AVG4 avg4
#define AVG4_ROUNDER BYTE_VEC(0x02)
#define STORE(l, b) stq(l, b)
PIXOP(put, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg, STORE);

/* Non-rounding primitives. */
#undef AVG2
#undef AVG4
#undef AVG4_ROUNDER
#undef STORE
#define AVG2 avg2_no_rnd
#define AVG4 avg4_no_rnd
#define AVG4_ROUNDER BYTE_VEC(0x01)
#define STORE(l, b) stq(l, b)
PIXOP(put_no_rnd, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg_no_rnd, STORE);

void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
                          int line_size, int h)
{
    put_pixels_axp_asm(block,     pixels,     line_size, h);
    put_pixels_axp_asm(block + 8, pixels + 8, line_size, h);
}

void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
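    /* The first index of the pixel tables selects the block width
       (0 = 16 pixels, 1 = 8 pixels); the second selects the half-pel
       case (0 = none, 1 = x, 2 = y, 3 = xy), matching the OP_* macros
       above. */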
    c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
    c->put_pixels_tab[0][1] = put_pixels16_x2_axp;
    c->put_pixels_tab[0][2] = put_pixels16_y2_axp;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_axp;

    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_axp_asm;
    c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_axp;
    c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_axp;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_axp;

    c->avg_pixels_tab[0][0] = avg_pixels16_axp;
    c->avg_pixels_tab[0][1] = avg_pixels16_x2_axp;
    c->avg_pixels_tab[0][2] = avg_pixels16_y2_axp;
    c->avg_pixels_tab[0][3] = avg_pixels16_xy2_axp;

    c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_axp;
    c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_axp;
    c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_axp;
    c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_axp;

    c->put_pixels_tab[1][0] = put_pixels_axp_asm;
    c->put_pixels_tab[1][1] = put_pixels_x2_axp;
    c->put_pixels_tab[1][2] = put_pixels_y2_axp;
    c->put_pixels_tab[1][3] = put_pixels_xy2_axp;

    c->put_no_rnd_pixels_tab[1][0] = put_pixels_axp_asm;
    c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels_x2_axp;
    c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels_y2_axp;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels_xy2_axp;

    c->avg_pixels_tab[1][0] = avg_pixels_axp;
    c->avg_pixels_tab[1][1] = avg_pixels_x2_axp;
    c->avg_pixels_tab[1][2] = avg_pixels_y2_axp;
    c->avg_pixels_tab[1][3] = avg_pixels_xy2_axp;

    c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels_axp;
    c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels_x2_axp;
    c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels_y2_axp;
    c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels_xy2_axp;

    c->clear_blocks = clear_blocks_axp;

    /* amask clears all bits that correspond to present features. */
    if (amask(AMASK_MVI) == 0) {
        c->put_pixels_clamped = put_pixels_clamped_mvi_asm;
        c->add_pixels_clamped = add_pixels_clamped_mvi_asm;

        c->get_pixels    = get_pixels_mvi;
        c->diff_pixels   = diff_pixels_mvi;
        c->sad[0]        = pix_abs16x16_mvi_asm;
        c->sad[1]        = pix_abs8x8_mvi;
        c->pix_abs[0][0] = pix_abs16x16_mvi_asm;
        c->pix_abs[1][0] = pix_abs8x8_mvi;
        c->pix_abs[0][1] = pix_abs16x16_x2_mvi;
        c->pix_abs[0][2] = pix_abs16x16_y2_mvi;
        c->pix_abs[0][3] = pix_abs16x16_xy2_mvi;
    }
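
    /* Export whichever clamped put/add routine was selected so other
       Alpha code (notably the IDCT helpers declared above) can reach it
       through these pointers. */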
    put_pixels_clamped_axp_p = c->put_pixels_clamped;
    add_pixels_clamped_axp_p = c->add_pixels_clamped;

    if (!avctx->lowres &&
        (avctx->idct_algo == FF_IDCT_AUTO ||
         avctx->idct_algo == FF_IDCT_SIMPLEALPHA)) {
        c->idct_put = ff_simple_idct_put_axp;
        c->idct_add = ff_simple_idct_add_axp;
        c->idct     = ff_simple_idct_axp;
    }
}