/* ffmpeg/libavcodec/ppc/fdct_altivec.c, this file is part of the
 * AltiVec optimized library for the FFMPEG Multimedia System
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "gcc_fixes.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))
#define C1     0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2     0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3     0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4     0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5     0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6     0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7     0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */

#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))
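
/* Each W* above folds one or more of the cosine factors into a single
 * multiplier, so each rotation in the transform below becomes one vec_madd
 * per output.  The twelve weights are stored in fdctconsts and broadcast
 * into a register lane-by-lane via the LD_W* splats. */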
static vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
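
/* FDCTROW and FDCTCOL below are essentially identical 8-point butterflies;
 * the two names only mark which pass (rows or columns) is running.  The
 * trailing comment on each line gives the scalar equivalent of the vector
 * operation. */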
#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);            /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);            /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);            /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);            /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);            /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);            /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);            /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);            /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);            /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);            /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);            /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);            /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);            /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);            /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);            /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero);  /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);     /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);     /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);            /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);            /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);            /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);            /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);            /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero);  /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero);  /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero);  /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);     /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);     /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);     /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);     /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);     /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);     /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);            /* b7 = b7 + x2; */ \
    b5 = vec_add(b5, x3);            /* b5 = b5 + x3; */ \
    b3 = vec_add(b3, x2);            /* b3 = b3 + x2; */ \
    b1 = vec_add(b1, x3);            /* b1 = b1 + x3; */ \
    /* }}} */
#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);            /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);            /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);            /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);            /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);            /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);            /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);            /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);            /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);            /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);            /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);            /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);            /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);            /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);            /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);            /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero);  /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);     /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);     /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);            /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);            /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);            /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);            /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);            /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero);  /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero);  /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero);  /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);     /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);     /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);     /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);     /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);     /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);     /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);            /* b7 += x2; */ \
    b5 = vec_add(b5, x3);            /* b5 += x3; */ \
    b3 = vec_add(b3, x2);            /* b3 += x2; */ \
    b1 = vec_add(b1, x3);            /* b1 += x3; */ \
    /* }}} */
/* two dimensional discrete cosine transform */
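/* Pipeline: load the 8x8 int16 block, transpose it with 16-bit merges, run
 * the 8-point transform once, transpose again in the float domain, run the
 * transform a second time, then round, convert and store back into block.
 * Throughout, each group of eight values lives in a pair of float vectors
 * b<n>0 (elements 0-3) and b<n>1 (elements 4-7). */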
void fdct_altivec(int16_t *block)
{
    POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);

    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
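    /* The two lines above build -0.0f without a memory load:
     * vec_splat_u32(-1) fills each 32-bit lane with all-one bits, and
     * vec_sl shifts each lane left by 31 (only the low five bits of the
     * shift count are used), leaving 0x80000000 == -0.0f everywhere.
     * -0.0 is the IEEE additive identity (x + -0.0 == x for every x,
     * including -0.0), which is why it serves as the third operand of the
     * vec_madd calls below. */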
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */
    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))
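
    /* The transpose is three rounds of vec_mergeh/vec_mergel on 16-bit
     * elements (24 merges in all): the first round interleaves rows four
     * apart, and each later round interleaves the previous results, so the
     * final b00..b70 hold the transposed block. */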
    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0,    bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0,    bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0,    bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0,    bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));
#undef MERGE_S16
    /* }}} */
    /* Some of the initial calculations can be done as vector short before
     * conversion to vector float.  The following code section takes
     * advantage of this. */
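    /* This is safe assuming the usual 9-bit signed samples an MPEG/JPEG
     * style encoder feeds the FDCT: the first butterfly stages are pure
     * adds/subtracts whose results stay well within int16 range, and while
     * the data is still 16-bit each vector op processes eight coefficients
     * instead of the four a float vector holds. */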
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));
    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));
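
    /* CTF0 widens one row pair from int16 to float: vec_unpackh and
     * vec_unpackl sign-extend the first and last four shorts to int32,
     * then vec_ctf(.., 0) converts to float (the 0 means no fixed-point
     * scaling). */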
#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);
#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);
    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);
#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);
#undef CTFX
    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);

    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
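
    /* The disabled branch below is the straightforward variant: widen
     * every row to float first, then apply FDCTROW to both vector halves.
     * The branch above computes the same math with the early integer
     * stages hand-interleaved, and is the one actually compiled (#if 1). */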
#else
    /* convert to float {{{ */
#define CTF(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);
#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif
    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */
    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);

    /* round, convert back to short {{{ */
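    /* CTS rounds each float vector to the nearest integer (vec_round),
     * converts to int32 with saturation (vec_cts), then packs two int32
     * vectors back into eight int16 values with vec_pack.  vec_pack
     * truncates rather than saturates, which should be fine here since
     * DCT output of in-range input already fits in 16 bits. */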
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);
#undef CTS
    /* }}} */

    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}
/* vim:set foldmethod=marker foldlevel=0: */