yuv2rgb_altivec.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962
  1. /*
  2. * AltiVec acceleration for colorspace conversion
  3. *
  4. * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /*
  23. Convert I420 YV12 to RGB in various formats,
  24. it rejects images that are not in 420 formats,
  25. it rejects images that don't have widths of multiples of 16,
  26. it rejects images that don't have heights of multiples of 2.
  27. Reject defers to C simulation code.
  28. Lots of optimizations to be done here.
  29. 1. Need to fix saturation code. I just couldn't get it to fly with packs
  30. and adds, so we currently use max/min to clip.
  31. 2. The inefficient use of chroma loading needs a bit of brushing up.
  32. 3. Analysis of pipeline stalls needs to be done. Use shark to identify
  33. pipeline stalls.
  34. MODIFIED to calculate coeffs from currently selected color space.
  35. MODIFIED core to be a macro where you specify the output format.
  36. ADDED UYVY conversion which is never called due to some thing in swscale.
  37. CORRECTED algorithm selection to be strict on input formats.
  38. ADDED runtime detection of AltiVec.
  39. ADDED altivec_yuv2packedX vertical scl + RGB converter
  40. March 27,2004
  41. PERFORMANCE ANALYSIS
  42. The C version uses 25% of the processor or ~250Mips for D1 video rawvideo
  43. used as test.
  44. The AltiVec version uses 10% of the processor or ~100Mips for D1 video
  45. same sequence.
  46. 720 * 480 * 30 ~10MPS
  47. so we have roughly 10 clocks per pixel. This is too high, something has
  48. to be wrong.
  49. OPTIMIZED clip codes to utilize vec_max and vec_packs removing the
  50. need for vec_min.
  51. OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to have
  52. the input video frame, it was just decompressed so it probably resides in L1
  53. caches. However, we are creating the output video stream. This needs to use the
  54. DSTST instruction to optimize for the cache. We couple this with the fact that
  55. we are not going to be visiting the input buffer again so we mark it Least
  56. Recently Used. This shaves 25% of the processor cycles off.
  57. Now memcpy is the largest mips consumer in the system, probably due
  58. to the inefficient X11 stuff.
  59. GL libraries seem to be very slow on this machine 1.33Ghz PB running
  60. Jaguar, this is not the case for my 1Ghz PB. I thought it might be
  61. a versioning issue, however I have libGL.1.2.dylib for both
  62. machines. (We need to figure this out now.)
  63. GL2 libraries work now with patch for RGB32.
  64. NOTE: quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
  65. Integrated luma prescaling adjustment for saturation/contrast/brightness
  66. adjustment.
  67. */
  68. #include <stdio.h>
  69. #include <stdlib.h>
  70. #include <string.h>
  71. #include <inttypes.h>
  72. #include <assert.h>
  73. #include "config.h"
  74. #include "rgb2rgb.h"
  75. #include "swscale.h"
  76. #include "swscale_internal.h"
  77. #undef PROFILE_THE_BEAST
  78. #undef INC_SCALING
  79. typedef unsigned char ubyte;
  80. typedef signed char sbyte;
  81. /* RGB interleaver, 16 planar pels 8-bit samples per channel in
  82. homogeneous vector registers x0,x1,x2 are interleaved with the
  83. following technique:
  84. o0 = vec_mergeh (x0,x1);
  85. o1 = vec_perm (o0, x2, perm_rgb_0);
  86. o2 = vec_perm (o0, x2, perm_rgb_1);
  87. o3 = vec_mergel (x0,x1);
  88. o4 = vec_perm (o3,o2,perm_rgb_2);
  89. o5 = vec_perm (o3,o2,perm_rgb_3);
  90. perm_rgb_0: o0(RG).h v1(B) --> o1*
  91. 0 1 2 3 4
  92. rgbr|gbrg|brgb|rgbr
  93. 0010 0100 1001 0010
  94. 0102 3145 2673 894A
  95. perm_rgb_1: o0(RG).h v1(B) --> o2
  96. 0 1 2 3 4
  97. gbrg|brgb|bbbb|bbbb
  98. 0100 1001 1111 1111
  99. B5CD 6EF7 89AB CDEF
  100. perm_rgb_2: o3(RG).l o2(rgbB.l) --> o4*
  101. 0 1 2 3 4
  102. gbrg|brgb|rgbr|gbrg
  103. 1111 1111 0010 0100
  104. 89AB CDEF 0182 3945
  105. perm_rgb_2: o3(RG).l o2(rgbB.l) ---> o5*
  106. 0 1 2 3 4
  107. brgb|rgbr|gbrg|brgb
  108. 1001 0010 0100 1001
  109. a67b 89cA BdCD eEFf
  110. */
/* Interleave permutation tables used by vec_merge3() below to turn three
   16-byte planar channel vectors into three 16-byte interleaved r,g,b
   vectors.  Indices 0x10..0x1f select bytes from the second vec_perm
   operand; see the worked derivation in the comment above. */
static
const vector unsigned char
    perm_rgb_0 = {0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
                  0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a},
    perm_rgb_1 = {0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
                  0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f},
    perm_rgb_2 = {0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
                  0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05},
    perm_rgb_3 = {0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
                  0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f};
/* Interleave three planar 8-bit vectors (16 samples per channel) into
   three packed vectors y0,y1,y2 holding 16 consecutive x0,x1,x2 triplets,
   using the perm_rgb_* tables above. */
#define vec_merge3(x2,x1,x0,y0,y1,y2) \
do { \
    __typeof__(x0) o0,o2,o3; \
    o0 = vec_mergeh (x0,x1); \
    y0 = vec_perm (o0, x2, perm_rgb_0); \
    o2 = vec_perm (o0, x2, perm_rgb_1); \
    o3 = vec_mergel (x0,x1); \
    y1 = vec_perm (o3,o2,perm_rgb_2); \
    y2 = vec_perm (o3,o2,perm_rgb_3); \
} while(0)
  131. #define vec_mstbgr24(x0,x1,x2,ptr) \
  132. do { \
  133. __typeof__(x0) _0,_1,_2; \
  134. vec_merge3 (x0,x1,x2,_0,_1,_2); \
  135. vec_st (_0, 0, ptr++); \
  136. vec_st (_1, 0, ptr++); \
  137. vec_st (_2, 0, ptr++); \
  138. } while (0);
  139. #define vec_mstrgb24(x0,x1,x2,ptr) \
  140. do { \
  141. __typeof__(x0) _0,_1,_2; \
  142. vec_merge3 (x2,x1,x0,_0,_1,_2); \
  143. vec_st (_0, 0, ptr++); \
  144. vec_st (_1, 0, ptr++); \
  145. vec_st (_2, 0, ptr++); \
  146. } while (0);
  147. /* pack the pixels in rgb0 format
  148. msb R
  149. lsb 0
  150. */
  151. #define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
  152. do { \
  153. T _0,_1,_2,_3; \
  154. _0 = vec_mergeh (x0,x1); \
  155. _1 = vec_mergeh (x2,x3); \
  156. _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
  157. _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
  158. vec_st (_2, 0*16, (T *)ptr); \
  159. vec_st (_3, 1*16, (T *)ptr); \
  160. _0 = vec_mergel (x0,x1); \
  161. _1 = vec_mergel (x2,x3); \
  162. _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
  163. _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
  164. vec_st (_2, 2*16, (T *)ptr); \
  165. vec_st (_3, 3*16, (T *)ptr); \
  166. ptr += 4; \
  167. } while (0);
  168. /*
  169. | 1 0 1.4021 | | Y |
  170. | 1 -0.3441 -0.7142 |x| Cb|
  171. | 1 1.7718 0 | | Cr|
  172. Y: [-128 127]
  173. Cb/Cr : [-128 127]
  174. typical yuv conversion work on Y: 0-255 this version has been optimized for jpeg decode.
  175. */
/* Zero-extend the high (vec_unh) / low (vec_unl) 8 bytes of a pixel
   vector into 8 signed 16-bit samples (index 0x10 picks a zero byte from
   the second, all-zero vec_perm operand). */
#define vec_unh(x) \
    (vector signed short) \
        vec_perm(x,(__typeof__(x)){0}, \
                 ((vector unsigned char){0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
                                         0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07}))
#define vec_unl(x) \
    (vector signed short) \
        vec_perm(x,(__typeof__(x)){0}, \
                 ((vector unsigned char){0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
                                         0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F}))

/* Clamp 16-bit samples to the CCIR-601 nominal range [16,235]. */
#define vec_clip_s16(x) \
    vec_max (vec_min (x, ((vector signed short){235,235,235,235,235,235,235,235})), \
             ((vector signed short){ 16, 16, 16, 16, 16, 16, 16, 16}))

/* Clamp two signed 16-bit vectors at 0, then saturating-pack them into
   one vector of 16 unsigned bytes (the pack clips the high side). */
#define vec_packclp(x,y) \
    (vector unsigned char)vec_packs \
        ((vector unsigned short)vec_max (x,((vector signed short) {0})), \
         (vector unsigned short)vec_max (y,((vector signed short) {0})))
  193. //#define out_pixels(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,a,a,ptr)
/* Convert 8 Y/U/V 16-bit samples to 8 R/G/B 16-bit samples using the
   coefficient vectors precomputed in sws_yuv2rgb_altivec_init_tables().
   vec_mradds computes (a*b + 0x4000) >> 15 + c, i.e. a rounded 1.15
   fixed-point multiply-accumulate.  Results are NOT yet clipped to
   [0,255]; callers pack with vec_packclp. */
static inline void cvtyuvtoRGB (SwsContext *c,
                                vector signed short Y, vector signed short U, vector signed short V,
                                vector signed short *R, vector signed short *G, vector signed short *B)
{
    vector signed short vx,ux,uvx;

    /* luma: contrast/brightness scale and offset */
    Y = vec_mradds (Y, c->CY, c->OY);
    /* center chroma on zero */
    U = vec_sub (U,(vector signed short)
                 vec_splat((vector signed short){128},0));
    V = vec_sub (V,(vector signed short)
                 vec_splat((vector signed short){128},0));

    // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
    ux = vec_sl (U, c->CSHIFT);
    *B = vec_mradds (ux, c->CBU, Y);

    // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
    vx = vec_sl (V, c->CSHIFT);
    *R = vec_mradds (vx, c->CRV, Y);

    // uvx = ((CGU*u) + (CGV*v))>>15;
    uvx = vec_mradds (U, c->CGU, Y);
    *G = vec_mradds (V, c->CGV, uvx);
}
  214. /*
  215. ------------------------------------------------------------------------------
  216. CS converters
  217. ------------------------------------------------------------------------------
  218. */
/* DEFCSP420_CVT(name, out_pixels)
 *
 * Expands to a slice converter "altivec_<name>" that turns planar 4:2:0
 * YUV into packed RGB, processing two rows ("even"/"odd") per outer
 * iteration so each chroma fetch is shared by a pair of luma rows, and
 * 16 pixels per inner iteration.  out_pixels is one of the out_* channel
 * order adapters defined below; it also advances the output pointer.
 * Requires w to be a multiple of 16 and the slice height a multiple of 2
 * (enforced by sws_yuv2rgb_init_altivec).  Returns srcSliceH. */
#define DEFCSP420_CVT(name,out_pixels) \
static int altivec_##name (SwsContext *c, \
                           unsigned char **in, int *instrides, \
                           int srcSliceY, int srcSliceH, \
                           unsigned char **oplanes, int *outstrides) \
{ \
    int w = c->srcW; \
    int h = srcSliceH; \
    int i,j; \
    int instrides_scl[3]; \
    vector unsigned char y0,y1; \
\
    vector signed char u,v; \
\
    vector signed short Y0,Y1,Y2,Y3; \
    vector signed short U,V; \
    vector signed short vx,ux,uvx; \
    vector signed short vx0,ux0,uvx0; \
    vector signed short vx1,ux1,uvx1; \
    vector signed short R0,G0,B0; \
    vector signed short R1,G1,B1; \
    vector unsigned char R,G,B; \
\
    vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
    vector unsigned char align_perm; \
\
    /* hoist the conversion coefficients out of the context */ \
    vector signed short \
        lCY  = c->CY, \
        lOY  = c->OY, \
        lCRV = c->CRV, \
        lCBU = c->CBU, \
        lCGU = c->CGU, \
        lCGV = c->CGV; \
\
    vector unsigned short lCSHIFT = c->CSHIFT; \
\
    ubyte *y1i = in[0]; \
    ubyte *y2i = in[0]+instrides[0]; \
    ubyte *ui = in[1]; \
    ubyte *vi = in[2]; \
\
    vector unsigned char *oute \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]); \
    vector unsigned char *outo \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
\
\
    instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
    instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
    instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
\
\
    for (i=0;i<h/2;i++) { \
        /* data-stream-touch-for-store prefetch hints for the two output \
           rows (block size/count/stride encoded in the control word) */ \
        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
\
        for (j=0;j<w/16;j++) { \
\
            y1ivP = (vector unsigned char *)y1i; \
            y2ivP = (vector unsigned char *)y2i; \
            uivP = (vector unsigned char *)ui; \
            vivP = (vector unsigned char *)vi; \
\
            /* unaligned loads: permute each pair of quadwords into place */ \
            align_perm = vec_lvsl (0, y1i); \
            y0 = (vector unsigned char) \
                 vec_perm (y1ivP[0], y1ivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, y2i); \
            y1 = (vector unsigned char) \
                 vec_perm (y2ivP[0], y2ivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, ui); \
            u = (vector signed char) \
                vec_perm (uivP[0], uivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, vi); \
            v = (vector signed char) \
                vec_perm (vivP[0], vivP[1], align_perm); \
\
            /* center chroma on zero */ \
            u = (vector signed char) \
                vec_sub (u,(vector signed char) \
                         vec_splat((vector signed char){128},0)); \
            v = (vector signed char) \
                vec_sub (v,(vector signed char) \
                         vec_splat((vector signed char){128},0)); \
\
            U = vec_unpackh (u); \
            V = vec_unpackh (v); \
\
\
            Y0 = vec_unh (y0); \
            Y1 = vec_unl (y0); \
            Y2 = vec_unh (y1); \
            Y3 = vec_unl (y1); \
\
            Y0 = vec_mradds (Y0, lCY, lOY); \
            Y1 = vec_mradds (Y1, lCY, lOY); \
            Y2 = vec_mradds (Y2, lCY, lOY); \
            Y3 = vec_mradds (Y3, lCY, lOY); \
\
            /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
            ux = vec_sl (U, lCSHIFT); \
            ux = vec_mradds (ux, lCBU, (vector signed short){0}); \
            ux0 = vec_mergeh (ux,ux); /* duplicate chroma for pixel pairs */ \
            ux1 = vec_mergel (ux,ux); \
\
            /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
            vx = vec_sl (V, lCSHIFT); \
            vx = vec_mradds (vx, lCRV, (vector signed short){0}); \
            vx0 = vec_mergeh (vx,vx); \
            vx1 = vec_mergel (vx,vx); \
\
            /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
            uvx = vec_mradds (U, lCGU, (vector signed short){0}); \
            uvx = vec_mradds (V, lCGV, uvx); \
            uvx0 = vec_mergeh (uvx,uvx); \
            uvx1 = vec_mergel (uvx,uvx); \
\
            /* even row: first 16 pixels */ \
            R0 = vec_add (Y0,vx0); \
            G0 = vec_add (Y0,uvx0); \
            B0 = vec_add (Y0,ux0); \
            R1 = vec_add (Y1,vx1); \
            G1 = vec_add (Y1,uvx1); \
            B1 = vec_add (Y1,ux1); \
\
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
\
            out_pixels(R,G,B,oute); \
\
            /* odd row: same chroma, next luma row */ \
            R0 = vec_add (Y2,vx0); \
            G0 = vec_add (Y2,uvx0); \
            B0 = vec_add (Y2,ux0); \
            R1 = vec_add (Y3,vx1); \
            G1 = vec_add (Y3,uvx1); \
            B1 = vec_add (Y3,ux1); \
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
\
\
            out_pixels(R,G,B,outo); \
\
            y1i += 16; \
            y2i += 16; \
            ui += 8; \
            vi += 8; \
\
        } \
\
        /* out_pixels advanced oute/outo across one row; one more stride \
           moves each to its next even/odd row.  NOTE(review): this is  \
           only consistent when the output rows are tightly packed      \
           (stride == bytes written per row) -- confirm with callers.   */ \
        outo += (outstrides[0])>>4; \
        oute += (outstrides[0])>>4; \
\
        ui += instrides_scl[1]; \
        vi += instrides_scl[2]; \
        y1i += instrides_scl[0]; \
        y2i += instrides_scl[0]; \
    } \
    return srcSliceH; \
}
/* Channel-order adapters: map (R,G,B) vectors onto the generic packed
   stores above; the {255} vector supplies the alpha/filler byte for the
   32-bit formats.  Each adapter advances ptr. */
#define out_abgr(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),c,b,a,ptr)
#define out_bgra(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),c,b,a,((__typeof__ (a)){255}),ptr)
#define out_rgba(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),a,b,c,((__typeof__ (a)){255}),ptr)
#define out_argb(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,b,c,ptr)
#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
  388. DEFCSP420_CVT (yuv2_abgr, out_abgr)
  389. #if 1
  390. DEFCSP420_CVT (yuv2_bgra, out_bgra)
  391. #else
  392. static int altivec_yuv2_bgra32 (SwsContext *c,
  393. unsigned char **in, int *instrides,
  394. int srcSliceY, int srcSliceH,
  395. unsigned char **oplanes, int *outstrides)
  396. {
  397. int w = c->srcW;
  398. int h = srcSliceH;
  399. int i,j;
  400. int instrides_scl[3];
  401. vector unsigned char y0,y1;
  402. vector signed char u,v;
  403. vector signed short Y0,Y1,Y2,Y3;
  404. vector signed short U,V;
  405. vector signed short vx,ux,uvx;
  406. vector signed short vx0,ux0,uvx0;
  407. vector signed short vx1,ux1,uvx1;
  408. vector signed short R0,G0,B0;
  409. vector signed short R1,G1,B1;
  410. vector unsigned char R,G,B;
  411. vector unsigned char *uivP, *vivP;
  412. vector unsigned char align_perm;
  413. vector signed short
  414. lCY = c->CY,
  415. lOY = c->OY,
  416. lCRV = c->CRV,
  417. lCBU = c->CBU,
  418. lCGU = c->CGU,
  419. lCGV = c->CGV;
  420. vector unsigned short lCSHIFT = c->CSHIFT;
  421. ubyte *y1i = in[0];
  422. ubyte *y2i = in[0]+w;
  423. ubyte *ui = in[1];
  424. ubyte *vi = in[2];
  425. vector unsigned char *oute
  426. = (vector unsigned char *)
  427. (oplanes[0]+srcSliceY*outstrides[0]);
  428. vector unsigned char *outo
  429. = (vector unsigned char *)
  430. (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
  431. instrides_scl[0] = instrides[0];
  432. instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
  433. instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */
  434. for (i=0;i<h/2;i++) {
  435. vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
  436. vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
  437. for (j=0;j<w/16;j++) {
  438. y0 = vec_ldl (0,y1i);
  439. y1 = vec_ldl (0,y2i);
  440. uivP = (vector unsigned char *)ui;
  441. vivP = (vector unsigned char *)vi;
  442. align_perm = vec_lvsl (0, ui);
  443. u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
  444. align_perm = vec_lvsl (0, vi);
  445. v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
  446. u = (vector signed char)
  447. vec_sub (u,(vector signed char)
  448. vec_splat((vector signed char){128},0));
  449. v = (vector signed char)
  450. vec_sub (v, (vector signed char)
  451. vec_splat((vector signed char){128},0));
  452. U = vec_unpackh (u);
  453. V = vec_unpackh (v);
  454. Y0 = vec_unh (y0);
  455. Y1 = vec_unl (y0);
  456. Y2 = vec_unh (y1);
  457. Y3 = vec_unl (y1);
  458. Y0 = vec_mradds (Y0, lCY, lOY);
  459. Y1 = vec_mradds (Y1, lCY, lOY);
  460. Y2 = vec_mradds (Y2, lCY, lOY);
  461. Y3 = vec_mradds (Y3, lCY, lOY);
  462. /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
  463. ux = vec_sl (U, lCSHIFT);
  464. ux = vec_mradds (ux, lCBU, (vector signed short){0});
  465. ux0 = vec_mergeh (ux,ux);
  466. ux1 = vec_mergel (ux,ux);
  467. /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
  468. vx = vec_sl (V, lCSHIFT);
  469. vx = vec_mradds (vx, lCRV, (vector signed short){0});
  470. vx0 = vec_mergeh (vx,vx);
  471. vx1 = vec_mergel (vx,vx);
  472. /* uvx = ((CGU*u) + (CGV*v))>>15 */
  473. uvx = vec_mradds (U, lCGU, (vector signed short){0});
  474. uvx = vec_mradds (V, lCGV, uvx);
  475. uvx0 = vec_mergeh (uvx,uvx);
  476. uvx1 = vec_mergel (uvx,uvx);
  477. R0 = vec_add (Y0,vx0);
  478. G0 = vec_add (Y0,uvx0);
  479. B0 = vec_add (Y0,ux0);
  480. R1 = vec_add (Y1,vx1);
  481. G1 = vec_add (Y1,uvx1);
  482. B1 = vec_add (Y1,ux1);
  483. R = vec_packclp (R0,R1);
  484. G = vec_packclp (G0,G1);
  485. B = vec_packclp (B0,B1);
  486. out_argb(R,G,B,oute);
  487. R0 = vec_add (Y2,vx0);
  488. G0 = vec_add (Y2,uvx0);
  489. B0 = vec_add (Y2,ux0);
  490. R1 = vec_add (Y3,vx1);
  491. G1 = vec_add (Y3,uvx1);
  492. B1 = vec_add (Y3,ux1);
  493. R = vec_packclp (R0,R1);
  494. G = vec_packclp (G0,G1);
  495. B = vec_packclp (B0,B1);
  496. out_argb(R,G,B,outo);
  497. y1i += 16;
  498. y2i += 16;
  499. ui += 8;
  500. vi += 8;
  501. }
  502. outo += (outstrides[0])>>4;
  503. oute += (outstrides[0])>>4;
  504. ui += instrides_scl[1];
  505. vi += instrides_scl[2];
  506. y1i += instrides_scl[0];
  507. y2i += instrides_scl[0];
  508. }
  509. return srcSliceH;
  510. }
  511. #endif
/* Instantiate the 4:2:0 -> packed-RGB converters for the remaining
   supported channel orders. */
DEFCSP420_CVT (yuv2_rgba, out_rgba)
DEFCSP420_CVT (yuv2_argb, out_argb)
DEFCSP420_CVT (yuv2_rgb24, out_rgb24)
DEFCSP420_CVT (yuv2_bgr24, out_bgr24)
// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
/* Permutation tables that de-interleave one 16-byte UYVY quadword into
   zero-extended 16-bit samples: index 0x10 selects a zero byte from the
   all-zero second vec_perm operand, 0x00..0x0f select source bytes. */
static
const vector unsigned char
    demux_u = {0x10,0x00,0x10,0x00,
               0x10,0x04,0x10,0x04,
               0x10,0x08,0x10,0x08,
               0x10,0x0c,0x10,0x0c},
    demux_v = {0x10,0x02,0x10,0x02,
               0x10,0x06,0x10,0x06,
               0x10,0x0A,0x10,0x0A,
               0x10,0x0E,0x10,0x0E},
    demux_y = {0x10,0x01,0x10,0x03,
               0x10,0x05,0x10,0x07,
               0x10,0x09,0x10,0x0B,
               0x10,0x0D,0x10,0x0F};
  532. /*
  533. this is so I can play live CCIR raw video
  534. */
  535. static int altivec_uyvy_rgb32 (SwsContext *c,
  536. unsigned char **in, int *instrides,
  537. int srcSliceY, int srcSliceH,
  538. unsigned char **oplanes, int *outstrides)
  539. {
  540. int w = c->srcW;
  541. int h = srcSliceH;
  542. int i,j;
  543. vector unsigned char uyvy;
  544. vector signed short Y,U,V;
  545. vector signed short R0,G0,B0,R1,G1,B1;
  546. vector unsigned char R,G,B;
  547. vector unsigned char *out;
  548. ubyte *img;
  549. img = in[0];
  550. out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
  551. for (i=0;i<h;i++) {
  552. for (j=0;j<w/16;j++) {
  553. uyvy = vec_ld (0, img);
  554. U = (vector signed short)
  555. vec_perm (uyvy, (vector unsigned char){0}, demux_u);
  556. V = (vector signed short)
  557. vec_perm (uyvy, (vector unsigned char){0}, demux_v);
  558. Y = (vector signed short)
  559. vec_perm (uyvy, (vector unsigned char){0}, demux_y);
  560. cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
  561. uyvy = vec_ld (16, img);
  562. U = (vector signed short)
  563. vec_perm (uyvy, (vector unsigned char){0}, demux_u);
  564. V = (vector signed short)
  565. vec_perm (uyvy, (vector unsigned char){0}, demux_v);
  566. Y = (vector signed short)
  567. vec_perm (uyvy, (vector unsigned char){0}, demux_y);
  568. cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
  569. R = vec_packclp (R0,R1);
  570. G = vec_packclp (G0,G1);
  571. B = vec_packclp (B0,B1);
  572. // vec_mstbgr24 (R,G,B, out);
  573. out_rgba (R,G,B,out);
  574. img += 32;
  575. }
  576. }
  577. return srcSliceH;
  578. }
  579. /* Ok currently the acceleration routine only supports
  580. inputs of widths a multiple of 16
  581. and heights a multiple 2
  582. So we just fall back to the C codes for this.
  583. */
  584. SwsFunc sws_yuv2rgb_init_altivec (SwsContext *c)
  585. {
  586. if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
  587. return NULL;
  588. /*
  589. and this seems not to matter too much I tried a bunch of
  590. videos with abnormal widths and MPlayer crashes elsewhere.
  591. mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
  592. boom with X11 bad match.
  593. */
  594. if ((c->srcW & 0xf) != 0) return NULL;
  595. switch (c->srcFormat) {
  596. case PIX_FMT_YUV410P:
  597. case PIX_FMT_YUV420P:
  598. /*case IMGFMT_CLPL: ??? */
  599. case PIX_FMT_GRAY8:
  600. case PIX_FMT_NV12:
  601. case PIX_FMT_NV21:
  602. if ((c->srcH & 0x1) != 0)
  603. return NULL;
  604. switch(c->dstFormat){
  605. case PIX_FMT_RGB24:
  606. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
  607. return altivec_yuv2_rgb24;
  608. case PIX_FMT_BGR24:
  609. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
  610. return altivec_yuv2_bgr24;
  611. case PIX_FMT_ARGB:
  612. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
  613. return altivec_yuv2_argb;
  614. case PIX_FMT_ABGR:
  615. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
  616. return altivec_yuv2_abgr;
  617. case PIX_FMT_RGBA:
  618. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
  619. return altivec_yuv2_rgba;
  620. case PIX_FMT_BGRA:
  621. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
  622. return altivec_yuv2_bgra;
  623. default: return NULL;
  624. }
  625. break;
  626. case PIX_FMT_UYVY422:
  627. switch(c->dstFormat){
  628. case PIX_FMT_BGR32:
  629. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
  630. return altivec_uyvy_rgb32;
  631. default: return NULL;
  632. }
  633. break;
  634. }
  635. return NULL;
  636. }
/* Precompute the per-context AltiVec coefficient vectors (CY, OY, CRV,
   CBU, CGU, CGV, CSHIFT) from the integer inverse table and the
   brightness/contrast/saturation settings.
   NOTE(review): the >>16 on contrast/saturation suggests they are 16.16
   fixed point (integer part recovered here) -- confirm against the
   swscale callers. */
void sws_yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation)
{
    /* Build the six scalar coefficients in an aligned buffer, then splat
       each one across a vector register. */
    union {
        signed short tmp[8] __attribute__ ((aligned(16)));
        vector signed short vec;
    } buf;

    buf.tmp[0] = ((0xffffLL) * contrast>>8)>>9; //cy
    buf.tmp[1] = -256*brightness; //oy
    buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
    buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
    buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
    buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv

    /* chroma pre-shift applied before the multiply-round-add in the
       converters */
    c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
    c->CY = vec_splat ((vector signed short)buf.vec, 0);
    c->OY = vec_splat ((vector signed short)buf.vec, 1);
    c->CRV = vec_splat ((vector signed short)buf.vec, 2);
    c->CBU = vec_splat ((vector signed short)buf.vec, 3);
    c->CGU = vec_splat ((vector signed short)buf.vec, 4);
    c->CGV = vec_splat ((vector signed short)buf.vec, 5);
#if 0
    {
        int i;
        char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
        for (i=0; i<6; i++)
            printf("%s %d ", v[i],buf.tmp[i] );
        printf("\n");
    }
#endif
    return;
}
  667. void
  668. altivec_yuv2packedX (SwsContext *c,
  669. int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  670. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  671. uint8_t *dest, int dstW, int dstY)
  672. {
  673. int i,j;
  674. vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
  675. vector signed short R0,G0,B0,R1,G1,B1;
  676. vector unsigned char R,G,B;
  677. vector unsigned char *out,*nout;
  678. vector signed short RND = vec_splat_s16(1<<3);
  679. vector unsigned short SCL = vec_splat_u16(4);
  680. unsigned long scratch[16] __attribute__ ((aligned (16)));
  681. vector signed short *YCoeffs, *CCoeffs;
  682. YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
  683. CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
  684. out = (vector unsigned char *)dest;
  685. for (i=0; i<dstW; i+=16){
  686. Y0 = RND;
  687. Y1 = RND;
  688. /* extract 16 coeffs from lumSrc */
  689. for (j=0; j<lumFilterSize; j++) {
  690. X0 = vec_ld (0, &lumSrc[j][i]);
  691. X1 = vec_ld (16, &lumSrc[j][i]);
  692. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  693. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  694. }
  695. U = RND;
  696. V = RND;
  697. /* extract 8 coeffs from U,V */
  698. for (j=0; j<chrFilterSize; j++) {
  699. X = vec_ld (0, &chrSrc[j][i/2]);
  700. U = vec_mradds (X, CCoeffs[j], U);
  701. X = vec_ld (0, &chrSrc[j][i/2+2048]);
  702. V = vec_mradds (X, CCoeffs[j], V);
  703. }
  704. /* scale and clip signals */
  705. Y0 = vec_sra (Y0, SCL);
  706. Y1 = vec_sra (Y1, SCL);
  707. U = vec_sra (U, SCL);
  708. V = vec_sra (V, SCL);
  709. Y0 = vec_clip_s16 (Y0);
  710. Y1 = vec_clip_s16 (Y1);
  711. U = vec_clip_s16 (U);
  712. V = vec_clip_s16 (V);
  713. /* now we have
  714. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  715. U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
  716. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  717. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  718. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  719. */
  720. U0 = vec_mergeh (U,U);
  721. V0 = vec_mergeh (V,V);
  722. U1 = vec_mergel (U,U);
  723. V1 = vec_mergel (V,V);
  724. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  725. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  726. R = vec_packclp (R0,R1);
  727. G = vec_packclp (G0,G1);
  728. B = vec_packclp (B0,B1);
  729. switch(c->dstFormat) {
  730. case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
  731. case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
  732. case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
  733. case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
  734. case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
  735. case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
  736. default:
  737. {
  738. /* If this is reached, the caller should have called yuv2packedXinC
  739. instead. */
  740. static int printed_error_message;
  741. if (!printed_error_message) {
  742. av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
  743. sws_format_name(c->dstFormat));
  744. printed_error_message=1;
  745. }
  746. return;
  747. }
  748. }
  749. }
  750. if (i < dstW) {
  751. i -= 16;
  752. Y0 = RND;
  753. Y1 = RND;
  754. /* extract 16 coeffs from lumSrc */
  755. for (j=0; j<lumFilterSize; j++) {
  756. X0 = vec_ld (0, &lumSrc[j][i]);
  757. X1 = vec_ld (16, &lumSrc[j][i]);
  758. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  759. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  760. }
  761. U = RND;
  762. V = RND;
  763. /* extract 8 coeffs from U,V */
  764. for (j=0; j<chrFilterSize; j++) {
  765. X = vec_ld (0, &chrSrc[j][i/2]);
  766. U = vec_mradds (X, CCoeffs[j], U);
  767. X = vec_ld (0, &chrSrc[j][i/2+2048]);
  768. V = vec_mradds (X, CCoeffs[j], V);
  769. }
  770. /* scale and clip signals */
  771. Y0 = vec_sra (Y0, SCL);
  772. Y1 = vec_sra (Y1, SCL);
  773. U = vec_sra (U, SCL);
  774. V = vec_sra (V, SCL);
  775. Y0 = vec_clip_s16 (Y0);
  776. Y1 = vec_clip_s16 (Y1);
  777. U = vec_clip_s16 (U);
  778. V = vec_clip_s16 (V);
  779. /* now we have
  780. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  781. U = u0 u1 u2 u3 u4 u5 u6 u7 V = v0 v1 v2 v3 v4 v5 v6 v7
  782. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  783. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  784. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  785. */
  786. U0 = vec_mergeh (U,U);
  787. V0 = vec_mergeh (V,V);
  788. U1 = vec_mergel (U,U);
  789. V1 = vec_mergel (V,V);
  790. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  791. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  792. R = vec_packclp (R0,R1);
  793. G = vec_packclp (G0,G1);
  794. B = vec_packclp (B0,B1);
  795. nout = (vector unsigned char *)scratch;
  796. switch(c->dstFormat) {
  797. case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
  798. case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
  799. case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
  800. case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
  801. case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
  802. case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
  803. default:
  804. /* Unreachable, I think. */
  805. av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
  806. sws_format_name(c->dstFormat));
  807. return;
  808. }
  809. memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
  810. }
  811. }