yuv2rgb_altivec.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952
  1. /*
  2. marc.hoffman@analog.com March 8, 2004
  3. Altivec Acceleration for Color Space Conversion revision 0.2
  4. convert I420 YV12 to RGB in various formats,
  5. it rejects images that are not in 420 formats
  6. it rejects images that don't have widths of multiples of 16
  7. it rejects images that don't have heights of multiples of 2
  8. reject defers to C simulation codes.
  9. lots of optimizations to be done here
  10. 1. need to fix saturation code, I just couldn't get it to fly with packs and adds.
  11. so we currently use max min to clip
  12. 2. the inefficient use of chroma loading needs a bit of brushing up
  13. 3. analysis of pipeline stalls needs to be done, use shark to identify pipeline stalls
  14. MODIFIED to calculate coeffs from currently selected color space.
  15. MODIFIED core to be a macro which you spec the output format.
  16. ADDED UYVY conversion which is never called due to some thing in SWSCALE.
17. CORRECTED algorithm selection to be strict on input formats.
  18. ADDED runtime detection of altivec.
  19. ADDED altivec_yuv2packedX vertical scl + RGB converter
  20. March 27,2004
  21. PERFORMANCE ANALYSIS
22. The C version uses 25% of the processor or ~250Mips for D1 video rawvideo used as test
  23. The ALTIVEC version uses 10% of the processor or ~100Mips for D1 video same sequence
  24. 720*480*30 ~10MPS
  25. so we have roughly 10clocks per pixel this is too high something has to be wrong.
  26. OPTIMIZED clip codes to utilize vec_max and vec_packs removing the need for vec_min.
  27. OPTIMIZED DST OUTPUT cache/dma controls. we are pretty much
  28. guaranteed to have the input video frame it was just decompressed so
  29. it probably resides in L1 caches. However we are creating the
  30. output video stream this needs to use the DSTST instruction to
  31. optimize for the cache. We couple this with the fact that we are
  32. not going to be visiting the input buffer again so we mark it Least
  33. Recently Used. This shaves 25% of the processor cycles off.
  34. Now MEMCPY is the largest mips consumer in the system, probably due
  35. to the inefficient X11 stuff.
  36. GL libraries seem to be very slow on this machine 1.33Ghz PB running
  37. Jaguar, this is not the case for my 1Ghz PB. I thought it might be
38. a versioning issue, however I have libGL.1.2.dylib for both
  39. machines. ((We need to figure this out now))
  40. GL2 libraries work now with patch for RGB32
  41. NOTE quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor
  42. Integrated luma prescaling adjustment for saturation/contrast/brightness adjustment.
  43. */
  44. #include <stdio.h>
  45. #include <stdlib.h>
  46. #include <string.h>
  47. #include <inttypes.h>
  48. #include <assert.h>
  49. #include "config.h"
  50. #ifdef HAVE_MALLOC_H
  51. #include <malloc.h>
  52. #endif
  53. #include "rgb2rgb.h"
  54. #include "swscale.h"
  55. #include "swscale_internal.h"
  56. #include "img_format.h" //FIXME try to reduce dependency of such stuff
  57. #undef PROFILE_THE_BEAST
  58. #undef INC_SCALING
  59. typedef unsigned char ubyte;
  60. typedef signed char sbyte;
  61. /* RGB interleaver, 16 planar pels 8-bit samples per channel in
  62. homogeneous vector registers x0,x1,x2 are interleaved with the
  63. following technique:
  64. o0 = vec_mergeh (x0,x1);
  65. o1 = vec_perm (o0, x2, perm_rgb_0);
  66. o2 = vec_perm (o0, x2, perm_rgb_1);
  67. o3 = vec_mergel (x0,x1);
  68. o4 = vec_perm (o3,o2,perm_rgb_2);
  69. o5 = vec_perm (o3,o2,perm_rgb_3);
  70. perm_rgb_0: o0(RG).h v1(B) --> o1*
  71. 0 1 2 3 4
  72. rgbr|gbrg|brgb|rgbr
  73. 0010 0100 1001 0010
  74. 0102 3145 2673 894A
  75. perm_rgb_1: o0(RG).h v1(B) --> o2
  76. 0 1 2 3 4
  77. gbrg|brgb|bbbb|bbbb
  78. 0100 1001 1111 1111
  79. B5CD 6EF7 89AB CDEF
  80. perm_rgb_2: o3(RG).l o2(rgbB.l) --> o4*
  81. 0 1 2 3 4
  82. gbrg|brgb|rgbr|gbrg
  83. 1111 1111 0010 0100
  84. 89AB CDEF 0182 3945
  85. perm_rgb_2: o3(RG).l o2(rgbB.l) ---> o5*
  86. 0 1 2 3 4
  87. brgb|rgbr|gbrg|brgb
  88. 1001 0010 0100 1001
  89. a67b 89cA BdCD eEFf
  90. */
/* Permutation selector vectors used by vec_merge3() below to interleave
   three planar 8-bit channel vectors into packed 24-bit RGB triplets
   (byte indices 0x10..0x1f select from the second vec_perm operand;
   see the worked diagram in the comment above). */
static
const vector unsigned char
  perm_rgb_0 = (const vector unsigned char)AVV(0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
                                               0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a),
  perm_rgb_1 = (const vector unsigned char)AVV(0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
                                               0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f),
  perm_rgb_2 = (const vector unsigned char)AVV(0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
                                               0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05),
  perm_rgb_3 = (const vector unsigned char)AVV(0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
                                               0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f);
/* Interleave three planar channel vectors (x0,x1,x2 = 16 samples each)
   into three packed output vectors (y0,y1,y2 = 48 interleaved bytes)
   using the perm_rgb_* selectors above. */
#define vec_merge3(x2,x1,x0,y0,y1,y2) \
do { \
    typeof(x0) o0,o2,o3; \
    o0 = vec_mergeh (x0,x1); \
    y0 = vec_perm (o0, x2, perm_rgb_0); \
    o2 = vec_perm (o0, x2, perm_rgb_1); \
    o3 = vec_mergel (x0,x1); \
    y1 = vec_perm (o3,o2,perm_rgb_2); \
    y2 = vec_perm (o3,o2,perm_rgb_3); \
} while(0)
  111. #define vec_mstbgr24(x0,x1,x2,ptr) \
  112. do { \
  113. typeof(x0) _0,_1,_2; \
  114. vec_merge3 (x0,x1,x2,_0,_1,_2); \
  115. vec_st (_0, 0, ptr++); \
  116. vec_st (_1, 0, ptr++); \
  117. vec_st (_2, 0, ptr++); \
  118. } while (0);
  119. #define vec_mstrgb24(x0,x1,x2,ptr) \
  120. do { \
  121. typeof(x0) _0,_1,_2; \
  122. vec_merge3 (x2,x1,x0,_0,_1,_2); \
  123. vec_st (_0, 0, ptr++); \
  124. vec_st (_1, 0, ptr++); \
  125. vec_st (_2, 0, ptr++); \
  126. } while (0);
  127. /* pack the pixels in rgb0 format
  128. msb R
  129. lsb 0
  130. */
  131. #define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
  132. do { \
  133. T _0,_1,_2,_3; \
  134. _0 = vec_mergeh (x0,x1); \
  135. _1 = vec_mergeh (x2,x3); \
  136. _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
  137. _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
  138. vec_st (_2, 0*16, (T *)ptr); \
  139. vec_st (_3, 1*16, (T *)ptr); \
  140. _0 = vec_mergel (x0,x1); \
  141. _1 = vec_mergel (x2,x3); \
  142. _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
  143. _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
  144. vec_st (_2, 2*16, (T *)ptr); \
  145. vec_st (_3, 3*16, (T *)ptr); \
  146. ptr += 4; \
  147. } while (0);
  148. /*
  149. | 1 0 1.4021 | | Y |
  150. | 1 -0.3441 -0.7142 |x| Cb|
  151. | 1 1.7718 0 | | Cr|
  152. Y: [-128 127]
  153. Cb/Cr : [-128 127]
  154. typical yuv conversion work on Y: 0-255 this version has been optimized for jpeg decode.
  155. */
/* Zero-extend the high 8 bytes of a byte vector into 8 signed 16-bit
   lanes (index 0x10 selects a zero byte from the second operand). */
#define vec_unh(x) \
    (vector signed short) \
    vec_perm(x,(typeof(x))AVV(0),\
             (vector unsigned char)AVV(0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
                                       0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07))
/* Same for the low 8 bytes. */
#define vec_unl(x) \
    (vector signed short) \
    vec_perm(x,(typeof(x))AVV(0),\
             (vector unsigned char)AVV(0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
                                       0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F))
/* NOTE(review): despite the name, this clips every lane to the video
   range [16,235], not to the full signed 16-bit range. */
#define vec_clip_s16(x) \
    vec_max (vec_min (x, (vector signed short)AVV(235,235,235,235,235,235,235,235)),\
             (vector signed short)AVV(16, 16, 16, 16, 16, 16, 16, 16 ))
/* Clamp negative lanes to 0, then saturate-pack the two short vectors
   into one vector of 16 unsigned bytes. */
#define vec_packclp(x,y) \
    (vector unsigned char)vec_packs \
        ((vector unsigned short)vec_max (x,(vector signed short) AVV(0)), \
         (vector unsigned short)vec_max (y,(vector signed short) AVV(0)))
  173. //#define out_pixels(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,a,a,ptr)
/* Convert 8 pixels of Y/U/V (16-bit lanes) to R/G/B using the
   coefficients prepared by yuv2rgb_altivec_init_tables().  Y is scaled
   by CY and offset by OY; U and V are re-centred around zero here.
   Results are NOT clipped -- callers clamp via vec_packclp(). */
static inline void cvtyuvtoRGB (SwsContext *c,
                                vector signed short Y, vector signed short U, vector signed short V,
                                vector signed short *R, vector signed short *G, vector signed short *B)
{
    vector signed short vx,ux,uvx;

    Y = vec_mradds (Y, c->CY, c->OY);
    /* re-centre chroma: U,V in [0,255] -> [-128,127] */
    U = vec_sub (U,(vector signed short)
                   vec_splat((vector signed short)AVV(128),0));
    V = vec_sub (V,(vector signed short)
                   vec_splat((vector signed short)AVV(128),0));

    // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
    ux = vec_sl (U, c->CSHIFT);
    *B = vec_mradds (ux, c->CBU, Y);

    // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
    vx = vec_sl (V, c->CSHIFT);
    *R = vec_mradds (vx, c->CRV, Y);

    // uvx = ((CGU*u) + (CGV*v))>>15;
    uvx = vec_mradds (U, c->CGU, Y);
    *G = vec_mradds (V, c->CGV, uvx);
}
  194. /*
  195. ------------------------------------------------------------------------------
  196. CS converters
  197. ------------------------------------------------------------------------------
  198. */
/* Template that expands to one planar-4:2:0 -> packed-RGB converter per
   output format; 'out_pixels' is one of the out_* store macros below.
   Two output rows are produced per outer iteration so each pair of luma
   rows shares a single chroma row.  Width must be a multiple of 16 and
   the slice height even (both enforced in yuv2rgb_init_altivec()).
   Returns srcSliceH like the other swscale slice converters. */
#define DEFCSP420_CVT(name,out_pixels) \
static int altivec_##name (SwsContext *c, \
                           unsigned char **in, int *instrides, \
                           int srcSliceY, int srcSliceH, \
                           unsigned char **oplanes, int *outstrides) \
{ \
    int w = c->srcW; \
    int h = srcSliceH; \
    int i,j; \
    int instrides_scl[3]; \
    vector unsigned char y0,y1; \
    \
    vector signed char u,v; \
    \
    vector signed short Y0,Y1,Y2,Y3; \
    vector signed short U,V; \
    vector signed short vx,ux,uvx; \
    vector signed short vx0,ux0,uvx0; \
    vector signed short vx1,ux1,uvx1; \
    vector signed short R0,G0,B0; \
    vector signed short R1,G1,B1; \
    vector unsigned char R,G,B; \
    \
    vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
    vector unsigned char align_perm; \
    \
    /* keep the conversion coefficients in locals for the whole slice */ \
    vector signed short \
        lCY = c->CY, \
        lOY = c->OY, \
        lCRV = c->CRV, \
        lCBU = c->CBU, \
        lCGU = c->CGU, \
        lCGV = c->CGV; \
    \
    vector unsigned short lCSHIFT = c->CSHIFT; \
    \
    ubyte *y1i = in[0]; \
    ubyte *y2i = in[0]+instrides[0]; \
    ubyte *ui = in[1]; \
    ubyte *vi = in[2]; \
    \
    /* even / odd output row pointers */ \
    vector unsigned char *oute \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]); \
    vector unsigned char *outo \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
    \
    \
    instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
    instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
    instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
    \
    \
    for (i=0;i<h/2;i++) { \
        /* data-stream-touch-for-store cache hints for both output rows */ \
        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
        \
        for (j=0;j<w/16;j++) { \
            \
            y1ivP = (vector unsigned char *)y1i; \
            y2ivP = (vector unsigned char *)y2i; \
            uivP = (vector unsigned char *)ui; \
            vivP = (vector unsigned char *)vi; \
            \
            /* unaligned loads via lvsl-generated permutes */ \
            align_perm = vec_lvsl (0, y1i); \
            y0 = (vector unsigned char)vec_perm (y1ivP[0], y1ivP[1], align_perm); \
            \
            align_perm = vec_lvsl (0, y2i); \
            y1 = (vector unsigned char)vec_perm (y2ivP[0], y2ivP[1], align_perm); \
            \
            align_perm = vec_lvsl (0, ui); \
            u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm); \
            \
            align_perm = vec_lvsl (0, vi); \
            v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm); \
            \
            /* re-centre chroma around zero */ \
            u = (vector signed char) \
                vec_sub (u,(vector signed char) \
                           vec_splat((vector signed char)AVV(128),0)); \
            v = (vector signed char) \
                vec_sub (v,(vector signed char) \
                           vec_splat((vector signed char)AVV(128),0)); \
            \
            U = vec_unpackh (u); \
            V = vec_unpackh (v); \
            \
            \
            Y0 = vec_unh (y0); \
            Y1 = vec_unl (y0); \
            Y2 = vec_unh (y1); \
            Y3 = vec_unl (y1); \
            \
            Y0 = vec_mradds (Y0, lCY, lOY); \
            Y1 = vec_mradds (Y1, lCY, lOY); \
            Y2 = vec_mradds (Y2, lCY, lOY); \
            Y3 = vec_mradds (Y3, lCY, lOY); \
            \
            /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
            ux = vec_sl (U, lCSHIFT); \
            ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0)); \
            ux0 = vec_mergeh (ux,ux); \
            ux1 = vec_mergel (ux,ux); \
            \
            /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
            vx = vec_sl (V, lCSHIFT); \
            vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0)); \
            vx0 = vec_mergeh (vx,vx); \
            vx1 = vec_mergel (vx,vx); \
            \
            /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
            uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0)); \
            uvx = vec_mradds (V, lCGV, uvx); \
            uvx0 = vec_mergeh (uvx,uvx); \
            uvx1 = vec_mergel (uvx,uvx); \
            \
            R0 = vec_add (Y0,vx0); \
            G0 = vec_add (Y0,uvx0); \
            B0 = vec_add (Y0,ux0); \
            R1 = vec_add (Y1,vx1); \
            G1 = vec_add (Y1,uvx1); \
            B1 = vec_add (Y1,ux1); \
            \
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
            \
            out_pixels(R,G,B,oute); \
            \
            /* odd output row reuses the same (merged) chroma terms */ \
            R0 = vec_add (Y2,vx0); \
            G0 = vec_add (Y2,uvx0); \
            B0 = vec_add (Y2,ux0); \
            R1 = vec_add (Y3,vx1); \
            G1 = vec_add (Y3,uvx1); \
            B1 = vec_add (Y3,ux1); \
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
            \
            \
            out_pixels(R,G,B,outo); \
            \
            y1i += 16; \
            y2i += 16; \
            ui += 8; \
            vi += 8; \
            \
        } \
        \
        /* out_pixels advanced one row; skip the interleaved partner row */ \
        outo += (outstrides[0])>>4; \
        oute += (outstrides[0])>>4; \
        \
        ui += instrides_scl[1]; \
        vi += instrides_scl[2]; \
        y1i += instrides_scl[0]; \
        y2i += instrides_scl[0]; \
    } \
    return srcSliceH; \
}
/* Output-format adapters mapping (R,G,B,ptr) onto the packed store
   macros in each byte order; the AVV(0) vector supplies the unused
   alpha/fill byte for the 32-bit formats. */
#define out_abgr(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),c,b,a,ptr)
#define out_bgra(a,b,c,ptr) vec_mstrgb32(typeof(a),c,b,a,((typeof (a))AVV(0)),ptr)
#define out_rgba(a,b,c,ptr) vec_mstrgb32(typeof(a),a,b,c,((typeof (a))AVV(0)),ptr)
#define out_argb(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,b,c,ptr)
#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
/* Instantiate one converter per supported output format. */
DEFCSP420_CVT (yuv2_abgr, out_abgr)
#if 1
DEFCSP420_CVT (yuv2_bgra, out_bgra)
#else
/* NOTE(review): dead code -- the '#if 1' above always selects the macro
   instantiation.  This hand-expanded variant also differs from the
   template: y2i is advanced by w instead of instrides[0] (breaks for
   padded strides), it uses aligned vec_ldl for luma, and it stores with
   out_argb despite the bgra32 name.  Kept only as reference. */
static int altivec_yuv2_bgra32 (SwsContext *c,
                                unsigned char **in, int *instrides,
                                int srcSliceY, int srcSliceH,
                                unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i,j;
    int instrides_scl[3];
    vector unsigned char y0,y1;

    vector signed char u,v;

    vector signed short Y0,Y1,Y2,Y3;
    vector signed short U,V;
    vector signed short vx,ux,uvx;
    vector signed short vx0,ux0,uvx0;
    vector signed short vx1,ux1,uvx1;
    vector signed short R0,G0,B0;
    vector signed short R1,G1,B1;
    vector unsigned char R,G,B;

    vector unsigned char *uivP, *vivP;
    vector unsigned char align_perm;

    vector signed short
        lCY = c->CY,
        lOY = c->OY,
        lCRV = c->CRV,
        lCBU = c->CBU,
        lCGU = c->CGU,
        lCGV = c->CGV;

    vector unsigned short lCSHIFT = c->CSHIFT;

    ubyte *y1i = in[0];
    ubyte *y2i = in[0]+w; /* NOTE(review): template uses instrides[0] here */
    ubyte *ui = in[1];
    ubyte *vi = in[2];

    vector unsigned char *oute
        = (vector unsigned char *)
          (oplanes[0]+srcSliceY*outstrides[0]);
    vector unsigned char *outo
        = (vector unsigned char *)
          (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);

    instrides_scl[0] = instrides[0];
    instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
    instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */

    for (i=0;i<h/2;i++) {
        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);

        for (j=0;j<w/16;j++) {

            y0 = vec_ldl (0,y1i);
            y1 = vec_ldl (0,y2i);
            uivP = (vector unsigned char *)ui;
            vivP = (vector unsigned char *)vi;

            align_perm = vec_lvsl (0, ui);
            u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);

            align_perm = vec_lvsl (0, vi);
            v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);

            u = (vector signed char)
                vec_sub (u,(vector signed char)
                           vec_splat((vector signed char)AVV(128),0));
            v = (vector signed char)
                vec_sub (v, (vector signed char)
                            vec_splat((vector signed char)AVV(128),0));

            U = vec_unpackh (u);
            V = vec_unpackh (v);

            Y0 = vec_unh (y0);
            Y1 = vec_unl (y0);
            Y2 = vec_unh (y1);
            Y3 = vec_unl (y1);

            Y0 = vec_mradds (Y0, lCY, lOY);
            Y1 = vec_mradds (Y1, lCY, lOY);
            Y2 = vec_mradds (Y2, lCY, lOY);
            Y3 = vec_mradds (Y3, lCY, lOY);

            /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
            ux = vec_sl (U, lCSHIFT);
            ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));
            ux0 = vec_mergeh (ux,ux);
            ux1 = vec_mergel (ux,ux);

            /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
            vx = vec_sl (V, lCSHIFT);
            vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));
            vx0 = vec_mergeh (vx,vx);
            vx1 = vec_mergel (vx,vx);

            /* uvx = ((CGU*u) + (CGV*v))>>15 */
            uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));
            uvx = vec_mradds (V, lCGV, uvx);
            uvx0 = vec_mergeh (uvx,uvx);
            uvx1 = vec_mergel (uvx,uvx);

            R0 = vec_add (Y0,vx0);
            G0 = vec_add (Y0,uvx0);
            B0 = vec_add (Y0,ux0);
            R1 = vec_add (Y1,vx1);
            G1 = vec_add (Y1,uvx1);
            B1 = vec_add (Y1,ux1);

            R = vec_packclp (R0,R1);
            G = vec_packclp (G0,G1);
            B = vec_packclp (B0,B1);

            out_argb(R,G,B,oute);

            R0 = vec_add (Y2,vx0);
            G0 = vec_add (Y2,uvx0);
            B0 = vec_add (Y2,ux0);
            R1 = vec_add (Y3,vx1);
            G1 = vec_add (Y3,uvx1);
            B1 = vec_add (Y3,ux1);
            R = vec_packclp (R0,R1);
            G = vec_packclp (G0,G1);
            B = vec_packclp (B0,B1);

            out_argb(R,G,B,outo);

            y1i += 16;
            y2i += 16;
            ui += 8;
            vi += 8;
        }

        outo += (outstrides[0])>>4;
        oute += (outstrides[0])>>4;

        ui += instrides_scl[1];
        vi += instrides_scl[2];
        y1i += instrides_scl[0];
        y2i += instrides_scl[0];
    }
    return srcSliceH;
}
#endif
DEFCSP420_CVT (yuv2_rgba, out_rgba)
DEFCSP420_CVT (yuv2_argb, out_argb)
DEFCSP420_CVT (yuv2_rgb24, out_rgb24)
DEFCSP420_CVT (yuv2_bgr24, out_bgr24)
// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
/* vec_perm selectors that de-interleave one 16-byte UYVY vector against
   a zero vector: index 0x10 picks a zero byte, yielding 8 zero-extended
   16-bit lanes of U, V or Y respectively (U/V lanes are duplicated to
   match the 2:1 chroma subsampling). */
static
const vector unsigned char
  demux_u = (const vector unsigned char)AVV(0x10,0x00,0x10,0x00,
                                            0x10,0x04,0x10,0x04,
                                            0x10,0x08,0x10,0x08,
                                            0x10,0x0c,0x10,0x0c),
  demux_v = (const vector unsigned char)AVV(0x10,0x02,0x10,0x02,
                                            0x10,0x06,0x10,0x06,
                                            0x10,0x0A,0x10,0x0A,
                                            0x10,0x0E,0x10,0x0E),
  demux_y = (const vector unsigned char)AVV(0x10,0x01,0x10,0x03,
                                            0x10,0x05,0x10,0x07,
                                            0x10,0x09,0x10,0x0B,
                                            0x10,0x0D,0x10,0x0F);
  508. /*
  509. this is so I can play live CCIR raw video
  510. */
/* Convert packed UYVY 4:2:2 to 32-bit RGBA.  Each 16-byte load covers 8
   pixels; two loads per inner iteration feed one 16-pixel out_rgba
   store.  Returns srcSliceH.
   NOTE(review): instrides/outstrides are never consulted inside the
   loops -- this appears to assume the input is packed at exactly w*2
   bytes/row and the output at w*4 bytes/row, and that w is a multiple
   of 16; confirm against the callers. */
static int altivec_uyvy_rgb32 (SwsContext *c,
                               unsigned char **in, int *instrides,
                               int srcSliceY, int srcSliceH,
                               unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i,j;
    vector unsigned char uyvy;
    vector signed short Y,U,V;
    vector signed short vx,ux,uvx;
    vector signed short R0,G0,B0,R1,G1,B1;
    vector unsigned char R,G,B;
    vector unsigned char *out;
    ubyte *img;

    img = in[0];
    out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);

    for (i=0;i<h;i++) {
        for (j=0;j<w/16;j++) {
            /* first 8 pixels */
            uyvy = vec_ld (0, img);
            U = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
            V = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
            Y = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);

            cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);

            /* next 8 pixels */
            uyvy = vec_ld (16, img);
            U = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
            V = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
            Y = (vector signed short)
                vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);

            cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);

            R = vec_packclp (R0,R1);
            G = vec_packclp (G0,G1);
            B = vec_packclp (B0,B1);

            // vec_mstbgr24 (R,G,B, out);
            out_rgba (R,G,B,out);

            img += 32;
        }
    }
    return srcSliceH;
}
  556. /* Ok currently the acceleration routine only supports
  557. inputs of widths a multiple of 16
  558. and heights a multiple 2
  559. So we just fall back to the C codes for this.
  560. */
  561. SwsFunc yuv2rgb_init_altivec (SwsContext *c)
  562. {
  563. if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
  564. return NULL;
  565. /*
  566. and this seems not to matter too much I tried a bunch of
  567. videos with abnormal widths and mplayer crashes else where.
  568. mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
  569. boom with X11 bad match.
  570. */
  571. if ((c->srcW & 0xf) != 0) return NULL;
  572. switch (c->srcFormat) {
  573. case IMGFMT_YVU9:
  574. case IMGFMT_IF09:
  575. case IMGFMT_YV12:
  576. case IMGFMT_I420:
  577. case IMGFMT_IYUV:
  578. case IMGFMT_CLPL:
  579. case IMGFMT_Y800:
  580. case IMGFMT_Y8:
  581. case IMGFMT_NV12:
  582. case IMGFMT_NV21:
  583. if ((c->srcH & 0x1) != 0)
  584. return NULL;
  585. switch(c->dstFormat){
  586. case IMGFMT_RGB24:
  587. MSG_WARN("ALTIVEC: Color Space RGB24\n");
  588. return altivec_yuv2_rgb24;
  589. case IMGFMT_BGR24:
  590. MSG_WARN("ALTIVEC: Color Space BGR24\n");
  591. return altivec_yuv2_bgr24;
  592. case IMGFMT_ARGB:
  593. MSG_WARN("ALTIVEC: Color Space ARGB\n");
  594. return altivec_yuv2_argb;
  595. case IMGFMT_ABGR:
  596. MSG_WARN("ALTIVEC: Color Space ABGR\n");
  597. return altivec_yuv2_abgr;
  598. case IMGFMT_RGBA:
  599. MSG_WARN("ALTIVEC: Color Space RGBA\n");
  600. return altivec_yuv2_rgba;
  601. case IMGFMT_BGRA:
  602. MSG_WARN("ALTIVEC: Color Space BGRA\n");
  603. return altivec_yuv2_bgra;
  604. default: return NULL;
  605. }
  606. break;
  607. case IMGFMT_UYVY:
  608. switch(c->dstFormat){
  609. case IMGFMT_RGB32:
  610. MSG_WARN("ALTIVEC: Color Space UYVY -> RGB32\n");
  611. return altivec_uyvy_rgb32;
  612. default: return NULL;
  613. }
  614. break;
  615. }
  616. return NULL;
  617. }
  618. static uint16_t roundToInt16(int64_t f){
  619. int r= (f + (1<<15))>>16;
  620. if(r<-0x7FFF) return 0x8000;
  621. else if(r> 0x7FFF) return 0x7FFF;
  622. else return r;
  623. }
/* Compute the YUV->RGB coefficient vectors for the currently selected
   colour space (inv_table) and the brightness/contrast/saturation
   settings, and splat them into the context for the vector converters.
   NOTE(review): contrast/saturation look like 16.16 fixed point, so
   (x>>16) keeps only the integer part -- confirm the callers' scaling. */
void yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation)
{
    /* staging buffer: written as aligned scalars, read back as a vector */
    union {
        signed short tmp[8] __attribute__ ((aligned(16)));
        vector signed short vec;
    } buf;

    buf.tmp[0] = ( (0xffffLL) * contrast>>8 )>>9; //cy
    buf.tmp[1] = -256*brightness; //oy
    buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
    buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
    buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
    buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv

    /* chroma pre-shift used by cvtyuvtoRGB / the DEFCSP420 converters */
    c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
    c->CY = vec_splat ((vector signed short)buf.vec, 0);
    c->OY = vec_splat ((vector signed short)buf.vec, 1);
    c->CRV = vec_splat ((vector signed short)buf.vec, 2);
    c->CBU = vec_splat ((vector signed short)buf.vec, 3);
    c->CGU = vec_splat ((vector signed short)buf.vec, 4);
    c->CGV = vec_splat ((vector signed short)buf.vec, 5);
#if 0
{
    int i;
    char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
    for (i=0; i<6;i++)
        printf("%s %d ", v[i],buf.tmp[i] );
    printf("\n");
}
#endif
    return;
}
  654. void
  655. altivec_yuv2packedX (SwsContext *c,
  656. int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  657. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  658. uint8_t *dest, int dstW, int dstY)
  659. {
  660. int i,j;
  661. short *f;
  662. vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
  663. vector signed short R0,G0,B0,R1,G1,B1;
  664. vector unsigned char R,G,B,pels[3];
  665. vector unsigned char *out,*nout;
  666. vector signed short RND = vec_splat_s16(1<<3);
  667. vector unsigned short SCL = vec_splat_u16(4);
  668. unsigned long scratch[16] __attribute__ ((aligned (16)));
  669. vector signed short *YCoeffs, *CCoeffs;
  670. YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
  671. CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
  672. out = (vector unsigned char *)dest;
  673. for(i=0; i<dstW; i+=16){
  674. Y0 = RND;
  675. Y1 = RND;
  676. /* extract 16 coeffs from lumSrc */
  677. for(j=0; j<lumFilterSize; j++) {
  678. X0 = vec_ld (0, &lumSrc[j][i]);
  679. X1 = vec_ld (16, &lumSrc[j][i]);
  680. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  681. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  682. }
  683. U = RND;
  684. V = RND;
  685. /* extract 8 coeffs from U,V */
  686. for(j=0; j<chrFilterSize; j++) {
  687. X = vec_ld (0, &chrSrc[j][i/2]);
  688. U = vec_mradds (X, CCoeffs[j], U);
  689. X = vec_ld (0, &chrSrc[j][i/2+2048]);
  690. V = vec_mradds (X, CCoeffs[j], V);
  691. }
  692. /* scale and clip signals */
  693. Y0 = vec_sra (Y0, SCL);
  694. Y1 = vec_sra (Y1, SCL);
  695. U = vec_sra (U, SCL);
  696. V = vec_sra (V, SCL);
  697. Y0 = vec_clip_s16 (Y0);
  698. Y1 = vec_clip_s16 (Y1);
  699. U = vec_clip_s16 (U);
  700. V = vec_clip_s16 (V);
  701. /* now we have
  702. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  703. U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
  704. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  705. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  706. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  707. */
  708. U0 = vec_mergeh (U,U);
  709. V0 = vec_mergeh (V,V);
  710. U1 = vec_mergel (U,U);
  711. V1 = vec_mergel (V,V);
  712. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  713. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  714. R = vec_packclp (R0,R1);
  715. G = vec_packclp (G0,G1);
  716. B = vec_packclp (B0,B1);
  717. switch(c->dstFormat) {
  718. case IMGFMT_ABGR: out_abgr (R,G,B,out); break;
  719. case IMGFMT_BGRA: out_bgra (R,G,B,out); break;
  720. case IMGFMT_RGBA: out_rgba (R,G,B,out); break;
  721. case IMGFMT_ARGB: out_argb (R,G,B,out); break;
  722. case IMGFMT_RGB24: out_rgb24 (R,G,B,out); break;
  723. case IMGFMT_BGR24: out_bgr24 (R,G,B,out); break;
  724. default:
  725. {
  726. /* If this is reached, the caller should have called yuv2packedXinC
  727. instead. */
  728. static int printed_error_message;
  729. if(!printed_error_message) {
  730. MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
  731. sws_format_name(c->dstFormat));
  732. printed_error_message=1;
  733. }
  734. return;
  735. }
  736. }
  737. }
  738. if (i < dstW) {
  739. i -= 16;
  740. Y0 = RND;
  741. Y1 = RND;
  742. /* extract 16 coeffs from lumSrc */
  743. for(j=0; j<lumFilterSize; j++) {
  744. X0 = vec_ld (0, &lumSrc[j][i]);
  745. X1 = vec_ld (16, &lumSrc[j][i]);
  746. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  747. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  748. }
  749. U = RND;
  750. V = RND;
  751. /* extract 8 coeffs from U,V */
  752. for(j=0; j<chrFilterSize; j++) {
  753. X = vec_ld (0, &chrSrc[j][i/2]);
  754. U = vec_mradds (X, CCoeffs[j], U);
  755. X = vec_ld (0, &chrSrc[j][i/2+2048]);
  756. V = vec_mradds (X, CCoeffs[j], V);
  757. }
  758. /* scale and clip signals */
  759. Y0 = vec_sra (Y0, SCL);
  760. Y1 = vec_sra (Y1, SCL);
  761. U = vec_sra (U, SCL);
  762. V = vec_sra (V, SCL);
  763. Y0 = vec_clip_s16 (Y0);
  764. Y1 = vec_clip_s16 (Y1);
  765. U = vec_clip_s16 (U);
  766. V = vec_clip_s16 (V);
  767. /* now we have
  768. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  769. U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
  770. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  771. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  772. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  773. */
  774. U0 = vec_mergeh (U,U);
  775. V0 = vec_mergeh (V,V);
  776. U1 = vec_mergel (U,U);
  777. V1 = vec_mergel (V,V);
  778. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  779. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  780. R = vec_packclp (R0,R1);
  781. G = vec_packclp (G0,G1);
  782. B = vec_packclp (B0,B1);
  783. nout = (vector unsigned char *)scratch;
  784. switch(c->dstFormat) {
  785. case IMGFMT_ABGR: out_abgr (R,G,B,nout); break;
  786. case IMGFMT_BGRA: out_bgra (R,G,B,nout); break;
  787. case IMGFMT_RGBA: out_rgba (R,G,B,nout); break;
  788. case IMGFMT_ARGB: out_argb (R,G,B,nout); break;
  789. case IMGFMT_RGB24: out_rgb24 (R,G,B,nout); break;
  790. case IMGFMT_BGR24: out_bgr24 (R,G,B,nout); break;
  791. default:
  792. /* Unreachable, I think. */
  793. MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
  794. sws_format_name(c->dstFormat));
  795. return;
  796. }
  797. memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
  798. }
  799. }