/* yuv2rgb_altivec.c */
  1. /*
  2. * AltiVec acceleration for colorspace conversion
  3. *
  4. * copyright (C) 2004 Marc Hoffman <marc.hoffman@analog.com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /*
  23. Convert I420 YV12 to RGB in various formats,
  24. it rejects images that are not in 420 formats,
  25. it rejects images that don't have widths of multiples of 16,
  26. it rejects images that don't have heights of multiples of 2.
  27. Reject defers to C simulation code.
  28. Lots of optimizations to be done here.
  29. 1. Need to fix saturation code. I just couldn't get it to fly with packs
  30. and adds, so we currently use max/min to clip.
  31. 2. The inefficient use of chroma loading needs a bit of brushing up.
  32. 3. Analysis of pipeline stalls needs to be done. Use shark to identify
  33. pipeline stalls.
  34. MODIFIED to calculate coeffs from currently selected color space.
  35. MODIFIED core to be a macro where you specify the output format.
  36. ADDED UYVY conversion which is never called due to some thing in swscale.
37. CORRECTED algorithm selection to be strict on input formats.
  38. ADDED runtime detection of AltiVec.
  39. ADDED altivec_yuv2packedX vertical scl + RGB converter
  40. March 27,2004
  41. PERFORMANCE ANALYSIS
  42. The C version uses 25% of the processor or ~250Mips for D1 video rawvideo
  43. used as test.
  44. The AltiVec version uses 10% of the processor or ~100Mips for D1 video
  45. same sequence.
  46. 720 * 480 * 30 ~10MPS
  47. so we have roughly 10 clocks per pixel. This is too high, something has
  48. to be wrong.
  49. OPTIMIZED clip codes to utilize vec_max and vec_packs removing the
  50. need for vec_min.
  51. OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to have
  52. the input video frame, it was just decompressed so it probably resides in L1
  53. caches. However, we are creating the output video stream. This needs to use the
  54. DSTST instruction to optimize for the cache. We couple this with the fact that
  55. we are not going to be visiting the input buffer again so we mark it Least
  56. Recently Used. This shaves 25% of the processor cycles off.
  57. Now memcpy is the largest mips consumer in the system, probably due
  58. to the inefficient X11 stuff.
  59. GL libraries seem to be very slow on this machine 1.33Ghz PB running
  60. Jaguar, this is not the case for my 1Ghz PB. I thought it might be
  61. a versioning issue, however I have libGL.1.2.dylib for both
  62. machines. (We need to figure this out now.)
  63. GL2 libraries work now with patch for RGB32.
  64. NOTE: quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
  65. Integrated luma prescaling adjustment for saturation/contrast/brightness
  66. adjustment.
  67. */
  68. #include <stdio.h>
  69. #include <stdlib.h>
  70. #include <string.h>
  71. #include <inttypes.h>
  72. #include <assert.h>
  73. #include "config.h"
  74. #include "libswscale/rgb2rgb.h"
  75. #include "libswscale/swscale.h"
  76. #include "libswscale/swscale_internal.h"
  77. #include "libavutil/cpu.h"
  78. #include "libavutil/pixdesc.h"
  79. #include "yuv2rgb_altivec.h"
  80. #undef PROFILE_THE_BEAST
  81. #undef INC_SCALING
/* Shorthand for the 8-bit sample types used throughout this file. */
typedef unsigned char ubyte;
typedef signed char sbyte;
  84. /* RGB interleaver, 16 planar pels 8-bit samples per channel in
  85. homogeneous vector registers x0,x1,x2 are interleaved with the
  86. following technique:
  87. o0 = vec_mergeh (x0,x1);
  88. o1 = vec_perm (o0, x2, perm_rgb_0);
  89. o2 = vec_perm (o0, x2, perm_rgb_1);
  90. o3 = vec_mergel (x0,x1);
  91. o4 = vec_perm (o3,o2,perm_rgb_2);
  92. o5 = vec_perm (o3,o2,perm_rgb_3);
  93. perm_rgb_0: o0(RG).h v1(B) --> o1*
  94. 0 1 2 3 4
  95. rgbr|gbrg|brgb|rgbr
  96. 0010 0100 1001 0010
  97. 0102 3145 2673 894A
  98. perm_rgb_1: o0(RG).h v1(B) --> o2
  99. 0 1 2 3 4
  100. gbrg|brgb|bbbb|bbbb
  101. 0100 1001 1111 1111
  102. B5CD 6EF7 89AB CDEF
  103. perm_rgb_2: o3(RG).l o2(rgbB.l) --> o4*
  104. 0 1 2 3 4
  105. gbrg|brgb|rgbr|gbrg
  106. 1111 1111 0010 0100
  107. 89AB CDEF 0182 3945
108. perm_rgb_3: o3(RG).l o2(rgbB.l) ---> o5*
  109. 0 1 2 3 4
  110. brgb|rgbr|gbrg|brgb
  111. 1001 0010 0100 1001
  112. a67b 89cA BdCD eEFf
  113. */
/* Permutation tables used by vec_merge3() to interleave three planar
 * 16-byte channel vectors into packed 3-byte pixels. Byte indices
 * 0x00-0x0f select from the first vec_perm operand, 0x10-0x1f from the
 * second; see the derivation in the comment block above. */
static
const vector unsigned char
perm_rgb_0 = {0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
              0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a},
perm_rgb_1 = {0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
              0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f},
perm_rgb_2 = {0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
              0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05},
perm_rgb_3 = {0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
              0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f};
/*
 * vec_merge3(x2,x1,x0,y0,y1,y2)
 * Interleave three 16-byte channel vectors into three vectors of packed
 * 3-byte pixels (48 bytes = 16 pixels) via the perm_rgb_* tables above.
 * Note the reversed parameter naming: the third argument (x0) supplies
 * the first byte of each output pixel.
 */
#define vec_merge3(x2,x1,x0,y0,y1,y2) \
do { \
    __typeof__(x0) o0,o2,o3; \
    o0 = vec_mergeh (x0,x1); \
    y0 = vec_perm (o0, x2, perm_rgb_0); \
    o2 = vec_perm (o0, x2, perm_rgb_1); \
    o3 = vec_mergel (x0,x1); \
    y1 = vec_perm (o3,o2,perm_rgb_2); \
    y2 = vec_perm (o3,o2,perm_rgb_3); \
} while(0)

/* Store 16 packed 24-bit pixels built from (x0,x1,x2); advances ptr by
 * three vectors (48 bytes). Used by out_bgr24. */
#define vec_mstbgr24(x0,x1,x2,ptr) \
do { \
    __typeof__(x0) _0,_1,_2; \
    vec_merge3 (x0,x1,x2,_0,_1,_2); \
    vec_st (_0, 0, ptr++); \
    vec_st (_1, 0, ptr++); \
    vec_st (_2, 0, ptr++); \
} while (0)

/* Same as vec_mstbgr24 but with the channel order reversed before the
 * merge (x2 first); used by out_rgb24. Advances ptr by three vectors. */
#define vec_mstrgb24(x0,x1,x2,ptr) \
do { \
    __typeof__(x0) _0,_1,_2; \
    vec_merge3 (x2,x1,x0,_0,_1,_2); \
    vec_st (_0, 0, ptr++); \
    vec_st (_1, 0, ptr++); \
    vec_st (_2, 0, ptr++); \
} while (0)
  150. /* pack the pixels in rgb0 format
  151. msb R
  152. lsb 0
  153. */
/*
 * vec_mstrgb32(T,x0,x1,x2,x3,ptr)
 * Interleave four channel vectors into 16 4-byte pixels whose byte order
 * within each pixel is x0,x1,x2,x3, and store them (64 bytes total);
 * advances ptr by four vectors. The out_*32 macros pass a constant 255
 * vector in the alpha position.
 */
#define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
do { \
    T _0,_1,_2,_3; \
    _0 = vec_mergeh (x0,x1); \
    _1 = vec_mergeh (x2,x3); \
    _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
    _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
    vec_st (_2, 0*16, (T *)ptr); \
    vec_st (_3, 1*16, (T *)ptr); \
    _0 = vec_mergel (x0,x1); \
    _1 = vec_mergel (x2,x3); \
    _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
    _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
    vec_st (_2, 2*16, (T *)ptr); \
    vec_st (_3, 3*16, (T *)ptr); \
    ptr += 4; \
} while (0)
  171. /*
  172. | 1 0 1.4021 | | Y |
  173. | 1 -0.3441 -0.7142 |x| Cb|
  174. | 1 1.7718 0 | | Cr|
  175. Y: [-128 127]
  176. Cb/Cr : [-128 127]
  177. typical yuv conversion work on Y: 0-255 this version has been optimized for jpeg decode.
  178. */
/* Zero-extend the high 8 bytes of x into 8 signed 16-bit lanes
 * (each lane is 0x00nn, so values stay in [0,255]). */
#define vec_unh(x) \
    (vector signed short) \
    vec_perm(x,(__typeof__(x)){0}, \
             ((vector unsigned char){0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
                                     0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07}))

/* Zero-extend the low 8 bytes of x into 8 signed 16-bit lanes. */
#define vec_unl(x) \
    (vector signed short) \
    vec_perm(x,(__typeof__(x)){0}, \
             ((vector unsigned char){0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
                                     0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F}))

/* Clamp each signed 16-bit lane to the video range [16,235]. */
#define vec_clip_s16(x) \
    vec_max (vec_min (x, ((vector signed short){235,235,235,235,235,235,235,235})), \
             ((vector signed short){ 16, 16, 16, 16, 16, 16, 16, 16}))

/* Clamp negative lanes to 0, then saturate-pack the two short vectors
 * into one vector of 16 unsigned bytes. */
#define vec_packclp(x,y) \
    (vector unsigned char)vec_packs \
        ((vector unsigned short)vec_max (x,((vector signed short) {0})), \
         (vector unsigned short)vec_max (y,((vector signed short) {0})))
  196. //#define out_pixels(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,a,a,ptr)
  197. static inline void cvtyuvtoRGB (SwsContext *c,
  198. vector signed short Y, vector signed short U, vector signed short V,
  199. vector signed short *R, vector signed short *G, vector signed short *B)
  200. {
  201. vector signed short vx,ux,uvx;
  202. Y = vec_mradds (Y, c->CY, c->OY);
  203. U = vec_sub (U,(vector signed short)
  204. vec_splat((vector signed short){128},0));
  205. V = vec_sub (V,(vector signed short)
  206. vec_splat((vector signed short){128},0));
  207. // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
  208. ux = vec_sl (U, c->CSHIFT);
  209. *B = vec_mradds (ux, c->CBU, Y);
  210. // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
  211. vx = vec_sl (V, c->CSHIFT);
  212. *R = vec_mradds (vx, c->CRV, Y);
  213. // uvx = ((CGU*u) + (CGV*v))>>15;
  214. uvx = vec_mradds (U, c->CGU, Y);
  215. *G = vec_mradds (V, c->CGV, uvx);
  216. }
  217. /*
  218. ------------------------------------------------------------------------------
  219. CS converters
  220. ------------------------------------------------------------------------------
  221. */
/*
 * DEFCSP420_CVT(name, out_pixels)
 *
 * Defines altivec_<name>(), a SwsFunc-style slice converter from planar
 * 4:2:0 YUV to a packed RGB format; out_pixels is one of the out_*
 * macros below and selects the output byte order. Two luma rows are
 * converted per outer iteration so each chroma sample covers a 2x2
 * pixel block. Requires srcW to be a multiple of 16 and srcSliceH a
 * multiple of 2 (enforced by ff_yuv2rgb_init_altivec). Returns
 * srcSliceH, as swscale expects.
 */
#define DEFCSP420_CVT(name,out_pixels) \
static int altivec_##name (SwsContext *c, \
                           const unsigned char **in, int *instrides, \
                           int srcSliceY, int srcSliceH, \
                           unsigned char **oplanes, int *outstrides) \
{ \
    int w = c->srcW; \
    int h = srcSliceH; \
    int i,j; \
    int instrides_scl[3]; \
    vector unsigned char y0,y1; \
\
    vector signed char u,v; \
\
    vector signed short Y0,Y1,Y2,Y3; \
    vector signed short U,V; \
    vector signed short vx,ux,uvx; \
    vector signed short vx0,ux0,uvx0; \
    vector signed short vx1,ux1,uvx1; \
    vector signed short R0,G0,B0; \
    vector signed short R1,G1,B1; \
    vector unsigned char R,G,B; \
\
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
    vector unsigned char align_perm; \
\
    /* Local copies of the coefficient vectors keep them in registers. */ \
    vector signed short \
        lCY = c->CY, \
        lOY = c->OY, \
        lCRV = c->CRV, \
        lCBU = c->CBU, \
        lCGU = c->CGU, \
        lCGV = c->CGV; \
\
    vector unsigned short lCSHIFT = c->CSHIFT; \
\
    const ubyte *y1i = in[0]; \
    const ubyte *y2i = in[0]+instrides[0]; \
    const ubyte *ui = in[1]; \
    const ubyte *vi = in[2]; \
\
    /* Even/odd output row pointers (two rows per iteration). */ \
    vector unsigned char *oute \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]); \
    vector unsigned char *outo \
        = (vector unsigned char *) \
          (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
\
\
    instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
    instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
    instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
\
\
    for (i=0;i<h/2;i++) { \
        /* Prime the data-stream-touch-for-store engines for both rows. */ \
        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
\
        for (j=0;j<w/16;j++) { \
\
            y1ivP = (const vector unsigned char *)y1i; \
            y2ivP = (const vector unsigned char *)y2i; \
            uivP = (const vector unsigned char *)ui; \
            vivP = (const vector unsigned char *)vi; \
\
            /* Unaligned loads of two luma rows and the chroma rows. */ \
            align_perm = vec_lvsl (0, y1i); \
            y0 = (vector unsigned char) \
                 vec_perm (y1ivP[0], y1ivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, y2i); \
            y1 = (vector unsigned char) \
                 vec_perm (y2ivP[0], y2ivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, ui); \
            u = (vector signed char) \
                vec_perm (uivP[0], uivP[1], align_perm); \
\
            align_perm = vec_lvsl (0, vi); \
            v = (vector signed char) \
                vec_perm (vivP[0], vivP[1], align_perm); \
\
            /* Remove the 128 chroma bias while still in 8-bit lanes. */ \
            u = (vector signed char) \
                vec_sub (u,(vector signed char) \
                           vec_splat((vector signed char){128},0)); \
            v = (vector signed char) \
                vec_sub (v,(vector signed char) \
                           vec_splat((vector signed char){128},0)); \
\
            U = vec_unpackh (u); \
            V = vec_unpackh (v); \
\
\
            Y0 = vec_unh (y0); \
            Y1 = vec_unl (y0); \
            Y2 = vec_unh (y1); \
            Y3 = vec_unl (y1); \
\
            /* Luma gain/offset for all four 8-pixel groups. */ \
            Y0 = vec_mradds (Y0, lCY, lOY); \
            Y1 = vec_mradds (Y1, lCY, lOY); \
            Y2 = vec_mradds (Y2, lCY, lOY); \
            Y3 = vec_mradds (Y3, lCY, lOY); \
\
            /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
            ux = vec_sl (U, lCSHIFT); \
            ux = vec_mradds (ux, lCBU, (vector signed short){0}); \
            ux0 = vec_mergeh (ux,ux); /* duplicate chroma horizontally */ \
            ux1 = vec_mergel (ux,ux); \
\
            /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
            vx = vec_sl (V, lCSHIFT); \
            vx = vec_mradds (vx, lCRV, (vector signed short){0}); \
            vx0 = vec_mergeh (vx,vx); \
            vx1 = vec_mergel (vx,vx); \
\
            /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
            uvx = vec_mradds (U, lCGU, (vector signed short){0}); \
            uvx = vec_mradds (V, lCGV, uvx); \
            uvx0 = vec_mergeh (uvx,uvx); \
            uvx1 = vec_mergel (uvx,uvx); \
\
            /* Even row: add the shared chroma terms to Y0/Y1. */ \
            R0 = vec_add (Y0,vx0); \
            G0 = vec_add (Y0,uvx0); \
            B0 = vec_add (Y0,ux0); \
            R1 = vec_add (Y1,vx1); \
            G1 = vec_add (Y1,uvx1); \
            B1 = vec_add (Y1,ux1); \
\
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
\
            out_pixels(R,G,B,oute); \
\
            /* Odd row: same chroma terms with Y2/Y3. */ \
            R0 = vec_add (Y2,vx0); \
            G0 = vec_add (Y2,uvx0); \
            B0 = vec_add (Y2,ux0); \
            R1 = vec_add (Y3,vx1); \
            G1 = vec_add (Y3,uvx1); \
            B1 = vec_add (Y3,ux1); \
            R = vec_packclp (R0,R1); \
            G = vec_packclp (G0,G1); \
            B = vec_packclp (B0,B1); \
\
\
            out_pixels(R,G,B,outo); \
\
            y1i += 16; \
            y2i += 16; \
            ui += 8; \
            vi += 8; \
\
        } \
\
        /* Advance the output pointers by one stride (in vectors). */ \
        outo += (outstrides[0])>>4; \
        oute += (outstrides[0])>>4; \
\
        ui += instrides_scl[1]; \
        vi += instrides_scl[2]; \
        y1i += instrides_scl[0]; \
        y2i += instrides_scl[0]; \
    } \
    return srcSliceH; \
}
/* Output packers: store 16 pixels from the (R,G,B) channel vectors in
 * the named byte order. The 32-bit variants splice in a constant 255
 * alpha vector; the 24-bit variants use the vec_merge3() interleavers. */
#define out_abgr(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),c,b,a,ptr)
#define out_bgra(a,b,c,ptr) vec_mstrgb32(__typeof__(a),c,b,a,((__typeof__ (a)){255}),ptr)
#define out_rgba(a,b,c,ptr) vec_mstrgb32(__typeof__(a),a,b,c,((__typeof__ (a)){255}),ptr)
#define out_argb(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,b,c,ptr)
#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)

/* Instantiate one 4:2:0 converter per supported output format. */
DEFCSP420_CVT (yuv2_abgr, out_abgr)
DEFCSP420_CVT (yuv2_bgra, out_bgra)
DEFCSP420_CVT (yuv2_rgba, out_rgba)
DEFCSP420_CVT (yuv2_argb, out_argb)
DEFCSP420_CVT (yuv2_rgb24, out_rgb24)
DEFCSP420_CVT (yuv2_bgr24, out_bgr24)
// uyvy|uyvy|uyvy|uyvy
// 0123 4567 89ab cdef
/* Permutation tables that de-interleave packed UYVY: each pairs byte
 * 0x10 (a zero taken from the second vec_perm operand) with a U, V or Y
 * byte of the first operand, widening the samples to 16-bit lanes. */
static
const vector unsigned char
demux_u = {0x10,0x00,0x10,0x00,
           0x10,0x04,0x10,0x04,
           0x10,0x08,0x10,0x08,
           0x10,0x0c,0x10,0x0c},
demux_v = {0x10,0x02,0x10,0x02,
           0x10,0x06,0x10,0x06,
           0x10,0x0A,0x10,0x0A,
           0x10,0x0E,0x10,0x0E},
demux_y = {0x10,0x01,0x10,0x03,
           0x10,0x05,0x10,0x07,
           0x10,0x09,0x10,0x0B,
           0x10,0x0D,0x10,0x0F};
  413. /*
  414. this is so I can play live CCIR raw video
  415. */
  416. static int altivec_uyvy_rgb32 (SwsContext *c,
  417. const unsigned char **in, int *instrides,
  418. int srcSliceY, int srcSliceH,
  419. unsigned char **oplanes, int *outstrides)
  420. {
  421. int w = c->srcW;
  422. int h = srcSliceH;
  423. int i,j;
  424. vector unsigned char uyvy;
  425. vector signed short Y,U,V;
  426. vector signed short R0,G0,B0,R1,G1,B1;
  427. vector unsigned char R,G,B;
  428. vector unsigned char *out;
  429. const ubyte *img;
  430. img = in[0];
  431. out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
  432. for (i=0;i<h;i++) {
  433. for (j=0;j<w/16;j++) {
  434. uyvy = vec_ld (0, img);
  435. U = (vector signed short)
  436. vec_perm (uyvy, (vector unsigned char){0}, demux_u);
  437. V = (vector signed short)
  438. vec_perm (uyvy, (vector unsigned char){0}, demux_v);
  439. Y = (vector signed short)
  440. vec_perm (uyvy, (vector unsigned char){0}, demux_y);
  441. cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
  442. uyvy = vec_ld (16, img);
  443. U = (vector signed short)
  444. vec_perm (uyvy, (vector unsigned char){0}, demux_u);
  445. V = (vector signed short)
  446. vec_perm (uyvy, (vector unsigned char){0}, demux_v);
  447. Y = (vector signed short)
  448. vec_perm (uyvy, (vector unsigned char){0}, demux_y);
  449. cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
  450. R = vec_packclp (R0,R1);
  451. G = vec_packclp (G0,G1);
  452. B = vec_packclp (B0,B1);
  453. // vec_mstbgr24 (R,G,B, out);
  454. out_rgba (R,G,B,out);
  455. img += 32;
  456. }
  457. }
  458. return srcSliceH;
  459. }
  460. /* Ok currently the acceleration routine only supports
  461. inputs of widths a multiple of 16
462. and heights a multiple of 2
  463. So we just fall back to the C codes for this.
  464. */
  465. SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c)
  466. {
  467. if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
  468. return NULL;
  469. /*
  470. and this seems not to matter too much I tried a bunch of
  471. videos with abnormal widths and MPlayer crashes elsewhere.
  472. mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
  473. boom with X11 bad match.
  474. */
  475. if ((c->srcW & 0xf) != 0) return NULL;
  476. switch (c->srcFormat) {
  477. case PIX_FMT_YUV410P:
  478. case PIX_FMT_YUV420P:
  479. /*case IMGFMT_CLPL: ??? */
  480. case PIX_FMT_GRAY8:
  481. case PIX_FMT_NV12:
  482. case PIX_FMT_NV21:
  483. if ((c->srcH & 0x1) != 0)
  484. return NULL;
  485. switch(c->dstFormat) {
  486. case PIX_FMT_RGB24:
  487. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
  488. return altivec_yuv2_rgb24;
  489. case PIX_FMT_BGR24:
  490. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
  491. return altivec_yuv2_bgr24;
  492. case PIX_FMT_ARGB:
  493. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
  494. return altivec_yuv2_argb;
  495. case PIX_FMT_ABGR:
  496. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
  497. return altivec_yuv2_abgr;
  498. case PIX_FMT_RGBA:
  499. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
  500. return altivec_yuv2_rgba;
  501. case PIX_FMT_BGRA:
  502. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
  503. return altivec_yuv2_bgra;
  504. default: return NULL;
  505. }
  506. break;
  507. case PIX_FMT_UYVY422:
  508. switch(c->dstFormat) {
  509. case PIX_FMT_BGR32:
  510. av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
  511. return altivec_uyvy_rgb32;
  512. default: return NULL;
  513. }
  514. break;
  515. }
  516. return NULL;
  517. }
/*
 * Precompute the per-context YUV->RGB coefficient vectors from the
 * selected colourspace (inv_table) and the brightness/contrast/
 * saturation settings, splatting each 16-bit coefficient across a
 * vector for use by cvtyuvtoRGB() and the DEFCSP420_CVT converters.
 *
 * NOTE(review): contrast and saturation appear to be 16.16 fixed point
 * (>>16 takes the integer part) -- TODO confirm against the C table
 * init in libswscale.
 */
void ff_yuv2rgb_init_tables_altivec(SwsContext *c, const int inv_table[4], int brightness, int contrast, int saturation)
{
    /* 16-byte aligned scalar view over the vector we are filling. */
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;
    buf.tmp[0] = ((0xffffLL) * contrast>>8)>>9; //cy
    buf.tmp[1] = -256*brightness; //oy
    buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
    buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
    buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
    buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv
    /* Chroma pre-shift applied before the CRV/CBU multiplies. */
    c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
    c->CY = vec_splat ((vector signed short)buf.vec, 0);
    c->OY = vec_splat ((vector signed short)buf.vec, 1);
    c->CRV = vec_splat ((vector signed short)buf.vec, 2);
    c->CBU = vec_splat ((vector signed short)buf.vec, 3);
    c->CGU = vec_splat ((vector signed short)buf.vec, 4);
    c->CGV = vec_splat ((vector signed short)buf.vec, 5);
    return;
}
  539. static av_always_inline void
  540. ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
  541. const int16_t **lumSrc, int lumFilterSize,
  542. const int16_t *chrFilter, const int16_t **chrUSrc,
  543. const int16_t **chrVSrc, int chrFilterSize,
  544. const int16_t **alpSrc, uint8_t *dest,
  545. int dstW, int dstY, enum PixelFormat target)
  546. {
  547. int i,j;
  548. vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
  549. vector signed short R0,G0,B0,R1,G1,B1;
  550. vector unsigned char R,G,B;
  551. vector unsigned char *out,*nout;
  552. vector signed short RND = vec_splat_s16(1<<3);
  553. vector unsigned short SCL = vec_splat_u16(4);
  554. DECLARE_ALIGNED(16, unsigned int, scratch)[16];
  555. vector signed short *YCoeffs, *CCoeffs;
  556. YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
  557. CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
  558. out = (vector unsigned char *)dest;
  559. for (i=0; i<dstW; i+=16) {
  560. Y0 = RND;
  561. Y1 = RND;
  562. /* extract 16 coeffs from lumSrc */
  563. for (j=0; j<lumFilterSize; j++) {
  564. X0 = vec_ld (0, &lumSrc[j][i]);
  565. X1 = vec_ld (16, &lumSrc[j][i]);
  566. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  567. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  568. }
  569. U = RND;
  570. V = RND;
  571. /* extract 8 coeffs from U,V */
  572. for (j=0; j<chrFilterSize; j++) {
  573. X = vec_ld (0, &chrUSrc[j][i/2]);
  574. U = vec_mradds (X, CCoeffs[j], U);
  575. X = vec_ld (0, &chrVSrc[j][i/2]);
  576. V = vec_mradds (X, CCoeffs[j], V);
  577. }
  578. /* scale and clip signals */
  579. Y0 = vec_sra (Y0, SCL);
  580. Y1 = vec_sra (Y1, SCL);
  581. U = vec_sra (U, SCL);
  582. V = vec_sra (V, SCL);
  583. Y0 = vec_clip_s16 (Y0);
  584. Y1 = vec_clip_s16 (Y1);
  585. U = vec_clip_s16 (U);
  586. V = vec_clip_s16 (V);
  587. /* now we have
  588. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  589. U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
  590. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  591. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  592. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  593. */
  594. U0 = vec_mergeh (U,U);
  595. V0 = vec_mergeh (V,V);
  596. U1 = vec_mergel (U,U);
  597. V1 = vec_mergel (V,V);
  598. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  599. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  600. R = vec_packclp (R0,R1);
  601. G = vec_packclp (G0,G1);
  602. B = vec_packclp (B0,B1);
  603. switch(target) {
  604. case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
  605. case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
  606. case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
  607. case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
  608. case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
  609. case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
  610. default:
  611. {
  612. /* If this is reached, the caller should have called yuv2packedXinC
  613. instead. */
  614. static int printed_error_message;
  615. if (!printed_error_message) {
  616. av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
  617. av_get_pix_fmt_name(c->dstFormat));
  618. printed_error_message=1;
  619. }
  620. return;
  621. }
  622. }
  623. }
  624. if (i < dstW) {
  625. i -= 16;
  626. Y0 = RND;
  627. Y1 = RND;
  628. /* extract 16 coeffs from lumSrc */
  629. for (j=0; j<lumFilterSize; j++) {
  630. X0 = vec_ld (0, &lumSrc[j][i]);
  631. X1 = vec_ld (16, &lumSrc[j][i]);
  632. Y0 = vec_mradds (X0, YCoeffs[j], Y0);
  633. Y1 = vec_mradds (X1, YCoeffs[j], Y1);
  634. }
  635. U = RND;
  636. V = RND;
  637. /* extract 8 coeffs from U,V */
  638. for (j=0; j<chrFilterSize; j++) {
  639. X = vec_ld (0, &chrUSrc[j][i/2]);
  640. U = vec_mradds (X, CCoeffs[j], U);
  641. X = vec_ld (0, &chrVSrc[j][i/2]);
  642. V = vec_mradds (X, CCoeffs[j], V);
  643. }
  644. /* scale and clip signals */
  645. Y0 = vec_sra (Y0, SCL);
  646. Y1 = vec_sra (Y1, SCL);
  647. U = vec_sra (U, SCL);
  648. V = vec_sra (V, SCL);
  649. Y0 = vec_clip_s16 (Y0);
  650. Y1 = vec_clip_s16 (Y1);
  651. U = vec_clip_s16 (U);
  652. V = vec_clip_s16 (V);
  653. /* now we have
  654. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  655. U = u0 u1 u2 u3 u4 u5 u6 u7 V = v0 v1 v2 v3 v4 v5 v6 v7
  656. Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
  657. U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
  658. V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
  659. */
  660. U0 = vec_mergeh (U,U);
  661. V0 = vec_mergeh (V,V);
  662. U1 = vec_mergel (U,U);
  663. V1 = vec_mergel (V,V);
  664. cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
  665. cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
  666. R = vec_packclp (R0,R1);
  667. G = vec_packclp (G0,G1);
  668. B = vec_packclp (B0,B1);
  669. nout = (vector unsigned char *)scratch;
  670. switch(target) {
  671. case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
  672. case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
  673. case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
  674. case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
  675. case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
  676. case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
  677. default:
  678. /* Unreachable, I think. */
  679. av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
  680. av_get_pix_fmt_name(c->dstFormat));
  681. return;
  682. }
  683. memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
  684. }
  685. }
/*
 * Generate the public, per-format entry points: each wrapper forwards to
 * ff_yuv2packedX_altivec() with its pixel format as the compile-time
 * 'target' argument (the shared body is av_always_inline).
 */
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt) \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, const int16_t *lumFilter, \
                                     const int16_t **lumSrc, int lumFilterSize, \
                                     const int16_t *chrFilter, const int16_t **chrUSrc, \
                                     const int16_t **chrVSrc, int chrFilterSize, \
                                     const int16_t **alpSrc, uint8_t *dest, \
                                     int dstW, int dstY) \
{ \
    ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \
                           chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                           alpSrc, dest, dstW, dstY, pixfmt); \
}

YUV2PACKEDX_WRAPPER(abgr, PIX_FMT_ABGR);
YUV2PACKEDX_WRAPPER(bgra, PIX_FMT_BGRA);
YUV2PACKEDX_WRAPPER(argb, PIX_FMT_ARGB);
YUV2PACKEDX_WRAPPER(rgba, PIX_FMT_RGBA);
YUV2PACKEDX_WRAPPER(rgb24, PIX_FMT_RGB24);
YUV2PACKEDX_WRAPPER(bgr24, PIX_FMT_BGR24);