swscale_vsx.c 80 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208
  1. /*
  2. * AltiVec-enhanced yuv2yuvX
  3. *
  4. * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
  5. * based on the equivalent C code in swscale.c
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include <inttypes.h>
  24. #include "config.h"
  25. #include "libswscale/swscale.h"
  26. #include "libswscale/swscale_internal.h"
  27. #include "libavutil/attributes.h"
  28. #include "libavutil/cpu.h"
  29. #include "libavutil/mem_internal.h"
  30. #include "yuv2rgb_altivec.h"
  31. #include "libavutil/ppc/util_altivec.h"
  32. #if HAVE_VSX
  33. #define vzero vec_splat_s32(0)
  34. #if !HAVE_BIGENDIAN
  35. #define GET_LS(a,b,c,s) {\
  36. ls = a;\
  37. a = vec_vsx_ld(((b) << 1) + 16, s);\
  38. }
  39. #define yuv2planeX_8(d1, d2, l1, src, x, perm, filter) do {\
  40. vector signed short ls;\
  41. vector signed int vf1, vf2, i1, i2;\
  42. GET_LS(l1, x, perm, src);\
  43. i1 = vec_mule(filter, ls);\
  44. i2 = vec_mulo(filter, ls);\
  45. vf1 = vec_mergeh(i1, i2);\
  46. vf2 = vec_mergel(i1, i2);\
  47. d1 = vec_add(d1, vf1);\
  48. d2 = vec_add(d2, vf2);\
  49. } while (0)
  50. #define LOAD_FILTER(vf,f) {\
  51. vf = vec_vsx_ld(joffset, f);\
  52. }
  53. #define LOAD_L1(ll1,s,p){\
  54. ll1 = vec_vsx_ld(xoffset, s);\
  55. }
  56. // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
  57. // The neat trick: We only care for half the elements,
  58. // high or low depending on (i<<3)%16 (it's 0 or 8 here),
  59. // and we're going to use vec_mule, so we choose
  60. // carefully how to "unpack" the elements into the even slots.
  61. #define GET_VF4(a, vf, f) {\
  62. vf = (vector signed short)vec_vsx_ld(a << 3, f);\
  63. vf = vec_mergeh(vf, (vector signed short)vzero);\
  64. }
  65. #define FIRST_LOAD(sv, pos, s, per) {}
  66. #define UPDATE_PTR(s0, d0, s1, d1) {}
  67. #define LOAD_SRCV(pos, a, s, per, v0, v1, vf) {\
  68. vf = vec_vsx_ld(pos + a, s);\
  69. }
  70. #define LOAD_SRCV8(pos, a, s, per, v0, v1, vf) LOAD_SRCV(pos, a, s, per, v0, v1, vf)
  71. #define GET_VFD(a, b, f, vf0, vf1, per, vf, off) {\
  72. vf = vec_vsx_ld((a * 2 * filterSize) + (b * 2) + off, f);\
  73. }
  74. #define FUNC(name) name ## _vsx
  75. #include "swscale_ppc_template.c"
  76. #undef FUNC
  77. #undef vzero
  78. #endif /* !HAVE_BIGENDIAN */
  79. static void yuv2plane1_8_u(const int16_t *src, uint8_t *dest, int dstW,
  80. const uint8_t *dither, int offset, int start)
  81. {
  82. int i;
  83. for (i = start; i < dstW; i++) {
  84. int val = (src[i] + dither[(i + offset) & 7]) >> 7;
  85. dest[i] = av_clip_uint8(val);
  86. }
  87. }
  88. static void yuv2plane1_8_vsx(const int16_t *src, uint8_t *dest, int dstW,
  89. const uint8_t *dither, int offset)
  90. {
  91. const int dst_u = -(uintptr_t)dest & 15;
  92. int i, j;
  93. LOCAL_ALIGNED(16, int16_t, val, [16]);
  94. const vec_u16 shifts = (vec_u16) {7, 7, 7, 7, 7, 7, 7, 7};
  95. vec_s16 vi, vileft, ditherleft, ditherright;
  96. vec_u8 vd;
  97. for (j = 0; j < 16; j++) {
  98. val[j] = dither[(dst_u + offset + j) & 7];
  99. }
  100. ditherleft = vec_ld(0, val);
  101. ditherright = vec_ld(0, &val[8]);
  102. yuv2plane1_8_u(src, dest, dst_u, dither, offset, 0);
  103. for (i = dst_u; i < dstW - 15; i += 16) {
  104. vi = vec_vsx_ld(0, &src[i]);
  105. vi = vec_adds(ditherleft, vi);
  106. vileft = vec_sra(vi, shifts);
  107. vi = vec_vsx_ld(0, &src[i + 8]);
  108. vi = vec_adds(ditherright, vi);
  109. vi = vec_sra(vi, shifts);
  110. vd = vec_packsu(vileft, vi);
  111. vec_st(vd, 0, &dest[i]);
  112. }
  113. yuv2plane1_8_u(src, dest, dstW, dither, offset, i);
  114. }
  115. #if !HAVE_BIGENDIAN
  116. #define output_pixel(pos, val) \
  117. if (big_endian) { \
  118. AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  119. } else { \
  120. AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
  121. }
  122. static void yuv2plane1_nbps_u(const int16_t *src, uint16_t *dest, int dstW,
  123. int big_endian, int output_bits, int start)
  124. {
  125. int i;
  126. int shift = 15 - output_bits;
  127. for (i = start; i < dstW; i++) {
  128. int val = src[i] + (1 << (shift - 1));
  129. output_pixel(&dest[i], val);
  130. }
  131. }
  132. static av_always_inline void yuv2plane1_nbps_vsx(const int16_t *src,
  133. uint16_t *dest, int dstW,
  134. const int big_endian,
  135. const int output_bits)
  136. {
  137. const int dst_u = -(uintptr_t)dest & 7;
  138. const int shift = 15 - output_bits;
  139. const int add = (1 << (shift - 1));
  140. const int clip = (1 << output_bits) - 1;
  141. const vec_u16 vadd = (vec_u16) {add, add, add, add, add, add, add, add};
  142. const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);
  143. const vec_u16 vshift = (vec_u16) vec_splat_u16(shift);
  144. const vec_u16 vlargest = (vec_u16) {clip, clip, clip, clip, clip, clip, clip, clip};
  145. vec_u16 v;
  146. int i;
  147. yuv2plane1_nbps_u(src, dest, dst_u, big_endian, output_bits, 0);
  148. for (i = dst_u; i < dstW - 7; i += 8) {
  149. v = vec_vsx_ld(0, (const uint16_t *) &src[i]);
  150. v = vec_add(v, vadd);
  151. v = vec_sr(v, vshift);
  152. v = vec_min(v, vlargest);
  153. v = vec_rl(v, vswap);
  154. vec_st(v, 0, &dest[i]);
  155. }
  156. yuv2plane1_nbps_u(src, dest, dstW, big_endian, output_bits, i);
  157. }
  158. static void yuv2planeX_nbps_u(const int16_t *filter, int filterSize,
  159. const int16_t **src, uint16_t *dest, int dstW,
  160. int big_endian, int output_bits, int start)
  161. {
  162. int i;
  163. int shift = 11 + 16 - output_bits;
  164. for (i = start; i < dstW; i++) {
  165. int val = 1 << (shift - 1);
  166. int j;
  167. for (j = 0; j < filterSize; j++)
  168. val += src[j][i] * filter[j];
  169. output_pixel(&dest[i], val);
  170. }
  171. }
  172. static void yuv2planeX_nbps_vsx(const int16_t *filter, int filterSize,
  173. const int16_t **src, uint16_t *dest, int dstW,
  174. int big_endian, int output_bits)
  175. {
  176. const int dst_u = -(uintptr_t)dest & 7;
  177. const int shift = 11 + 16 - output_bits;
  178. const int add = (1 << (shift - 1));
  179. const int clip = (1 << output_bits) - 1;
  180. const uint16_t swap = big_endian ? 8 : 0;
  181. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  182. const vec_u32 vshift = (vec_u32) {shift, shift, shift, shift};
  183. const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
  184. const vec_u16 vlargest = (vec_u16) {clip, clip, clip, clip, clip, clip, clip, clip};
  185. const vec_s16 vzero = vec_splat_s16(0);
  186. const vec_u8 vperm = (vec_u8) {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
  187. vec_s16 vfilter[MAX_FILTER_SIZE], vin;
  188. vec_u16 v;
  189. vec_u32 vleft, vright, vtmp;
  190. int i, j;
  191. for (i = 0; i < filterSize; i++) {
  192. vfilter[i] = (vec_s16) {filter[i], filter[i], filter[i], filter[i],
  193. filter[i], filter[i], filter[i], filter[i]};
  194. }
  195. yuv2planeX_nbps_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  196. for (i = dst_u; i < dstW - 7; i += 8) {
  197. vleft = vright = vadd;
  198. for (j = 0; j < filterSize; j++) {
  199. vin = vec_vsx_ld(0, &src[j][i]);
  200. vtmp = (vec_u32) vec_mule(vin, vfilter[j]);
  201. vleft = vec_add(vleft, vtmp);
  202. vtmp = (vec_u32) vec_mulo(vin, vfilter[j]);
  203. vright = vec_add(vright, vtmp);
  204. }
  205. vleft = vec_sra(vleft, vshift);
  206. vright = vec_sra(vright, vshift);
  207. v = vec_packsu(vleft, vright);
  208. v = (vec_u16) vec_max((vec_s16) v, vzero);
  209. v = vec_min(v, vlargest);
  210. v = vec_rl(v, vswap);
  211. v = vec_perm(v, v, vperm);
  212. vec_st(v, 0, &dest[i]);
  213. }
  214. yuv2planeX_nbps_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  215. }
  216. #undef output_pixel
  217. #define output_pixel(pos, val, bias, signedness) \
  218. if (big_endian) { \
  219. AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  220. } else { \
  221. AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
  222. }
  223. static void yuv2plane1_16_u(const int32_t *src, uint16_t *dest, int dstW,
  224. int big_endian, int output_bits, int start)
  225. {
  226. int i;
  227. const int shift = 3;
  228. for (i = start; i < dstW; i++) {
  229. int val = src[i] + (1 << (shift - 1));
  230. output_pixel(&dest[i], val, 0, uint);
  231. }
  232. }
  233. static av_always_inline void yuv2plane1_16_vsx(const int32_t *src,
  234. uint16_t *dest, int dstW,
  235. const int big_endian,
  236. int output_bits)
  237. {
  238. const int dst_u = -(uintptr_t)dest & 7;
  239. const int shift = 3;
  240. const int add = (1 << (shift - 1));
  241. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  242. const vec_u16 vswap = (vec_u16) vec_splat_u16(big_endian ? 8 : 0);
  243. const vec_u32 vshift = (vec_u32) vec_splat_u32(shift);
  244. vec_u32 v, v2;
  245. vec_u16 vd;
  246. int i;
  247. yuv2plane1_16_u(src, dest, dst_u, big_endian, output_bits, 0);
  248. for (i = dst_u; i < dstW - 7; i += 8) {
  249. v = vec_vsx_ld(0, (const uint32_t *) &src[i]);
  250. v = vec_add(v, vadd);
  251. v = vec_sr(v, vshift);
  252. v2 = vec_vsx_ld(0, (const uint32_t *) &src[i + 4]);
  253. v2 = vec_add(v2, vadd);
  254. v2 = vec_sr(v2, vshift);
  255. vd = vec_packsu(v, v2);
  256. vd = vec_rl(vd, vswap);
  257. vec_st(vd, 0, &dest[i]);
  258. }
  259. yuv2plane1_16_u(src, dest, dstW, big_endian, output_bits, i);
  260. }
  261. #if HAVE_POWER8
  262. static void yuv2planeX_16_u(const int16_t *filter, int filterSize,
  263. const int32_t **src, uint16_t *dest, int dstW,
  264. int big_endian, int output_bits, int start)
  265. {
  266. int i;
  267. int shift = 15;
  268. for (i = start; i < dstW; i++) {
  269. int val = 1 << (shift - 1);
  270. int j;
  271. /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
  272. * filters (or anything with negative coeffs, the range can be slightly
  273. * wider in both directions. To account for this overflow, we subtract
  274. * a constant so it always fits in the signed range (assuming a
  275. * reasonable filterSize), and re-add that at the end. */
  276. val -= 0x40000000;
  277. for (j = 0; j < filterSize; j++)
  278. val += src[j][i] * (unsigned)filter[j];
  279. output_pixel(&dest[i], val, 0x8000, int);
  280. }
  281. }
  282. static void yuv2planeX_16_vsx(const int16_t *filter, int filterSize,
  283. const int32_t **src, uint16_t *dest, int dstW,
  284. int big_endian, int output_bits)
  285. {
  286. const int dst_u = -(uintptr_t)dest & 7;
  287. const int shift = 15;
  288. const int bias = 0x8000;
  289. const int add = (1 << (shift - 1)) - 0x40000000;
  290. const uint16_t swap = big_endian ? 8 : 0;
  291. const vec_u32 vadd = (vec_u32) {add, add, add, add};
  292. const vec_u32 vshift = (vec_u32) {shift, shift, shift, shift};
  293. const vec_u16 vswap = (vec_u16) {swap, swap, swap, swap, swap, swap, swap, swap};
  294. const vec_u16 vbias = (vec_u16) {bias, bias, bias, bias, bias, bias, bias, bias};
  295. vec_s32 vfilter[MAX_FILTER_SIZE];
  296. vec_u16 v;
  297. vec_u32 vleft, vright, vtmp;
  298. vec_s32 vin32l, vin32r;
  299. int i, j;
  300. for (i = 0; i < filterSize; i++) {
  301. vfilter[i] = (vec_s32) {filter[i], filter[i], filter[i], filter[i]};
  302. }
  303. yuv2planeX_16_u(filter, filterSize, src, dest, dst_u, big_endian, output_bits, 0);
  304. for (i = dst_u; i < dstW - 7; i += 8) {
  305. vleft = vright = vadd;
  306. for (j = 0; j < filterSize; j++) {
  307. vin32l = vec_vsx_ld(0, &src[j][i]);
  308. vin32r = vec_vsx_ld(0, &src[j][i + 4]);
  309. vtmp = (vec_u32) vec_mul(vin32l, vfilter[j]);
  310. vleft = vec_add(vleft, vtmp);
  311. vtmp = (vec_u32) vec_mul(vin32r, vfilter[j]);
  312. vright = vec_add(vright, vtmp);
  313. }
  314. vleft = vec_sra(vleft, vshift);
  315. vright = vec_sra(vright, vshift);
  316. v = (vec_u16) vec_packs((vec_s32) vleft, (vec_s32) vright);
  317. v = vec_add(v, vbias);
  318. v = vec_rl(v, vswap);
  319. vec_st(v, 0, &dest[i]);
  320. }
  321. yuv2planeX_16_u(filter, filterSize, src, dest, dstW, big_endian, output_bits, i);
  322. }
  323. #endif /* HAVE_POWER8 */
  324. #define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
  325. yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  326. yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t)
  327. #define yuv2NBPS1(bits, BE_LE, is_be, template_size, typeX_t) \
  328. static void yuv2plane1_ ## bits ## BE_LE ## _vsx(const int16_t *src, \
  329. uint8_t *dest, int dstW, \
  330. const uint8_t *dither, int offset) \
  331. { \
  332. yuv2plane1_ ## template_size ## _vsx((const typeX_t *) src, \
  333. (uint16_t *) dest, dstW, is_be, bits); \
  334. }
  335. #define yuv2NBPSX(bits, BE_LE, is_be, template_size, typeX_t) \
  336. static void yuv2planeX_ ## bits ## BE_LE ## _vsx(const int16_t *filter, int filterSize, \
  337. const int16_t **src, uint8_t *dest, int dstW, \
  338. const uint8_t *dither, int offset)\
  339. { \
  340. yuv2planeX_## template_size ## _vsx(filter, \
  341. filterSize, (const typeX_t **) src, \
  342. (uint16_t *) dest, dstW, is_be, bits); \
  343. }
  344. yuv2NBPS( 9, BE, 1, nbps, int16_t)
  345. yuv2NBPS( 9, LE, 0, nbps, int16_t)
  346. yuv2NBPS(10, BE, 1, nbps, int16_t)
  347. yuv2NBPS(10, LE, 0, nbps, int16_t)
  348. yuv2NBPS(12, BE, 1, nbps, int16_t)
  349. yuv2NBPS(12, LE, 0, nbps, int16_t)
  350. yuv2NBPS(14, BE, 1, nbps, int16_t)
  351. yuv2NBPS(14, LE, 0, nbps, int16_t)
  352. yuv2NBPS1(16, BE, 1, 16, int32_t)
  353. yuv2NBPS1(16, LE, 0, 16, int32_t)
  354. #if HAVE_POWER8
  355. yuv2NBPSX(16, BE, 1, 16, int32_t)
  356. yuv2NBPSX(16, LE, 0, 16, int32_t)
  357. #endif
  358. #define WRITERGB \
  359. R_l = vec_max(R_l, zero32); \
  360. R_r = vec_max(R_r, zero32); \
  361. G_l = vec_max(G_l, zero32); \
  362. G_r = vec_max(G_r, zero32); \
  363. B_l = vec_max(B_l, zero32); \
  364. B_r = vec_max(B_r, zero32); \
  365. \
  366. R_l = vec_min(R_l, rgbclip); \
  367. R_r = vec_min(R_r, rgbclip); \
  368. G_l = vec_min(G_l, rgbclip); \
  369. G_r = vec_min(G_r, rgbclip); \
  370. B_l = vec_min(B_l, rgbclip); \
  371. B_r = vec_min(B_r, rgbclip); \
  372. \
  373. R_l = vec_sr(R_l, shift22); \
  374. R_r = vec_sr(R_r, shift22); \
  375. G_l = vec_sr(G_l, shift22); \
  376. G_r = vec_sr(G_r, shift22); \
  377. B_l = vec_sr(B_l, shift22); \
  378. B_r = vec_sr(B_r, shift22); \
  379. \
  380. rd16 = vec_packsu(R_l, R_r); \
  381. gd16 = vec_packsu(G_l, G_r); \
  382. bd16 = vec_packsu(B_l, B_r); \
  383. rd = vec_packsu(rd16, zero16); \
  384. gd = vec_packsu(gd16, zero16); \
  385. bd = vec_packsu(bd16, zero16); \
  386. \
  387. switch(target) { \
  388. case AV_PIX_FMT_RGB24: \
  389. out0 = vec_perm(rd, gd, perm3rg0); \
  390. out0 = vec_perm(out0, bd, perm3tb0); \
  391. out1 = vec_perm(rd, gd, perm3rg1); \
  392. out1 = vec_perm(out1, bd, perm3tb1); \
  393. \
  394. vec_vsx_st(out0, 0, dest); \
  395. vec_vsx_st(out1, 16, dest); \
  396. \
  397. dest += 24; \
  398. break; \
  399. case AV_PIX_FMT_BGR24: \
  400. out0 = vec_perm(bd, gd, perm3rg0); \
  401. out0 = vec_perm(out0, rd, perm3tb0); \
  402. out1 = vec_perm(bd, gd, perm3rg1); \
  403. out1 = vec_perm(out1, rd, perm3tb1); \
  404. \
  405. vec_vsx_st(out0, 0, dest); \
  406. vec_vsx_st(out1, 16, dest); \
  407. \
  408. dest += 24; \
  409. break; \
  410. case AV_PIX_FMT_BGRA: \
  411. out0 = vec_mergeh(bd, gd); \
  412. out1 = vec_mergeh(rd, ad); \
  413. \
  414. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  415. vec_vsx_st(tmp8, 0, dest); \
  416. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  417. vec_vsx_st(tmp8, 16, dest); \
  418. \
  419. dest += 32; \
  420. break; \
  421. case AV_PIX_FMT_RGBA: \
  422. out0 = vec_mergeh(rd, gd); \
  423. out1 = vec_mergeh(bd, ad); \
  424. \
  425. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  426. vec_vsx_st(tmp8, 0, dest); \
  427. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  428. vec_vsx_st(tmp8, 16, dest); \
  429. \
  430. dest += 32; \
  431. break; \
  432. case AV_PIX_FMT_ARGB: \
  433. out0 = vec_mergeh(ad, rd); \
  434. out1 = vec_mergeh(gd, bd); \
  435. \
  436. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  437. vec_vsx_st(tmp8, 0, dest); \
  438. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  439. vec_vsx_st(tmp8, 16, dest); \
  440. \
  441. dest += 32; \
  442. break; \
  443. case AV_PIX_FMT_ABGR: \
  444. out0 = vec_mergeh(ad, bd); \
  445. out1 = vec_mergeh(gd, rd); \
  446. \
  447. tmp8 = (vec_u8) vec_mergeh((vec_u16) out0, (vec_u16) out1); \
  448. vec_vsx_st(tmp8, 0, dest); \
  449. tmp8 = (vec_u8) vec_mergel((vec_u16) out0, (vec_u16) out1); \
  450. vec_vsx_st(tmp8, 16, dest); \
  451. \
  452. dest += 32; \
  453. break; \
  454. }
  455. static av_always_inline void
  456. yuv2rgb_full_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
  457. const int16_t **lumSrc, int lumFilterSize,
  458. const int16_t *chrFilter, const int16_t **chrUSrc,
  459. const int16_t **chrVSrc, int chrFilterSize,
  460. const int16_t **alpSrc, uint8_t *dest,
  461. int dstW, int y, enum AVPixelFormat target, int hasAlpha)
  462. {
  463. vec_s16 vv;
  464. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  465. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  466. vec_s32 tmp, tmp2, tmp3, tmp4;
  467. vec_u16 rd16, gd16, bd16;
  468. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  469. vec_s16 vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
  470. const vec_s32 ystart = vec_splats(1 << 9);
  471. const vec_s32 uvstart = vec_splats((1 << 9) - (128 << 19));
  472. const vec_u16 zero16 = vec_splat_u16(0);
  473. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  474. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  475. const vec_s32 y_add = vec_splats(1 << 21);
  476. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  477. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  478. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  479. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  480. const vec_s32 rgbclip = vec_splats(1 << 30);
  481. const vec_s32 zero32 = vec_splat_s32(0);
  482. const vec_u32 shift22 = vec_splats(22U);
  483. const vec_u32 shift10 = vec_splat_u32(10);
  484. int i, j;
  485. // Various permutations
  486. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  487. 0x1, 0x11, 0,
  488. 0x2, 0x12, 0,
  489. 0x3, 0x13, 0,
  490. 0x4, 0x14, 0,
  491. 0x5 };
  492. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  493. 0x6, 0x16, 0,
  494. 0x7, 0x17, 0 };
  495. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  496. 0x3, 0x4, 0x11,
  497. 0x6, 0x7, 0x12,
  498. 0x9, 0xa, 0x13,
  499. 0xc, 0xd, 0x14,
  500. 0xf };
  501. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  502. 0x2, 0x3, 0x16,
  503. 0x5, 0x6, 0x17 };
  504. ad = vec_splats((uint8_t) 255);
  505. for (i = 0; i < lumFilterSize; i++)
  506. vlumFilter[i] = vec_splats(lumFilter[i]);
  507. for (i = 0; i < chrFilterSize; i++)
  508. vchrFilter[i] = vec_splats(chrFilter[i]);
  509. for (i = 0; i < dstW; i += 8) {
  510. vy32_l =
  511. vy32_r = ystart;
  512. vu32_l =
  513. vu32_r =
  514. vv32_l =
  515. vv32_r = uvstart;
  516. for (j = 0; j < lumFilterSize; j++) {
  517. vv = vec_ld(0, &lumSrc[j][i]);
  518. tmp = vec_mule(vv, vlumFilter[j]);
  519. tmp2 = vec_mulo(vv, vlumFilter[j]);
  520. tmp3 = vec_mergeh(tmp, tmp2);
  521. tmp4 = vec_mergel(tmp, tmp2);
  522. vy32_l = vec_adds(vy32_l, tmp3);
  523. vy32_r = vec_adds(vy32_r, tmp4);
  524. }
  525. for (j = 0; j < chrFilterSize; j++) {
  526. vv = vec_ld(0, &chrUSrc[j][i]);
  527. tmp = vec_mule(vv, vchrFilter[j]);
  528. tmp2 = vec_mulo(vv, vchrFilter[j]);
  529. tmp3 = vec_mergeh(tmp, tmp2);
  530. tmp4 = vec_mergel(tmp, tmp2);
  531. vu32_l = vec_adds(vu32_l, tmp3);
  532. vu32_r = vec_adds(vu32_r, tmp4);
  533. vv = vec_ld(0, &chrVSrc[j][i]);
  534. tmp = vec_mule(vv, vchrFilter[j]);
  535. tmp2 = vec_mulo(vv, vchrFilter[j]);
  536. tmp3 = vec_mergeh(tmp, tmp2);
  537. tmp4 = vec_mergel(tmp, tmp2);
  538. vv32_l = vec_adds(vv32_l, tmp3);
  539. vv32_r = vec_adds(vv32_r, tmp4);
  540. }
  541. vy32_l = vec_sra(vy32_l, shift10);
  542. vy32_r = vec_sra(vy32_r, shift10);
  543. vu32_l = vec_sra(vu32_l, shift10);
  544. vu32_r = vec_sra(vu32_r, shift10);
  545. vv32_l = vec_sra(vv32_l, shift10);
  546. vv32_r = vec_sra(vv32_r, shift10);
  547. vy32_l = vec_sub(vy32_l, y_offset);
  548. vy32_r = vec_sub(vy32_r, y_offset);
  549. vy32_l = vec_mul(vy32_l, y_coeff);
  550. vy32_r = vec_mul(vy32_r, y_coeff);
  551. vy32_l = vec_add(vy32_l, y_add);
  552. vy32_r = vec_add(vy32_r, y_add);
  553. R_l = vec_mul(vv32_l, v2r_coeff);
  554. R_l = vec_add(R_l, vy32_l);
  555. R_r = vec_mul(vv32_r, v2r_coeff);
  556. R_r = vec_add(R_r, vy32_r);
  557. G_l = vec_mul(vv32_l, v2g_coeff);
  558. tmp32 = vec_mul(vu32_l, u2g_coeff);
  559. G_l = vec_add(G_l, vy32_l);
  560. G_l = vec_add(G_l, tmp32);
  561. G_r = vec_mul(vv32_r, v2g_coeff);
  562. tmp32 = vec_mul(vu32_r, u2g_coeff);
  563. G_r = vec_add(G_r, vy32_r);
  564. G_r = vec_add(G_r, tmp32);
  565. B_l = vec_mul(vu32_l, u2b_coeff);
  566. B_l = vec_add(B_l, vy32_l);
  567. B_r = vec_mul(vu32_r, u2b_coeff);
  568. B_r = vec_add(B_r, vy32_r);
  569. WRITERGB
  570. }
  571. }
  572. #define SETUP(x, buf0, alpha1, buf1, alpha) { \
  573. x = vec_ld(0, buf0); \
  574. tmp = vec_mule(x, alpha1); \
  575. tmp2 = vec_mulo(x, alpha1); \
  576. tmp3 = vec_mergeh(tmp, tmp2); \
  577. tmp4 = vec_mergel(tmp, tmp2); \
  578. \
  579. x = vec_ld(0, buf1); \
  580. tmp = vec_mule(x, alpha); \
  581. tmp2 = vec_mulo(x, alpha); \
  582. tmp5 = vec_mergeh(tmp, tmp2); \
  583. tmp6 = vec_mergel(tmp, tmp2); \
  584. \
  585. tmp3 = vec_add(tmp3, tmp5); \
  586. tmp4 = vec_add(tmp4, tmp6); \
  587. }
  588. static av_always_inline void
  589. yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  590. const int16_t *ubuf[2], const int16_t *vbuf[2],
  591. const int16_t *abuf[2], uint8_t *dest, int dstW,
  592. int yalpha, int uvalpha, int y,
  593. enum AVPixelFormat target, int hasAlpha)
  594. {
  595. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  596. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  597. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  598. *abuf0 = hasAlpha ? abuf[0] : NULL,
  599. *abuf1 = hasAlpha ? abuf[1] : NULL;
  600. const int16_t yalpha1 = 4096 - yalpha;
  601. const int16_t uvalpha1 = 4096 - uvalpha;
  602. vec_s16 vy, vu, vv, A = vec_splat_s16(0);
  603. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  604. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  605. vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  606. vec_u16 rd16, gd16, bd16;
  607. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  608. const vec_s16 vyalpha1 = vec_splats(yalpha1);
  609. const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
  610. const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
  611. const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
  612. const vec_u16 zero16 = vec_splat_u16(0);
  613. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  614. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  615. const vec_s32 y_add = vec_splats(1 << 21);
  616. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  617. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  618. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  619. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  620. const vec_s32 rgbclip = vec_splats(1 << 30);
  621. const vec_s32 zero32 = vec_splat_s32(0);
  622. const vec_u32 shift19 = vec_splats(19U);
  623. const vec_u32 shift22 = vec_splats(22U);
  624. const vec_u32 shift10 = vec_splat_u32(10);
  625. const vec_s32 dec128 = vec_splats(128 << 19);
  626. const vec_s32 add18 = vec_splats(1 << 18);
  627. int i;
  628. // Various permutations
  629. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  630. 0x1, 0x11, 0,
  631. 0x2, 0x12, 0,
  632. 0x3, 0x13, 0,
  633. 0x4, 0x14, 0,
  634. 0x5 };
  635. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  636. 0x6, 0x16, 0,
  637. 0x7, 0x17, 0 };
  638. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  639. 0x3, 0x4, 0x11,
  640. 0x6, 0x7, 0x12,
  641. 0x9, 0xa, 0x13,
  642. 0xc, 0xd, 0x14,
  643. 0xf };
  644. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  645. 0x2, 0x3, 0x16,
  646. 0x5, 0x6, 0x17 };
  647. av_assert2(yalpha <= 4096U);
  648. av_assert2(uvalpha <= 4096U);
  649. for (i = 0; i < dstW; i += 8) {
  650. SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
  651. vy32_l = vec_sra(tmp3, shift10);
  652. vy32_r = vec_sra(tmp4, shift10);
  653. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  654. tmp3 = vec_sub(tmp3, dec128);
  655. tmp4 = vec_sub(tmp4, dec128);
  656. vu32_l = vec_sra(tmp3, shift10);
  657. vu32_r = vec_sra(tmp4, shift10);
  658. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  659. tmp3 = vec_sub(tmp3, dec128);
  660. tmp4 = vec_sub(tmp4, dec128);
  661. vv32_l = vec_sra(tmp3, shift10);
  662. vv32_r = vec_sra(tmp4, shift10);
  663. if (hasAlpha) {
  664. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  665. tmp3 = vec_add(tmp3, add18);
  666. tmp4 = vec_add(tmp4, add18);
  667. tmp3 = vec_sra(tmp3, shift19);
  668. tmp4 = vec_sra(tmp4, shift19);
  669. A = vec_packs(tmp3, tmp4);
  670. ad = vec_packsu(A, (vec_s16) zero16);
  671. } else {
  672. ad = vec_splats((uint8_t) 255);
  673. }
  674. vy32_l = vec_sub(vy32_l, y_offset);
  675. vy32_r = vec_sub(vy32_r, y_offset);
  676. vy32_l = vec_mul(vy32_l, y_coeff);
  677. vy32_r = vec_mul(vy32_r, y_coeff);
  678. vy32_l = vec_add(vy32_l, y_add);
  679. vy32_r = vec_add(vy32_r, y_add);
  680. R_l = vec_mul(vv32_l, v2r_coeff);
  681. R_l = vec_add(R_l, vy32_l);
  682. R_r = vec_mul(vv32_r, v2r_coeff);
  683. R_r = vec_add(R_r, vy32_r);
  684. G_l = vec_mul(vv32_l, v2g_coeff);
  685. tmp32 = vec_mul(vu32_l, u2g_coeff);
  686. G_l = vec_add(G_l, vy32_l);
  687. G_l = vec_add(G_l, tmp32);
  688. G_r = vec_mul(vv32_r, v2g_coeff);
  689. tmp32 = vec_mul(vu32_r, u2g_coeff);
  690. G_r = vec_add(G_r, vy32_r);
  691. G_r = vec_add(G_r, tmp32);
  692. B_l = vec_mul(vu32_l, u2b_coeff);
  693. B_l = vec_add(B_l, vy32_l);
  694. B_r = vec_mul(vu32_r, u2b_coeff);
  695. B_r = vec_add(B_r, vy32_r);
  696. WRITERGB
  697. }
  698. }
  699. static av_always_inline void
  700. yuv2rgb_2_vsx_template(SwsContext *c, const int16_t *buf[2],
  701. const int16_t *ubuf[2], const int16_t *vbuf[2],
  702. const int16_t *abuf[2], uint8_t *dest, int dstW,
  703. int yalpha, int uvalpha, int y,
  704. enum AVPixelFormat target, int hasAlpha)
  705. {
  706. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  707. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  708. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  709. *abuf0 = hasAlpha ? abuf[0] : NULL,
  710. *abuf1 = hasAlpha ? abuf[1] : NULL;
  711. const int16_t yalpha1 = 4096 - yalpha;
  712. const int16_t uvalpha1 = 4096 - uvalpha;
  713. vec_s16 vy, vu, vv, A = vec_splat_s16(0);
  714. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
  715. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r, vud32_l, vud32_r, vvd32_l, vvd32_r;
  716. vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
  717. vec_u16 rd16, gd16, bd16;
  718. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  719. const vec_s16 vyalpha1 = vec_splats(yalpha1);
  720. const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
  721. const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
  722. const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
  723. const vec_u16 zero16 = vec_splat_u16(0);
  724. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  725. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  726. const vec_s32 y_add = vec_splats(1 << 21);
  727. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  728. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  729. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  730. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  731. const vec_s32 rgbclip = vec_splats(1 << 30);
  732. const vec_s32 zero32 = vec_splat_s32(0);
  733. const vec_u32 shift19 = vec_splats(19U);
  734. const vec_u32 shift22 = vec_splats(22U);
  735. const vec_u32 shift10 = vec_splat_u32(10);
  736. const vec_s32 dec128 = vec_splats(128 << 19);
  737. const vec_s32 add18 = vec_splats(1 << 18);
  738. int i;
  739. // Various permutations
  740. const vec_u8 doubleleft = (vec_u8) {0, 1, 2, 3,
  741. 0, 1, 2, 3,
  742. 4, 5, 6, 7,
  743. 4, 5, 6, 7 };
  744. const vec_u8 doubleright = (vec_u8) {8, 9, 10, 11,
  745. 8, 9, 10, 11,
  746. 12, 13, 14, 15,
  747. 12, 13, 14, 15 };
  748. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  749. 0x1, 0x11, 0,
  750. 0x2, 0x12, 0,
  751. 0x3, 0x13, 0,
  752. 0x4, 0x14, 0,
  753. 0x5 };
  754. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  755. 0x6, 0x16, 0,
  756. 0x7, 0x17, 0 };
  757. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  758. 0x3, 0x4, 0x11,
  759. 0x6, 0x7, 0x12,
  760. 0x9, 0xa, 0x13,
  761. 0xc, 0xd, 0x14,
  762. 0xf };
  763. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  764. 0x2, 0x3, 0x16,
  765. 0x5, 0x6, 0x17 };
  766. av_assert2(yalpha <= 4096U);
  767. av_assert2(uvalpha <= 4096U);
  768. for (i = 0; i < (dstW + 1) >> 1; i += 8) {
  769. SETUP(vy, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha);
  770. vy32_l = vec_sra(tmp3, shift10);
  771. vy32_r = vec_sra(tmp4, shift10);
  772. SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
  773. tmp3 = vec_sub(tmp3, dec128);
  774. tmp4 = vec_sub(tmp4, dec128);
  775. vu32_l = vec_sra(tmp3, shift10);
  776. vu32_r = vec_sra(tmp4, shift10);
  777. SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
  778. tmp3 = vec_sub(tmp3, dec128);
  779. tmp4 = vec_sub(tmp4, dec128);
  780. vv32_l = vec_sra(tmp3, shift10);
  781. vv32_r = vec_sra(tmp4, shift10);
  782. if (hasAlpha) {
  783. SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
  784. tmp3 = vec_add(tmp3, add18);
  785. tmp4 = vec_add(tmp4, add18);
  786. tmp3 = vec_sra(tmp3, shift19);
  787. tmp4 = vec_sra(tmp4, shift19);
  788. A = vec_packs(tmp3, tmp4);
  789. ad = vec_packsu(A, (vec_s16) zero16);
  790. } else {
  791. ad = vec_splats((uint8_t) 255);
  792. }
  793. vy32_l = vec_sub(vy32_l, y_offset);
  794. vy32_r = vec_sub(vy32_r, y_offset);
  795. vy32_l = vec_mul(vy32_l, y_coeff);
  796. vy32_r = vec_mul(vy32_r, y_coeff);
  797. vy32_l = vec_add(vy32_l, y_add);
  798. vy32_r = vec_add(vy32_r, y_add);
  799. // Use the first UV half
  800. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  801. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  802. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  803. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  804. R_l = vec_mul(vvd32_l, v2r_coeff);
  805. R_l = vec_add(R_l, vy32_l);
  806. R_r = vec_mul(vvd32_r, v2r_coeff);
  807. R_r = vec_add(R_r, vy32_r);
  808. G_l = vec_mul(vvd32_l, v2g_coeff);
  809. tmp32 = vec_mul(vud32_l, u2g_coeff);
  810. G_l = vec_add(G_l, vy32_l);
  811. G_l = vec_add(G_l, tmp32);
  812. G_r = vec_mul(vvd32_r, v2g_coeff);
  813. tmp32 = vec_mul(vud32_r, u2g_coeff);
  814. G_r = vec_add(G_r, vy32_r);
  815. G_r = vec_add(G_r, tmp32);
  816. B_l = vec_mul(vud32_l, u2b_coeff);
  817. B_l = vec_add(B_l, vy32_l);
  818. B_r = vec_mul(vud32_r, u2b_coeff);
  819. B_r = vec_add(B_r, vy32_r);
  820. WRITERGB
  821. // New Y for the second half
  822. SETUP(vy, &buf0[i * 2 + 8], vyalpha1, &buf1[i * 2 + 8], vyalpha);
  823. vy32_l = vec_sra(tmp3, shift10);
  824. vy32_r = vec_sra(tmp4, shift10);
  825. vy32_l = vec_sub(vy32_l, y_offset);
  826. vy32_r = vec_sub(vy32_r, y_offset);
  827. vy32_l = vec_mul(vy32_l, y_coeff);
  828. vy32_r = vec_mul(vy32_r, y_coeff);
  829. vy32_l = vec_add(vy32_l, y_add);
  830. vy32_r = vec_add(vy32_r, y_add);
  831. // Second UV half
  832. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  833. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  834. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  835. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  836. R_l = vec_mul(vvd32_l, v2r_coeff);
  837. R_l = vec_add(R_l, vy32_l);
  838. R_r = vec_mul(vvd32_r, v2r_coeff);
  839. R_r = vec_add(R_r, vy32_r);
  840. G_l = vec_mul(vvd32_l, v2g_coeff);
  841. tmp32 = vec_mul(vud32_l, u2g_coeff);
  842. G_l = vec_add(G_l, vy32_l);
  843. G_l = vec_add(G_l, tmp32);
  844. G_r = vec_mul(vvd32_r, v2g_coeff);
  845. tmp32 = vec_mul(vud32_r, u2g_coeff);
  846. G_r = vec_add(G_r, vy32_r);
  847. G_r = vec_add(G_r, tmp32);
  848. B_l = vec_mul(vud32_l, u2b_coeff);
  849. B_l = vec_add(B_l, vy32_l);
  850. B_r = vec_mul(vud32_r, u2b_coeff);
  851. B_r = vec_add(B_r, vy32_r);
  852. WRITERGB
  853. }
  854. }
  855. #undef SETUP
  856. static av_always_inline void
  857. yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
  858. const int16_t *ubuf[2], const int16_t *vbuf[2],
  859. const int16_t *abuf0, uint8_t *dest, int dstW,
  860. int uvalpha, int y, enum AVPixelFormat target,
  861. int hasAlpha)
  862. {
  863. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  864. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  865. vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
  866. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  867. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  868. vec_u16 rd16, gd16, bd16;
  869. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  870. const vec_u16 zero16 = vec_splat_u16(0);
  871. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  872. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  873. const vec_s32 y_add = vec_splats(1 << 21);
  874. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  875. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  876. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  877. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  878. const vec_s32 rgbclip = vec_splats(1 << 30);
  879. const vec_s32 zero32 = vec_splat_s32(0);
  880. const vec_u32 shift2 = vec_splat_u32(2);
  881. const vec_u32 shift22 = vec_splats(22U);
  882. const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
  883. const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
  884. const vec_s16 mul4 = vec_splat_s16(4);
  885. const vec_s16 mul8 = vec_splat_s16(8);
  886. const vec_s16 add64 = vec_splat_s16(64);
  887. const vec_u16 shift7 = vec_splat_u16(7);
  888. const vec_s16 max255 = vec_splat_s16(255);
  889. int i;
  890. // Various permutations
  891. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  892. 0x1, 0x11, 0,
  893. 0x2, 0x12, 0,
  894. 0x3, 0x13, 0,
  895. 0x4, 0x14, 0,
  896. 0x5 };
  897. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  898. 0x6, 0x16, 0,
  899. 0x7, 0x17, 0 };
  900. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  901. 0x3, 0x4, 0x11,
  902. 0x6, 0x7, 0x12,
  903. 0x9, 0xa, 0x13,
  904. 0xc, 0xd, 0x14,
  905. 0xf };
  906. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  907. 0x2, 0x3, 0x16,
  908. 0x5, 0x6, 0x17 };
  909. for (i = 0; i < dstW; i += 8) { // The x86 asm also overwrites padding bytes.
  910. vy = vec_ld(0, &buf0[i]);
  911. vy32_l = vec_unpackh(vy);
  912. vy32_r = vec_unpackl(vy);
  913. vy32_l = vec_sl(vy32_l, shift2);
  914. vy32_r = vec_sl(vy32_r, shift2);
  915. vu = vec_ld(0, &ubuf0[i]);
  916. vv = vec_ld(0, &vbuf0[i]);
  917. if (uvalpha < 2048) {
  918. vu = (vec_s16) vec_sub((vec_u16) vu, sub7);
  919. vv = (vec_s16) vec_sub((vec_u16) vv, sub7);
  920. tmp32 = vec_mule(vu, mul4);
  921. tmp32_2 = vec_mulo(vu, mul4);
  922. vu32_l = vec_mergeh(tmp32, tmp32_2);
  923. vu32_r = vec_mergel(tmp32, tmp32_2);
  924. tmp32 = vec_mule(vv, mul4);
  925. tmp32_2 = vec_mulo(vv, mul4);
  926. vv32_l = vec_mergeh(tmp32, tmp32_2);
  927. vv32_r = vec_mergel(tmp32, tmp32_2);
  928. } else {
  929. tmp16 = vec_ld(0, &ubuf1[i]);
  930. vu = vec_add(vu, tmp16);
  931. vu = (vec_s16) vec_sub((vec_u16) vu, sub8);
  932. tmp16 = vec_ld(0, &vbuf1[i]);
  933. vv = vec_add(vv, tmp16);
  934. vv = (vec_s16) vec_sub((vec_u16) vv, sub8);
  935. vu32_l = vec_mule(vu, mul8);
  936. vu32_r = vec_mulo(vu, mul8);
  937. vv32_l = vec_mule(vv, mul8);
  938. vv32_r = vec_mulo(vv, mul8);
  939. }
  940. if (hasAlpha) {
  941. A = vec_ld(0, &abuf0[i]);
  942. A = vec_add(A, add64);
  943. A = vec_sr(A, shift7);
  944. A = vec_max(A, max255);
  945. ad = vec_packsu(A, (vec_s16) zero16);
  946. } else {
  947. ad = vec_splats((uint8_t) 255);
  948. }
  949. vy32_l = vec_sub(vy32_l, y_offset);
  950. vy32_r = vec_sub(vy32_r, y_offset);
  951. vy32_l = vec_mul(vy32_l, y_coeff);
  952. vy32_r = vec_mul(vy32_r, y_coeff);
  953. vy32_l = vec_add(vy32_l, y_add);
  954. vy32_r = vec_add(vy32_r, y_add);
  955. R_l = vec_mul(vv32_l, v2r_coeff);
  956. R_l = vec_add(R_l, vy32_l);
  957. R_r = vec_mul(vv32_r, v2r_coeff);
  958. R_r = vec_add(R_r, vy32_r);
  959. G_l = vec_mul(vv32_l, v2g_coeff);
  960. tmp32 = vec_mul(vu32_l, u2g_coeff);
  961. G_l = vec_add(G_l, vy32_l);
  962. G_l = vec_add(G_l, tmp32);
  963. G_r = vec_mul(vv32_r, v2g_coeff);
  964. tmp32 = vec_mul(vu32_r, u2g_coeff);
  965. G_r = vec_add(G_r, vy32_r);
  966. G_r = vec_add(G_r, tmp32);
  967. B_l = vec_mul(vu32_l, u2b_coeff);
  968. B_l = vec_add(B_l, vy32_l);
  969. B_r = vec_mul(vu32_r, u2b_coeff);
  970. B_r = vec_add(B_r, vy32_r);
  971. WRITERGB
  972. }
  973. }
  974. static av_always_inline void
  975. yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
  976. const int16_t *ubuf[2], const int16_t *vbuf[2],
  977. const int16_t *abuf0, uint8_t *dest, int dstW,
  978. int uvalpha, int y, enum AVPixelFormat target,
  979. int hasAlpha)
  980. {
  981. const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
  982. const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
  983. vec_s16 vy, vu, vv, A = vec_splat_s16(0), tmp16;
  984. vec_s32 vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
  985. vec_s32 vud32_l, vud32_r, vvd32_l, vvd32_r;
  986. vec_s32 R_l, R_r, G_l, G_r, B_l, B_r;
  987. vec_u16 rd16, gd16, bd16;
  988. vec_u8 rd, bd, gd, ad, out0, out1, tmp8;
  989. const vec_u16 zero16 = vec_splat_u16(0);
  990. const vec_s32 y_offset = vec_splats(c->yuv2rgb_y_offset);
  991. const vec_s32 y_coeff = vec_splats(c->yuv2rgb_y_coeff);
  992. const vec_s32 y_add = vec_splats(1 << 21);
  993. const vec_s32 v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
  994. const vec_s32 v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
  995. const vec_s32 u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
  996. const vec_s32 u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
  997. const vec_s32 rgbclip = vec_splats(1 << 30);
  998. const vec_s32 zero32 = vec_splat_s32(0);
  999. const vec_u32 shift2 = vec_splat_u32(2);
  1000. const vec_u32 shift22 = vec_splats(22U);
  1001. const vec_u16 sub7 = vec_splats((uint16_t) (128 << 7));
  1002. const vec_u16 sub8 = vec_splats((uint16_t) (128 << 8));
  1003. const vec_s16 mul4 = vec_splat_s16(4);
  1004. const vec_s16 mul8 = vec_splat_s16(8);
  1005. const vec_s16 add64 = vec_splat_s16(64);
  1006. const vec_u16 shift7 = vec_splat_u16(7);
  1007. const vec_s16 max255 = vec_splat_s16(255);
  1008. int i;
  1009. // Various permutations
  1010. const vec_u8 doubleleft = (vec_u8) {0, 1, 2, 3,
  1011. 0, 1, 2, 3,
  1012. 4, 5, 6, 7,
  1013. 4, 5, 6, 7 };
  1014. const vec_u8 doubleright = (vec_u8) {8, 9, 10, 11,
  1015. 8, 9, 10, 11,
  1016. 12, 13, 14, 15,
  1017. 12, 13, 14, 15 };
  1018. const vec_u8 perm3rg0 = (vec_u8) {0x0, 0x10, 0,
  1019. 0x1, 0x11, 0,
  1020. 0x2, 0x12, 0,
  1021. 0x3, 0x13, 0,
  1022. 0x4, 0x14, 0,
  1023. 0x5 };
  1024. const vec_u8 perm3rg1 = (vec_u8) { 0x15, 0,
  1025. 0x6, 0x16, 0,
  1026. 0x7, 0x17, 0 };
  1027. const vec_u8 perm3tb0 = (vec_u8) {0x0, 0x1, 0x10,
  1028. 0x3, 0x4, 0x11,
  1029. 0x6, 0x7, 0x12,
  1030. 0x9, 0xa, 0x13,
  1031. 0xc, 0xd, 0x14,
  1032. 0xf };
  1033. const vec_u8 perm3tb1 = (vec_u8) { 0x0, 0x15,
  1034. 0x2, 0x3, 0x16,
  1035. 0x5, 0x6, 0x17 };
  1036. for (i = 0; i < (dstW + 1) >> 1; i += 8) { // The x86 asm also overwrites padding bytes.
  1037. vy = vec_ld(0, &buf0[i * 2]);
  1038. vy32_l = vec_unpackh(vy);
  1039. vy32_r = vec_unpackl(vy);
  1040. vy32_l = vec_sl(vy32_l, shift2);
  1041. vy32_r = vec_sl(vy32_r, shift2);
  1042. vu = vec_ld(0, &ubuf0[i]);
  1043. vv = vec_ld(0, &vbuf0[i]);
  1044. if (uvalpha < 2048) {
  1045. vu = (vec_s16) vec_sub((vec_u16) vu, sub7);
  1046. vv = (vec_s16) vec_sub((vec_u16) vv, sub7);
  1047. tmp32 = vec_mule(vu, mul4);
  1048. tmp32_2 = vec_mulo(vu, mul4);
  1049. vu32_l = vec_mergeh(tmp32, tmp32_2);
  1050. vu32_r = vec_mergel(tmp32, tmp32_2);
  1051. tmp32 = vec_mule(vv, mul4);
  1052. tmp32_2 = vec_mulo(vv, mul4);
  1053. vv32_l = vec_mergeh(tmp32, tmp32_2);
  1054. vv32_r = vec_mergel(tmp32, tmp32_2);
  1055. } else {
  1056. tmp16 = vec_ld(0, &ubuf1[i]);
  1057. vu = vec_add(vu, tmp16);
  1058. vu = (vec_s16) vec_sub((vec_u16) vu, sub8);
  1059. tmp16 = vec_ld(0, &vbuf1[i]);
  1060. vv = vec_add(vv, tmp16);
  1061. vv = (vec_s16) vec_sub((vec_u16) vv, sub8);
  1062. vu32_l = vec_mule(vu, mul8);
  1063. vu32_r = vec_mulo(vu, mul8);
  1064. vv32_l = vec_mule(vv, mul8);
  1065. vv32_r = vec_mulo(vv, mul8);
  1066. }
  1067. if (hasAlpha) {
  1068. A = vec_ld(0, &abuf0[i]);
  1069. A = vec_add(A, add64);
  1070. A = vec_sr(A, shift7);
  1071. A = vec_max(A, max255);
  1072. ad = vec_packsu(A, (vec_s16) zero16);
  1073. } else {
  1074. ad = vec_splats((uint8_t) 255);
  1075. }
  1076. vy32_l = vec_sub(vy32_l, y_offset);
  1077. vy32_r = vec_sub(vy32_r, y_offset);
  1078. vy32_l = vec_mul(vy32_l, y_coeff);
  1079. vy32_r = vec_mul(vy32_r, y_coeff);
  1080. vy32_l = vec_add(vy32_l, y_add);
  1081. vy32_r = vec_add(vy32_r, y_add);
  1082. // Use the first UV half
  1083. vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
  1084. vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
  1085. vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
  1086. vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
  1087. R_l = vec_mul(vvd32_l, v2r_coeff);
  1088. R_l = vec_add(R_l, vy32_l);
  1089. R_r = vec_mul(vvd32_r, v2r_coeff);
  1090. R_r = vec_add(R_r, vy32_r);
  1091. G_l = vec_mul(vvd32_l, v2g_coeff);
  1092. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1093. G_l = vec_add(G_l, vy32_l);
  1094. G_l = vec_add(G_l, tmp32);
  1095. G_r = vec_mul(vvd32_r, v2g_coeff);
  1096. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1097. G_r = vec_add(G_r, vy32_r);
  1098. G_r = vec_add(G_r, tmp32);
  1099. B_l = vec_mul(vud32_l, u2b_coeff);
  1100. B_l = vec_add(B_l, vy32_l);
  1101. B_r = vec_mul(vud32_r, u2b_coeff);
  1102. B_r = vec_add(B_r, vy32_r);
  1103. WRITERGB
  1104. // New Y for the second half
  1105. vy = vec_ld(16, &buf0[i * 2]);
  1106. vy32_l = vec_unpackh(vy);
  1107. vy32_r = vec_unpackl(vy);
  1108. vy32_l = vec_sl(vy32_l, shift2);
  1109. vy32_r = vec_sl(vy32_r, shift2);
  1110. vy32_l = vec_sub(vy32_l, y_offset);
  1111. vy32_r = vec_sub(vy32_r, y_offset);
  1112. vy32_l = vec_mul(vy32_l, y_coeff);
  1113. vy32_r = vec_mul(vy32_r, y_coeff);
  1114. vy32_l = vec_add(vy32_l, y_add);
  1115. vy32_r = vec_add(vy32_r, y_add);
  1116. // Second UV half
  1117. vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
  1118. vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
  1119. vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
  1120. vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
  1121. R_l = vec_mul(vvd32_l, v2r_coeff);
  1122. R_l = vec_add(R_l, vy32_l);
  1123. R_r = vec_mul(vvd32_r, v2r_coeff);
  1124. R_r = vec_add(R_r, vy32_r);
  1125. G_l = vec_mul(vvd32_l, v2g_coeff);
  1126. tmp32 = vec_mul(vud32_l, u2g_coeff);
  1127. G_l = vec_add(G_l, vy32_l);
  1128. G_l = vec_add(G_l, tmp32);
  1129. G_r = vec_mul(vvd32_r, v2g_coeff);
  1130. tmp32 = vec_mul(vud32_r, u2g_coeff);
  1131. G_r = vec_add(G_r, vy32_r);
  1132. G_r = vec_add(G_r, tmp32);
  1133. B_l = vec_mul(vud32_l, u2b_coeff);
  1134. B_l = vec_add(B_l, vy32_l);
  1135. B_r = vec_mul(vud32_r, u2b_coeff);
  1136. B_r = vec_add(B_r, vy32_r);
  1137. WRITERGB
  1138. }
  1139. }
  1140. #undef WRITERGB
  1141. #define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
  1142. static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
  1143. const int16_t **lumSrc, int lumFilterSize, \
  1144. const int16_t *chrFilter, const int16_t **chrUSrc, \
  1145. const int16_t **chrVSrc, int chrFilterSize, \
  1146. const int16_t **alpSrc, uint8_t *dest, int dstW, \
  1147. int y) \
  1148. { \
  1149. name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
  1150. chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
  1151. alpSrc, dest, dstW, y, fmt, hasAlpha); \
  1152. }
  1153. #define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
  1154. static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
  1155. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1156. const int16_t *abuf[2], uint8_t *dest, int dstW, \
  1157. int yalpha, int uvalpha, int y) \
  1158. { \
  1159. name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
  1160. dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
  1161. }
  1162. #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
  1163. static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
  1164. const int16_t *ubuf[2], const int16_t *vbuf[2], \
  1165. const int16_t *abuf0, uint8_t *dest, int dstW, \
  1166. int uvalpha, int y) \
  1167. { \
  1168. name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, abuf0, dest, \
  1169. dstW, uvalpha, y, fmt, hasAlpha); \
  1170. }
  1171. YUV2RGBWRAPPER(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
  1172. YUV2RGBWRAPPER(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
  1173. YUV2RGBWRAPPER(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
  1174. YUV2RGBWRAPPER(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
  1175. YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
  1176. YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
  1177. YUV2RGBWRAPPERX2(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA, 0)
  1178. YUV2RGBWRAPPERX2(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA, 0)
  1179. YUV2RGBWRAPPERX2(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB, 0)
  1180. YUV2RGBWRAPPERX2(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR, 0)
  1181. YUV2RGBWRAPPERX2(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
  1182. YUV2RGBWRAPPERX2(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
  1183. YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1184. YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1185. YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1186. YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1187. YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1188. YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
  1189. YUV2RGBWRAPPERX2(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1190. YUV2RGBWRAPPERX2(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1191. YUV2RGBWRAPPERX2(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1192. YUV2RGBWRAPPERX2(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1193. YUV2RGBWRAPPERX2(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1194. YUV2RGBWRAPPERX2(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
  1195. YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
  1196. YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
  1197. YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
  1198. YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
  1199. YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
  1200. YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
static av_always_inline void
write422(const vec_s16 vy1, const vec_s16 vy2,
         const vec_s16 vu, const vec_s16 vv,
         uint8_t *dest, const enum AVPixelFormat target)
{
    vec_u8 vd1, vd2, tmp;
    const vec_u8 yuyv1 = (vec_u8) {
                         0x0, 0x10, 0x1, 0x18,
                         0x2, 0x11, 0x3, 0x19,
                         0x4, 0x12, 0x5, 0x1a,
                         0x6, 0x13, 0x7, 0x1b };
    const vec_u8 yuyv2 = (vec_u8) {
                         0x8, 0x14, 0x9, 0x1c,
                         0xa, 0x15, 0xb, 0x1d,
                         0xc, 0x16, 0xd, 0x1e,
                         0xe, 0x17, 0xf, 0x1f };
    const vec_u8 yvyu1 = (vec_u8) {
                         0x0, 0x18, 0x1, 0x10,
                         0x2, 0x19, 0x3, 0x11,
                         0x4, 0x1a, 0x5, 0x12,
                         0x6, 0x1b, 0x7, 0x13 };
    const vec_u8 yvyu2 = (vec_u8) {
                         0x8, 0x1c, 0x9, 0x14,
                         0xa, 0x1d, 0xb, 0x15,
                         0xc, 0x1e, 0xd, 0x16,
                         0xe, 0x1f, 0xf, 0x17 };
    const vec_u8 uyvy1 = (vec_u8) {
                         0x10, 0x0, 0x18, 0x1,
                         0x11, 0x2, 0x19, 0x3,
                         0x12, 0x4, 0x1a, 0x5,
                         0x13, 0x6, 0x1b, 0x7 };
    const vec_u8 uyvy2 = (vec_u8) {
                         0x14, 0x8, 0x1c, 0x9,
                         0x15, 0xa, 0x1d, 0xb,
                         0x16, 0xc, 0x1e, 0xd,
                         0x17, 0xe, 0x1f, 0xf };

    vd1 = vec_packsu(vy1, vy2);
    vd2 = vec_packsu(vu, vv);

    switch (target) {
    case AV_PIX_FMT_YUYV422:
        tmp = vec_perm(vd1, vd2, yuyv1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yuyv2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_YVYU422:
        tmp = vec_perm(vd1, vd2, yvyu1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, yvyu2);
        vec_st(tmp, 16, dest);
        break;
    case AV_PIX_FMT_UYVY422:
        tmp = vec_perm(vd1, vd2, uyvy1);
        vec_st(tmp, 0, dest);
        tmp = vec_perm(vd1, vd2, uyvy2);
        vec_st(tmp, 16, dest);
        break;
    }
}
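
/*
 * Multi-tap vertical scaling into packed 4:2:2.  Each luma/chroma tap is
 * multiplied in 32-bit lanes (vec_mule/vec_mulo plus merge) and
 * accumulated on top of a 1 << 18 rounding bias; the arithmetic shift
 * right by 19 then reduces the fixed-point sums before they are packed
 * back to 16 bits and handed to write422().
 */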
static av_always_inline void
yuv2422_X_vsx_template(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest, int dstW,
                       int y, enum AVPixelFormat target)
{
    int i, j;
    vec_s16 vy1, vy2, vu, vv;
    vec_s32 vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4;
    vec_s16 vlumFilter[MAX_FILTER_SIZE], vchrFilter[MAX_FILTER_SIZE];
    const vec_s32 start = vec_splats(1 << 18);
    const vec_u32 shift19 = vec_splats(19U);

    for (i = 0; i < lumFilterSize; i++)
        vlumFilter[i] = vec_splats(lumFilter[i]);
    for (i = 0; i < chrFilterSize; i++)
        vchrFilter[i] = vec_splats(chrFilter[i]);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        vy32[0] =
        vy32[1] =
        vy32[2] =
        vy32[3] =
        vu32[0] =
        vu32[1] =
        vv32[0] =
        vv32[1] = start;

        for (j = 0; j < lumFilterSize; j++) {
            vv = vec_ld(0, &lumSrc[j][i * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[0] = vec_adds(vy32[0], tmp3);
            vy32[1] = vec_adds(vy32[1], tmp4);

            vv = vec_ld(0, &lumSrc[j][(i + 4) * 2]);
            tmp = vec_mule(vv, vlumFilter[j]);
            tmp2 = vec_mulo(vv, vlumFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vy32[2] = vec_adds(vy32[2], tmp3);
            vy32[3] = vec_adds(vy32[3], tmp4);
        }

        for (j = 0; j < chrFilterSize; j++) {
            vv = vec_ld(0, &chrUSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vu32[0] = vec_adds(vu32[0], tmp3);
            vu32[1] = vec_adds(vu32[1], tmp4);

            vv = vec_ld(0, &chrVSrc[j][i]);
            tmp = vec_mule(vv, vchrFilter[j]);
            tmp2 = vec_mulo(vv, vchrFilter[j]);
            tmp3 = vec_mergeh(tmp, tmp2);
            tmp4 = vec_mergel(tmp, tmp2);

            vv32[0] = vec_adds(vv32[0], tmp3);
            vv32[1] = vec_adds(vv32[1], tmp4);
        }

        for (j = 0; j < 4; j++) {
            vy32[j] = vec_sra(vy32[j], shift19);
        }
        for (j = 0; j < 2; j++) {
            vu32[j] = vec_sra(vu32[j], shift19);
            vv32[j] = vec_sra(vv32[j], shift19);
        }

        vy1 = vec_packs(vy32[0], vy32[1]);
        vy2 = vec_packs(vy32[2], vy32[3]);
        vu = vec_packs(vu32[0], vu32[1]);
        vv = vec_packs(vv32[0], vv32[1]);

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
    }
}
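
/*
 * SETUP blends one vector of 16-bit samples from two source rows, the
 * same two-row interpolation the scalar code performs per sample,
 * roughly:
 *
 *     x[k] = (buf0[k] * alpha1 + buf1[k] * alpha) >> 19;
 *
 * The products are formed in 32-bit lanes, summed, shifted down by 19
 * and packed back to signed 16 bits.
 */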
#define SETUP(x, buf0, alpha1, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha1); \
    tmp2 = vec_mulo(x, alpha1); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
\
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
\
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
\
    tmp3 = vec_sra(tmp3, shift19); \
    tmp4 = vec_sra(tmp4, shift19); \
    x = vec_packs(tmp3, tmp4); \
}

static av_always_inline void
yuv2422_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
                       enum AVPixelFormat target)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    const int16_t  yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vec_s16 vy1, vy2, vu, vv;
    vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
    const vec_s16 vyalpha1 = vec_splats(yalpha1);
    const vec_s16 vuvalpha1 = vec_splats(uvalpha1);
    const vec_s16 vyalpha = vec_splats((int16_t) yalpha);
    const vec_s16 vuvalpha = vec_splats((int16_t) uvalpha);
    const vec_u32 shift19 = vec_splats(19U);
    int i;

    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
        SETUP(vy1, &buf0[i * 2], vyalpha1, &buf1[i * 2], vyalpha)
        SETUP(vy2, &buf0[(i + 4) * 2], vyalpha1, &buf1[(i + 4) * 2], vyalpha)
        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha)
        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha)

        write422(vy1, vy2, vu, vv, &dest[i * 4], target);
    }
}

#undef SETUP
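
/*
 * Single-row 4:2:2 output.  With uvalpha < 2048 only the first chroma
 * row is used and everything is rounded with +64 and >> 7; otherwise the
 * two chroma rows are summed, which is why chroma gets +128 and >> 8
 * while luma keeps the >> 7 path.
 */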
static av_always_inline void
yuv2422_1_vsx_template(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
                       int uvalpha, int y, enum AVPixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    vec_s16 vy1, vy2, vu, vv, tmp;
    const vec_s16 add64 = vec_splats((int16_t) 64);
    const vec_s16 add128 = vec_splats((int16_t) 128);
    const vec_u16 shift7 = vec_splat_u16(7);
    const vec_u16 shift8 = vec_splat_u16(8);
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            vv = vec_ld(0, &vbuf0[i]);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_add(vu, add64);
            vv = vec_add(vv, add64);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift7);
            vv = vec_sra(vv, shift7);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];

        for (i = 0; i < ((dstW + 1) >> 1); i += 8) {
            vy1 = vec_ld(0, &buf0[i * 2]);
            vy2 = vec_ld(0, &buf0[(i + 4) * 2]);
            vu = vec_ld(0, &ubuf0[i]);
            tmp = vec_ld(0, &ubuf1[i]);
            vu = vec_adds(vu, tmp);
            vv = vec_ld(0, &vbuf0[i]);
            tmp = vec_ld(0, &vbuf1[i]);
            vv = vec_adds(vv, tmp);

            vy1 = vec_add(vy1, add64);
            vy2 = vec_add(vy2, add64);
            vu = vec_adds(vu, add128);
            vv = vec_adds(vv, add128);

            vy1 = vec_sra(vy1, shift7);
            vy2 = vec_sra(vy2, shift7);
            vu = vec_sra(vu, shift8);
            vv = vec_sra(vv, shift8);

            write422(vy1, vy2, vu, vv, &dest[i * 4], target);
        }
    }
}

#define YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _X_vsx(SwsContext *c, const int16_t *lumFilter, \
                                  const int16_t **lumSrc, int lumFilterSize, \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize, \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                  int y) \
{ \
    name ## base ## _X_vsx_template(c, lumFilter, lumSrc, lumFilterSize, \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                    alpSrc, dest, dstW, y, fmt); \
}

#define YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
YUV2PACKEDWRAPPERX(name, base, ext, fmt) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt); \
}

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
YUV2PACKEDWRAPPER2(name, base, ext, fmt) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf0, uint8_t *dest, int dstW, \
                                  int uvalpha, int y) \
{ \
    name ## base ## _1_vsx_template(c, buf0, ubuf, vbuf, \
                                    abuf0, dest, dstW, uvalpha, \
                                    y, fmt); \
}

YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
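
/*
 * Fast bilinear horizontal luma scaling.  xpos is a 16.16 fixed-point
 * source coordinate: the integer part (>> 16) selects the source byte
 * and the fractional part (>> 9, i.e. 7 bits) is the interpolation
 * weight.  For each block of 16 output samples a permute vector gathers
 * the left neighbours, a second load shifted by one byte provides the
 * right neighbours, and the result src[x] * 128 + (src[x + 1] - src[x]) * alpha
 * is stored as 16-bit samples.  The tail loop replicates the last source
 * pixel for output positions that would read past srcW.
 */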
static void hyscale_fast_vsx(SwsContext *c, int16_t *dst, int dstWidth,
                             const uint8_t *src, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0, xx;
    vec_u8 vin, vin2, vperm;
    vec_s8 vmul, valpha;
    vec_s16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    vec_u32 vcoord[4];
    const vec_u32 vadd = (vec_u32) {
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
    };
    const vec_u16 vadd16 = (vec_u16) { // Modulo math
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
        xInc * 4,
        xInc * 5,
        xInc * 6,
        xInc * 7,
    };
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u16 vshift = vec_splat_u16(7);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = (vec_s8) vec_pack(vcoord16[0], vcoord16[1]);

        xx = xpos >> 16;
        vin = vec_vsx_ld(0, &src[xx]);

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        vin = vec_perm(vin, vin, vperm);

        vin2 = vec_vsx_ld(1, &src[xx]);
        vin2 = vec_perm(vin2, vin2, vperm);

        vmul = (vec_s8) vec_sub(vin2, vin);
        vtmp = vec_mule(vmul, valpha);
        vtmp2 = vec_mulo(vmul, valpha);
        vtmp3 = vec_mergeh(vtmp, vtmp2);
        vtmp4 = vec_mergel(vtmp, vtmp2);

        vd_l = (vec_u16) vec_mergeh(vin, vzero);
        vd_r = (vec_u16) vec_mergel(vin, vzero);
        vd_l = vec_sl(vd_l, vshift);
        vd_r = vec_sl(vd_r, vshift);

        vd_l = vec_add(vd_l, (vec_u16) vtmp3);
        vd_r = vec_add(vd_r, (vec_u16) vtmp4);

        vec_st((vec_s16) vd_l, 0, &dst[i]);
        vec_st((vec_s16) vd_r, 0, &dst[i + 8]);

        xpos += xInc * 16;
    }
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
}
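
/*
 * HCSCALE performs the same bilinear gather as hyscale_fast_vsx for one
 * chroma plane, reusing the permute vector (vperm) and weights computed
 * in the enclosing loop; valphaxor holds 127 - alpha (alpha is 7 bits),
 * so each output sample is in[x] * (127 - alpha) + in[x + 1] * alpha.
 */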
#define HCSCALE(in, out) \
        vin = vec_vsx_ld(0, &in[xx]); \
        vin = vec_perm(vin, vin, vperm); \
\
        vin2 = vec_vsx_ld(1, &in[xx]); \
        vin2 = vec_perm(vin2, vin2, vperm); \
\
        vtmp = vec_mule(vin, valphaxor); \
        vtmp2 = vec_mulo(vin, valphaxor); \
        vtmp3 = vec_mergeh(vtmp, vtmp2); \
        vtmp4 = vec_mergel(vtmp, vtmp2); \
\
        vtmp = vec_mule(vin2, valpha); \
        vtmp2 = vec_mulo(vin2, valpha); \
        vd_l = vec_mergeh(vtmp, vtmp2); \
        vd_r = vec_mergel(vtmp, vtmp2); \
\
        vd_l = vec_add(vd_l, vtmp3); \
        vd_r = vec_add(vd_r, vtmp4); \
\
        vec_st((vec_s16) vd_l, 0, &out[i]); \
        vec_st((vec_s16) vd_r, 0, &out[i + 8])

static void hcscale_fast_vsx(SwsContext *c, int16_t *dst1, int16_t *dst2,
                             int dstWidth, const uint8_t *src1,
                             const uint8_t *src2, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0, xx;
    vec_u8 vin, vin2, vperm;
    vec_u8 valpha, valphaxor;
    vec_u16 vtmp, vtmp2, vtmp3, vtmp4;
    vec_u16 vd_l, vd_r, vcoord16[2];
    vec_u32 vcoord[4];
    const vec_u8 vxor = vec_splats((uint8_t) 127);
    const vec_u32 vadd = (vec_u32) {
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
    };
    const vec_u16 vadd16 = (vec_u16) { // Modulo math
        0,
        xInc * 1,
        xInc * 2,
        xInc * 3,
        xInc * 4,
        xInc * 5,
        xInc * 6,
        xInc * 7,
    };
    const vec_u32 vshift16 = vec_splats((uint32_t) 16);
    const vec_u16 vshift9 = vec_splat_u16(9);

    for (i = 0; i < dstWidth; i += 16) {
        vcoord16[0] = vec_splats((uint16_t) xpos);
        vcoord16[1] = vec_splats((uint16_t) (xpos + xInc * 8));

        vcoord16[0] = vec_add(vcoord16[0], vadd16);
        vcoord16[1] = vec_add(vcoord16[1], vadd16);

        vcoord16[0] = vec_sr(vcoord16[0], vshift9);
        vcoord16[1] = vec_sr(vcoord16[1], vshift9);
        valpha = vec_pack(vcoord16[0], vcoord16[1]);
        valphaxor = vec_xor(valpha, vxor);

        xx = xpos >> 16;

        vcoord[0] = vec_splats(xpos & 0xffff);
        vcoord[1] = vec_splats((xpos & 0xffff) + xInc * 4);
        vcoord[2] = vec_splats((xpos & 0xffff) + xInc * 8);
        vcoord[3] = vec_splats((xpos & 0xffff) + xInc * 12);

        vcoord[0] = vec_add(vcoord[0], vadd);
        vcoord[1] = vec_add(vcoord[1], vadd);
        vcoord[2] = vec_add(vcoord[2], vadd);
        vcoord[3] = vec_add(vcoord[3], vadd);

        vcoord[0] = vec_sr(vcoord[0], vshift16);
        vcoord[1] = vec_sr(vcoord[1], vshift16);
        vcoord[2] = vec_sr(vcoord[2], vshift16);
        vcoord[3] = vec_sr(vcoord[3], vshift16);

        vcoord16[0] = vec_pack(vcoord[0], vcoord[1]);
        vcoord16[1] = vec_pack(vcoord[2], vcoord[3]);
        vperm = vec_pack(vcoord16[0], vcoord16[1]);

        HCSCALE(src1, dst1);
        HCSCALE(src2, dst2);

        xpos += xInc * 16;
    }
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
    }
}
#undef HCSCALE
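
/*
 * Horizontal scaling of high-bit-depth (and float) input to 19-bit
 * intermediates.  filterSize == 1 falls back to the scalar loop; the
 * vector path processes eight taps per iteration, masking the unused
 * taps of the final iteration with the vunused permute table, and
 * reduces the per-lane sums with vec_sums before clipping to
 * (1 << 19) - 1.
 */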
static void hScale16To19_vsx(SwsContext *c, int16_t *_dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int i, j;
    int32_t *dst = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    int sh = bits - 4;
    vec_s16 vfilter, vin;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    };
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if ((isAnyRGB(c->srcFormat) || c->srcFormat == AV_PIX_FMT_PAL8) && desc->comp[0].depth < 16) {
        sh = 9;
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1 - 4;
    }

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val = 0;

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];
            }
            // filter=14 bit, input=16 bit, output=30 bit, >> 11 makes 19 bit
            dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
        }
    } else {
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);

            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize) // Remove the unused elements on the last round
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = (vec_s32) vec_mergeh(vin, (vec_s16) vzero);
                vtmp2 = (vec_s32) vec_mergel(vin, (vec_s16) vzero);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            }
            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 19) - 1);
        }
    }
}
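
/*
 * Same structure as hScale16To19_vsx, but producing 15-bit intermediates
 * (clipped to (1 << 15) - 1) for the lower-precision output paths, with
 * the shift amount adjusted per source format.
 */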
static void hScale16To15_vsx(SwsContext *c, int16_t *dst, int dstW,
                             const uint8_t *_src, const int16_t *filter,
                             const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->srcFormat);
    int i, j;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;
    vec_s16 vfilter, vin;
    vec_s32 vout, vtmp, vtmp2, vfilter32_l, vfilter32_r;
    const vec_u8 vzero = vec_splat_u8(0);
    const vec_u8 vunusedtab[8] = {
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf},
        (vec_u8) {0x0, 0x1, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x10, 0x10, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x10, 0x10,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0x10, 0x10, 0x10, 0x10},
        (vec_u8) {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
                  0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0x10, 0x10},
    };
    const vec_u8 vunused = vunusedtab[filterSize % 8];

    if (sh < 15) {
        sh = isAnyRGB(c->srcFormat) || c->srcFormat == AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1;
    }

    if (filterSize == 1) {
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val = 0;

            for (j = 0; j < filterSize; j++) {
                val += src[srcPos + j] * filter[filterSize * i + j];
            }
            // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit
            dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
        }
    } else {
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];
            vout = vec_splat_s32(0);

            for (j = 0; j < filterSize; j += 8) {
                vin = (vec_s16) vec_vsx_ld(0, &src[srcPos + j]);
                if (j + 8 > filterSize) // Remove the unused elements on the last round
                    vin = vec_perm(vin, (vec_s16) vzero, vunused);

                vfilter = vec_vsx_ld(0, &filter[filterSize * i + j]);
                vfilter32_l = vec_unpackh(vfilter);
                vfilter32_r = vec_unpackl(vfilter);

                vtmp = (vec_s32) vec_mergeh(vin, (vec_s16) vzero);
                vtmp2 = (vec_s32) vec_mergel(vin, (vec_s16) vzero);

                vtmp = vec_mul(vtmp, vfilter32_l);
                vtmp2 = vec_mul(vtmp2, vfilter32_r);

                vout = vec_adds(vout, vtmp);
                vout = vec_adds(vout, vtmp2);
            }
            vout = vec_sums(vout, (vec_s32) vzero);
            dst[i] = FFMIN(vout[3] >> sh, (1 << 15) - 1);
        }
    }
}

#endif /* !HAVE_BIGENDIAN */

#endif /* HAVE_VSX */
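
/*
 * Runtime dispatch: install the VSX (and, where needed, POWER8-only)
 * code paths into the SwsContext function pointers.  Nothing is
 * overridden when the CPU lacks VSX, and SWS_BITEXACT keeps the default
 * packed/RGB output paths.
 */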
av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
{
#if HAVE_VSX
    enum AVPixelFormat dstFormat = c->dstFormat;
    const int cpu_flags = av_get_cpu_flags();
    const unsigned char power8 = HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8;

    if (!(cpu_flags & AV_CPU_FLAG_VSX))
        return;

#if !HAVE_BIGENDIAN
    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            c->hyScale = c->hcScale = hScale_real_vsx;
            if (c->flags & SWS_FAST_BILINEAR && c->dstW >= c->srcW && c->chrDstW >= c->chrSrcW) {
                c->hyscale_fast = hyscale_fast_vsx;
                c->hcscale_fast = hcscale_fast_vsx;
            }
        }
    } else {
        if (power8) {
            c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_vsx
                                                     : hScale16To15_vsx;
        }
    }
    if (!is16BPS(dstFormat) && !isNBPS(dstFormat) && !isSemiPlanarYUV(dstFormat) &&
        dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE &&
        !c->needAlpha) {
        c->yuv2planeX = yuv2planeX_vsx;
    }
#endif

    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->needAlpha) {
        switch (c->dstBpc) {
        case 8:
            c->yuv2plane1 = yuv2plane1_8_vsx;
            break;
#if !HAVE_BIGENDIAN
        case 9:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_vsx  : yuv2plane1_9LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_vsx  : yuv2planeX_9LE_vsx;
            break;
        case 10:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_vsx : yuv2plane1_10LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_vsx : yuv2planeX_10LE_vsx;
            break;
        case 12:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_vsx : yuv2plane1_12LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_vsx : yuv2planeX_12LE_vsx;
            break;
        case 14:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_vsx : yuv2plane1_14LE_vsx;
            c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_vsx : yuv2planeX_14LE_vsx;
            break;
        case 16:
            c->yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_vsx : yuv2plane1_16LE_vsx;
#if HAVE_POWER8
            if (cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_vsx : yuv2planeX_16LE_vsx;
            }
#endif /* HAVE_POWER8 */
            break;
#endif /* !HAVE_BIGENDIAN */
        }
    }

    if (c->flags & SWS_BITEXACT)
        return;

#if !HAVE_BIGENDIAN
    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB24:
            if (power8) {
                c->yuv2packed1 = yuv2rgb24_full_1_vsx;
                c->yuv2packed2 = yuv2rgb24_full_2_vsx;
                c->yuv2packedX = yuv2rgb24_full_X_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (power8) {
                c->yuv2packed1 = yuv2bgr24_full_1_vsx;
                c->yuv2packed2 = yuv2bgr24_full_2_vsx;
                c->yuv2packedX = yuv2bgr24_full_X_vsx;
            }
            break;
        case AV_PIX_FMT_BGRA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
                    c->yuv2packed2 = yuv2bgrx32_full_2_vsx;
                    c->yuv2packedX = yuv2bgrx32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGBA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
                    c->yuv2packed2 = yuv2rgbx32_full_2_vsx;
                    c->yuv2packedX = yuv2rgbx32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ARGB:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
                    c->yuv2packed2 = yuv2xrgb32_full_2_vsx;
                    c->yuv2packedX = yuv2xrgb32_full_X_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ABGR:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
                    c->yuv2packed2 = yuv2xbgr32_full_2_vsx;
                    c->yuv2packedX = yuv2xbgr32_full_X_vsx;
                }
            }
            break;
        }
    } else { /* !SWS_FULL_CHR_H_INT */
        switch (dstFormat) {
        case AV_PIX_FMT_YUYV422:
            c->yuv2packed1 = yuv2yuyv422_1_vsx;
            c->yuv2packed2 = yuv2yuyv422_2_vsx;
            c->yuv2packedX = yuv2yuyv422_X_vsx;
            break;
        case AV_PIX_FMT_YVYU422:
            c->yuv2packed1 = yuv2yvyu422_1_vsx;
            c->yuv2packed2 = yuv2yvyu422_2_vsx;
            c->yuv2packedX = yuv2yvyu422_X_vsx;
            break;
        case AV_PIX_FMT_UYVY422:
            c->yuv2packed1 = yuv2uyvy422_1_vsx;
            c->yuv2packed2 = yuv2uyvy422_2_vsx;
            c->yuv2packedX = yuv2uyvy422_X_vsx;
            break;
        case AV_PIX_FMT_BGRA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_1_vsx;
                    c->yuv2packed2 = yuv2bgrx32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGBA:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_1_vsx;
                    c->yuv2packed2 = yuv2rgbx32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ARGB:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_1_vsx;
                    c->yuv2packed2 = yuv2xrgb32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_ABGR:
            if (power8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_1_vsx;
                    c->yuv2packed2 = yuv2xbgr32_2_vsx;
                }
            }
            break;
        case AV_PIX_FMT_RGB24:
            if (power8) {
                c->yuv2packed1 = yuv2rgb24_1_vsx;
                c->yuv2packed2 = yuv2rgb24_2_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (power8) {
                c->yuv2packed1 = yuv2bgr24_1_vsx;
                c->yuv2packed2 = yuv2bgr24_2_vsx;
            }
            break;
        }
    }
#endif /* !HAVE_BIGENDIAN */
#endif /* HAVE_VSX */
}