/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 * Contributed by Lu Wang <wanglu@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "swscale_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"

/* Copied from libswscale/output.c */
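/*
 * Write two horizontally adjacent pixels to dest in the packing that
 * 'target' requires. Y1/Y2 index the per-component lookup tables r/g/b;
 * the 16/15/12-bit and 8/4-bit paths add ordered-dither offsets to the
 * table index before the lookup.
 */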
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
#else
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
        int sh = (target == AV_PIX_FMT_RGB32_1 ||
                  target == AV_PIX_FMT_BGR32_1) ? 0 : 24;
        av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
#endif
        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] = g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] = g[Y2];
        dest[i * 6 + 5] = b_r[Y2];

#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_4[ y & 1     ][0];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_4[ y & 1     ][1];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_8[ y & 1     ][1];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_8[ y & 1     ][0];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else { /* 8/4 bits */
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}

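/* Pull luma lanes t1/t2 and chroma lanes t3/t4 out of the given vectors,
 * resolve the RGB lookup tables for (U, V) and emit one pixel pair. */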
#define WRITE_YUV2RGB_LSX(vec_y1, vec_y2, vec_u, vec_v, t1, t2, t3, t4)  \
{                                                                        \
    Y1 = __lsx_vpickve2gr_w(vec_y1, t1);                                 \
    Y2 = __lsx_vpickve2gr_w(vec_y2, t2);                                 \
    U  = __lsx_vpickve2gr_w(vec_u, t3);                                  \
    V  = __lsx_vpickve2gr_w(vec_v, t4);                                  \
    r  = c->table_rV[V];                                                 \
    g  = (c->table_gU[U] + c->table_gV[V]);                              \
    b  = c->table_bU[U];                                                 \
    yuv2rgb_write(dest, count, Y1, Y2, 0, 0,                             \
                  r, g, b, y, target, 0);                                \
    count++;                                                             \
}

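/* Vertical scaling path: accumulate lumFilterSize luma taps and
 * chrFilterSize chroma taps per output sample, shift down by 19 bits,
 * then write through the lookup tables. The main loop covers 32 luma
 * samples per iteration; the tails below handle remainders of 16/8/4/2
 * and a final scalar loop covers whatever is left. */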
static void
yuv2rgb_X_template_lsx(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest, int dstW,
                       int y, enum AVPixelFormat target, int hasAlpha)
{
    int i, j;
    int count = 0;
    int t = 1 << 18;
    int len = dstW >> 5;
    int res = dstW & 31;
    int len_count = (dstW + 1) >> 1;
    const void *r, *g, *b;
    int head = YUVRGB_TABLE_HEADROOM;
    __m128i headroom = __lsx_vreplgr2vr_w(head);

    for (i = 0; i < len; i++) {
        int Y1, Y2, U, V, count_lum = count << 1;
        __m128i l_src1, l_src2, l_src3, l_src4, u_src1, u_src2, v_src1, v_src2;
        __m128i yl_ev, yl_ev1, yl_ev2, yl_od1, yl_od2, yh_ev1, yh_ev2, yh_od1, yh_od2;
        __m128i u_ev1, u_ev2, u_od1, u_od2, v_ev1, v_ev2, v_od1, v_od2, temp;

        yl_ev  = __lsx_vldrepl_w(&t, 0);
        yl_ev1 = yl_ev;
        yl_od1 = yl_ev;
        yh_ev1 = yl_ev;
        yh_od1 = yl_ev;
        u_ev1  = yl_ev;
        v_ev1  = yl_ev;
        u_od1  = yl_ev;
        v_od1  = yl_ev;
        yl_ev2 = yl_ev;
        yl_od2 = yl_ev;
        yh_ev2 = yl_ev;
        yh_od2 = yl_ev;
        u_ev2  = yl_ev;
        v_ev2  = yl_ev;
        u_od2  = yl_ev;
        v_od2  = yl_ev;

        for (j = 0; j < lumFilterSize; j++) {
            temp = __lsx_vldrepl_h((lumFilter + j), 0);
            DUP2_ARG2(__lsx_vld, lumSrc[j] + count_lum, 0, lumSrc[j] + count_lum,
                      16, l_src1, l_src2);
            DUP2_ARG2(__lsx_vld, lumSrc[j] + count_lum, 32, lumSrc[j] + count_lum,
                      48, l_src3, l_src4);
            yl_ev1 = __lsx_vmaddwev_w_h(yl_ev1, temp, l_src1);
            yl_od1 = __lsx_vmaddwod_w_h(yl_od1, temp, l_src1);
            yh_ev1 = __lsx_vmaddwev_w_h(yh_ev1, temp, l_src3);
            yh_od1 = __lsx_vmaddwod_w_h(yh_od1, temp, l_src3);
            yl_ev2 = __lsx_vmaddwev_w_h(yl_ev2, temp, l_src2);
            yl_od2 = __lsx_vmaddwod_w_h(yl_od2, temp, l_src2);
            yh_ev2 = __lsx_vmaddwev_w_h(yh_ev2, temp, l_src4);
            yh_od2 = __lsx_vmaddwod_w_h(yh_od2, temp, l_src4);
        }
        for (j = 0; j < chrFilterSize; j++) {
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 0, chrVSrc[j] + count, 0,
                      u_src1, v_src1);
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 16, chrVSrc[j] + count, 16,
                      u_src2, v_src2);
            temp  = __lsx_vldrepl_h((chrFilter + j), 0);
            u_ev1 = __lsx_vmaddwev_w_h(u_ev1, temp, u_src1);
            u_od1 = __lsx_vmaddwod_w_h(u_od1, temp, u_src1);
            v_ev1 = __lsx_vmaddwev_w_h(v_ev1, temp, v_src1);
            v_od1 = __lsx_vmaddwod_w_h(v_od1, temp, v_src1);
            u_ev2 = __lsx_vmaddwev_w_h(u_ev2, temp, u_src2);
            u_od2 = __lsx_vmaddwod_w_h(u_od2, temp, u_src2);
            v_ev2 = __lsx_vmaddwev_w_h(v_ev2, temp, v_src2);
            v_od2 = __lsx_vmaddwod_w_h(v_od2, temp, v_src2);
        }
        yl_ev1 = __lsx_vsrai_w(yl_ev1, 19);
        yh_ev1 = __lsx_vsrai_w(yh_ev1, 19);
        yl_od1 = __lsx_vsrai_w(yl_od1, 19);
        yh_od1 = __lsx_vsrai_w(yh_od1, 19);
        u_ev1  = __lsx_vsrai_w(u_ev1, 19);
        v_ev1  = __lsx_vsrai_w(v_ev1, 19);
        u_od1  = __lsx_vsrai_w(u_od1, 19);
        v_od1  = __lsx_vsrai_w(v_od1, 19);
        yl_ev2 = __lsx_vsrai_w(yl_ev2, 19);
        yh_ev2 = __lsx_vsrai_w(yh_ev2, 19);
        yl_od2 = __lsx_vsrai_w(yl_od2, 19);
        yh_od2 = __lsx_vsrai_w(yh_od2, 19);
        u_ev2  = __lsx_vsrai_w(u_ev2, 19);
        v_ev2  = __lsx_vsrai_w(v_ev2, 19);
        u_od2  = __lsx_vsrai_w(u_od2, 19);
        v_od2  = __lsx_vsrai_w(v_od2, 19);
        u_ev1  = __lsx_vadd_w(u_ev1, headroom);
        v_ev1  = __lsx_vadd_w(v_ev1, headroom);
        u_od1  = __lsx_vadd_w(u_od1, headroom);
        v_od1  = __lsx_vadd_w(v_od1, headroom);
        u_ev2  = __lsx_vadd_w(u_ev2, headroom);
        v_ev2  = __lsx_vadd_w(v_ev2, headroom);
        u_od2  = __lsx_vadd_w(u_od2, headroom);
        v_od2  = __lsx_vadd_w(v_od2, headroom);

        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_ev1, v_ev1, 0, 0, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_od1, v_od1, 1, 1, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_ev1, v_ev1, 2, 2, 1, 1);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_od1, v_od1, 3, 3, 1, 1);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_ev1, v_ev1, 0, 0, 2, 2);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_od1, v_od1, 1, 1, 2, 2);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_ev1, v_ev1, 2, 2, 3, 3);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_od1, v_od1, 3, 3, 3, 3);
        WRITE_YUV2RGB_LSX(yh_ev1, yh_od1, u_ev2, v_ev2, 0, 0, 0, 0);
        WRITE_YUV2RGB_LSX(yh_ev1, yh_od1, u_od2, v_od2, 1, 1, 0, 0);
        WRITE_YUV2RGB_LSX(yh_ev1, yh_od1, u_ev2, v_ev2, 2, 2, 1, 1);
        WRITE_YUV2RGB_LSX(yh_ev1, yh_od1, u_od2, v_od2, 3, 3, 1, 1);
        WRITE_YUV2RGB_LSX(yh_ev2, yh_od2, u_ev2, v_ev2, 0, 0, 2, 2);
        WRITE_YUV2RGB_LSX(yh_ev2, yh_od2, u_od2, v_od2, 1, 1, 2, 2);
        WRITE_YUV2RGB_LSX(yh_ev2, yh_od2, u_ev2, v_ev2, 2, 2, 3, 3);
        WRITE_YUV2RGB_LSX(yh_ev2, yh_od2, u_od2, v_od2, 3, 3, 3, 3);
    }
    if (res >= 16) {
        int Y1, Y2, U, V, count_lum = count << 1;
        __m128i l_src1, l_src2, u_src1, v_src1;
        __m128i yl_ev, yl_ev1, yl_ev2, yl_od1, yl_od2;
        __m128i u_ev1, u_od1, v_ev1, v_od1, temp;

        yl_ev  = __lsx_vldrepl_w(&t, 0);
        yl_ev1 = yl_ev;
        yl_od1 = yl_ev;
        u_ev1  = yl_ev;
        v_ev1  = yl_ev;
        u_od1  = yl_ev;
        v_od1  = yl_ev;
        yl_ev2 = yl_ev;
        yl_od2 = yl_ev;

        for (j = 0; j < lumFilterSize; j++) {
            temp = __lsx_vldrepl_h((lumFilter + j), 0);
            DUP2_ARG2(__lsx_vld, lumSrc[j] + count_lum, 0, lumSrc[j] + count_lum,
                      16, l_src1, l_src2);
            yl_ev1 = __lsx_vmaddwev_w_h(yl_ev1, temp, l_src1);
            yl_od1 = __lsx_vmaddwod_w_h(yl_od1, temp, l_src1);
            yl_ev2 = __lsx_vmaddwev_w_h(yl_ev2, temp, l_src2);
            yl_od2 = __lsx_vmaddwod_w_h(yl_od2, temp, l_src2);
        }
        for (j = 0; j < chrFilterSize; j++) {
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 0, chrVSrc[j] + count, 0,
                      u_src1, v_src1);
            temp  = __lsx_vldrepl_h((chrFilter + j), 0);
            u_ev1 = __lsx_vmaddwev_w_h(u_ev1, temp, u_src1);
            u_od1 = __lsx_vmaddwod_w_h(u_od1, temp, u_src1);
            v_ev1 = __lsx_vmaddwev_w_h(v_ev1, temp, v_src1);
            v_od1 = __lsx_vmaddwod_w_h(v_od1, temp, v_src1);
        }
        yl_ev1 = __lsx_vsrai_w(yl_ev1, 19);
        yl_od1 = __lsx_vsrai_w(yl_od1, 19);
        u_ev1  = __lsx_vsrai_w(u_ev1, 19);
        v_ev1  = __lsx_vsrai_w(v_ev1, 19);
        u_od1  = __lsx_vsrai_w(u_od1, 19);
        v_od1  = __lsx_vsrai_w(v_od1, 19);
        yl_ev2 = __lsx_vsrai_w(yl_ev2, 19);
        yl_od2 = __lsx_vsrai_w(yl_od2, 19);
        u_ev1  = __lsx_vadd_w(u_ev1, headroom);
        v_ev1  = __lsx_vadd_w(v_ev1, headroom);
        u_od1  = __lsx_vadd_w(u_od1, headroom);
        v_od1  = __lsx_vadd_w(v_od1, headroom);

        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_ev1, v_ev1, 0, 0, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_od1, v_od1, 1, 1, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_ev1, v_ev1, 2, 2, 1, 1);
        WRITE_YUV2RGB_LSX(yl_ev1, yl_od1, u_od1, v_od1, 3, 3, 1, 1);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_ev1, v_ev1, 0, 0, 2, 2);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_od1, v_od1, 1, 1, 2, 2);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_ev1, v_ev1, 2, 2, 3, 3);
        WRITE_YUV2RGB_LSX(yl_ev2, yl_od2, u_od1, v_od1, 3, 3, 3, 3);
        res -= 16;
    }
    if (res >= 8) {
        int Y1, Y2, U, V, count_lum = count << 1;
        __m128i l_src1, u_src, v_src;
        __m128i yl_ev, yl_od;
        __m128i u_ev, u_od, v_ev, v_od, temp;

        yl_ev = __lsx_vldrepl_w(&t, 0);
        yl_od = yl_ev;
        u_ev  = yl_ev;
        v_ev  = yl_ev;
        u_od  = yl_ev;
        v_od  = yl_ev;

        for (j = 0; j < lumFilterSize; j++) {
            temp   = __lsx_vldrepl_h((lumFilter + j), 0);
            l_src1 = __lsx_vld(lumSrc[j] + count_lum, 0);
            yl_ev  = __lsx_vmaddwev_w_h(yl_ev, temp, l_src1);
            yl_od  = __lsx_vmaddwod_w_h(yl_od, temp, l_src1);
        }
        for (j = 0; j < chrFilterSize; j++) {
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 0, chrVSrc[j] + count, 0,
                      u_src, v_src);
            temp = __lsx_vldrepl_h((chrFilter + j), 0);
            u_ev = __lsx_vmaddwev_w_h(u_ev, temp, u_src);
            u_od = __lsx_vmaddwod_w_h(u_od, temp, u_src);
            v_ev = __lsx_vmaddwev_w_h(v_ev, temp, v_src);
            v_od = __lsx_vmaddwod_w_h(v_od, temp, v_src);
        }
        yl_ev = __lsx_vsrai_w(yl_ev, 19);
        yl_od = __lsx_vsrai_w(yl_od, 19);
        u_ev  = __lsx_vsrai_w(u_ev, 19);
        v_ev  = __lsx_vsrai_w(v_ev, 19);
        u_od  = __lsx_vsrai_w(u_od, 19);
        v_od  = __lsx_vsrai_w(v_od, 19);
        u_ev  = __lsx_vadd_w(u_ev, headroom);
        v_ev  = __lsx_vadd_w(v_ev, headroom);
        u_od  = __lsx_vadd_w(u_od, headroom);
        v_od  = __lsx_vadd_w(v_od, headroom);

        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_ev, v_ev, 0, 0, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_od, v_od, 1, 1, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_ev, v_ev, 2, 2, 1, 1);
        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_od, v_od, 3, 3, 1, 1);
        res -= 8;
    }
    if (res >= 4) {
        int Y1, Y2, U, V, count_lum = count << 1;
        __m128i l_src1, u_src, v_src;
        __m128i yl_ev, yl_od;
        __m128i u_ev, u_od, v_ev, v_od, temp;

        yl_ev = __lsx_vldrepl_w(&t, 0);
        yl_od = yl_ev;
        u_ev  = yl_ev;
        v_ev  = yl_ev;
        u_od  = yl_ev;
        v_od  = yl_ev;

        for (j = 0; j < lumFilterSize; j++) {
            temp   = __lsx_vldrepl_h((lumFilter + j), 0);
            l_src1 = __lsx_vld(lumSrc[j] + count_lum, 0);
            yl_ev  = __lsx_vmaddwev_w_h(yl_ev, temp, l_src1);
            yl_od  = __lsx_vmaddwod_w_h(yl_od, temp, l_src1);
        }
        for (j = 0; j < chrFilterSize; j++) {
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 0, chrVSrc[j] + count, 0,
                      u_src, v_src);
            temp = __lsx_vldrepl_h((chrFilter + j), 0);
            u_ev = __lsx_vmaddwev_w_h(u_ev, temp, u_src);
            u_od = __lsx_vmaddwod_w_h(u_od, temp, u_src);
            v_ev = __lsx_vmaddwev_w_h(v_ev, temp, v_src);
            v_od = __lsx_vmaddwod_w_h(v_od, temp, v_src);
        }
        yl_ev = __lsx_vsrai_w(yl_ev, 19);
        yl_od = __lsx_vsrai_w(yl_od, 19);
        u_ev  = __lsx_vsrai_w(u_ev, 19);
        v_ev  = __lsx_vsrai_w(v_ev, 19);
        u_od  = __lsx_vsrai_w(u_od, 19);
        v_od  = __lsx_vsrai_w(v_od, 19);
        u_ev  = __lsx_vadd_w(u_ev, headroom);
        v_ev  = __lsx_vadd_w(v_ev, headroom);
        u_od  = __lsx_vadd_w(u_od, headroom);
        v_od  = __lsx_vadd_w(v_od, headroom);

        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_ev, v_ev, 0, 0, 0, 0);
        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_od, v_od, 1, 1, 0, 0);
        res -= 4;
    }
    if (res >= 2) {
        int Y1, Y2, U, V, count_lum = count << 1;
        __m128i l_src1, u_src, v_src;
        __m128i yl_ev, yl_od;
        __m128i u_ev, u_od, v_ev, v_od, temp;

        yl_ev = __lsx_vldrepl_w(&t, 0);
        yl_od = yl_ev;
        u_ev  = yl_ev;
        v_ev  = yl_ev;
        u_od  = yl_ev;
        v_od  = yl_ev;

        for (j = 0; j < lumFilterSize; j++) {
            temp   = __lsx_vldrepl_h((lumFilter + j), 0);
            l_src1 = __lsx_vld(lumSrc[j] + count_lum, 0);
            yl_ev  = __lsx_vmaddwev_w_h(yl_ev, temp, l_src1);
            yl_od  = __lsx_vmaddwod_w_h(yl_od, temp, l_src1);
        }
        for (j = 0; j < chrFilterSize; j++) {
            DUP2_ARG2(__lsx_vld, chrUSrc[j] + count, 0, chrVSrc[j] + count, 0,
                      u_src, v_src);
            temp = __lsx_vldrepl_h((chrFilter + j), 0);
            u_ev = __lsx_vmaddwev_w_h(u_ev, temp, u_src);
            u_od = __lsx_vmaddwod_w_h(u_od, temp, u_src);
            v_ev = __lsx_vmaddwev_w_h(v_ev, temp, v_src);
            v_od = __lsx_vmaddwod_w_h(v_od, temp, v_src);
        }
        yl_ev = __lsx_vsrai_w(yl_ev, 19);
        yl_od = __lsx_vsrai_w(yl_od, 19);
        u_ev  = __lsx_vsrai_w(u_ev, 19);
        v_ev  = __lsx_vsrai_w(v_ev, 19);
        u_od  = __lsx_vsrai_w(u_od, 19);
        v_od  = __lsx_vsrai_w(v_od, 19);
        u_ev  = __lsx_vadd_w(u_ev, headroom);
        v_ev  = __lsx_vadd_w(v_ev, headroom);
        u_od  = __lsx_vadd_w(u_od, headroom);
        v_od  = __lsx_vadd_w(v_od, headroom);

        WRITE_YUV2RGB_LSX(yl_ev, yl_od, u_ev, v_ev, 0, 0, 0, 0);
        res -= 2;
    }
    for (; count < len_count; count++) {
        int Y1 = 1 << 18;
        int Y2 = Y1;
        int U  = Y1;
        int V  = Y1;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][count * 2]     * lumFilter[j];
            Y2 += lumSrc[j][count * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][count] * chrFilter[j];
            V += chrVSrc[j][count] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
        g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] +
             c->table_gV[V + YUVRGB_TABLE_HEADROOM]);
        b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
        yuv2rgb_write(dest, count, Y1, Y2, 0, 0,
                      r, g, b, y, target, 0);
    }
}

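/* Bilinear (two-row) path: blend buf[0]/buf[1] and the two chroma rows
 * with weights yalpha/uvalpha against 4096, then convert as above. */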
static void
yuv2rgb_2_template_lsx(SwsContext *c, const int16_t *buf[2],
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf[2], uint8_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
                       enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;
    int i, count = 0;
    int len = dstW - 7;
    int len_count = (dstW + 1) >> 1;
    const void *r, *g, *b;
    int head = YUVRGB_TABLE_HEADROOM;
    __m128i v_yalpha1  = __lsx_vreplgr2vr_w(yalpha1);
    __m128i v_uvalpha1 = __lsx_vreplgr2vr_w(uvalpha1);
    __m128i v_yalpha   = __lsx_vreplgr2vr_w(yalpha);
    __m128i v_uvalpha  = __lsx_vreplgr2vr_w(uvalpha);
    __m128i headroom   = __lsx_vreplgr2vr_w(head);
    __m128i zero       = __lsx_vldi(0);

    for (i = 0; i < len; i += 8) {
        int Y1, Y2, U, V;
        int i_dex = i << 1;
        int c_dex = count << 1;
        __m128i y0_h, y0_l, y0, u0, v0;
        __m128i y1_h, y1_l, y1, u1, v1;
        __m128i y_l, y_h, u, v;

        DUP4_ARG2(__lsx_vldx, buf0, i_dex, ubuf0, c_dex, vbuf0, c_dex,
                  buf1, i_dex, y0, u0, v0, y1);
        DUP2_ARG2(__lsx_vldx, ubuf1, c_dex, vbuf1, c_dex, u1, v1);
        DUP2_ARG2(__lsx_vsllwil_w_h, y0, 0, y1, 0, y0_l, y1_l);
        DUP2_ARG1(__lsx_vexth_w_h, y0, y1, y0_h, y1_h);
        DUP4_ARG2(__lsx_vilvl_h, zero, u0, zero, u1, zero, v0, zero, v1,
                  u0, u1, v0, v1);
        y0_l = __lsx_vmul_w(y0_l, v_yalpha1);
        y0_h = __lsx_vmul_w(y0_h, v_yalpha1);
        u0   = __lsx_vmul_w(u0, v_uvalpha1);
        v0   = __lsx_vmul_w(v0, v_uvalpha1);
        y_l  = __lsx_vmadd_w(y0_l, v_yalpha, y1_l);
        y_h  = __lsx_vmadd_w(y0_h, v_yalpha, y1_h);
        u    = __lsx_vmadd_w(u0, v_uvalpha, u1);
        v    = __lsx_vmadd_w(v0, v_uvalpha, v1);
        y_l  = __lsx_vsrai_w(y_l, 19);
        y_h  = __lsx_vsrai_w(y_h, 19);
        u    = __lsx_vsrai_w(u, 19);
        v    = __lsx_vsrai_w(v, 19);
        u    = __lsx_vadd_w(u, headroom);
        v    = __lsx_vadd_w(v, headroom);
        WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 0, 1, 0, 0);
        WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 2, 3, 1, 1);
        WRITE_YUV2RGB_LSX(y_h, y_h, u, v, 0, 1, 2, 2);
        WRITE_YUV2RGB_LSX(y_h, y_h, u, v, 2, 3, 3, 3);
    }
    if (dstW - i >= 4) {
        int Y1, Y2, U, V;
        int i_dex = i << 1;
        __m128i y0_l, y0, u0, v0;
        __m128i y1_l, y1, u1, v1;
        __m128i y_l, u, v;

        y0 = __lsx_vldx(buf0, i_dex);
        u0 = __lsx_vldrepl_d((ubuf0 + count), 0);
        v0 = __lsx_vldrepl_d((vbuf0 + count), 0);
        y1 = __lsx_vldx(buf1, i_dex);
        u1 = __lsx_vldrepl_d((ubuf1 + count), 0);
        v1 = __lsx_vldrepl_d((vbuf1 + count), 0);
        DUP2_ARG2(__lsx_vilvl_h, zero, y0, zero, y1, y0_l, y1_l);
        DUP4_ARG2(__lsx_vilvl_h, zero, u0, zero, u1, zero, v0, zero, v1,
                  u0, u1, v0, v1);
        y0_l = __lsx_vmul_w(y0_l, v_yalpha1);
        u0   = __lsx_vmul_w(u0, v_uvalpha1);
        v0   = __lsx_vmul_w(v0, v_uvalpha1);
        y_l  = __lsx_vmadd_w(y0_l, v_yalpha, y1_l);
        u    = __lsx_vmadd_w(u0, v_uvalpha, u1);
        v    = __lsx_vmadd_w(v0, v_uvalpha, v1);
        y_l  = __lsx_vsrai_w(y_l, 19);
        u    = __lsx_vsrai_w(u, 19);
        v    = __lsx_vsrai_w(v, 19);
        u    = __lsx_vadd_w(u, headroom);
        v    = __lsx_vadd_w(v, headroom);
        WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 0, 1, 0, 0);
        WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 2, 3, 1, 1);
        i += 4;
    }
    for (; count < len_count; count++) {
        int Y1 = (buf0[count * 2]     * yalpha1 +
                  buf1[count * 2]     * yalpha) >> 19;
        int Y2 = (buf0[count * 2 + 1] * yalpha1 +
                  buf1[count * 2 + 1] * yalpha) >> 19;
        int U  = (ubuf0[count] * uvalpha1 + ubuf1[count] * uvalpha) >> 19;
        int V  = (vbuf0[count] * uvalpha1 + vbuf1[count] * uvalpha) >> 19;

        r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
        g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] +
             c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
        b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
        yuv2rgb_write(dest, count, Y1, Y2, 0, 0,
                      r, g, b, y, target, 0);
    }
}

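/* Single-row path: for uvalpha < 2048 the single chroma row is used,
 * rounded as (x + 64) >> 7; otherwise ubuf[0]/ubuf[1] and vbuf[0]/vbuf[1]
 * are averaged as (u0 + u1 + 128) >> 8. */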
static void
yuv2rgb_1_template_lsx(SwsContext *c, const int16_t *buf0,
                       const int16_t *ubuf[2], const int16_t *vbuf[2],
                       const int16_t *abuf0, uint8_t *dest, int dstW,
                       int uvalpha, int y, enum AVPixelFormat target,
                       int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i;
    int len = (dstW - 7);
    int len_count = (dstW + 1) >> 1;
    const void *r, *g, *b;

    if (uvalpha < 2048) {
        int count = 0;
        int head = YUVRGB_TABLE_HEADROOM;
        __m128i headroom = __lsx_vreplgr2vr_h(head);

        for (i = 0; i < len; i += 8) {
            int Y1, Y2, U, V;
            int i_dex = i << 1;
            int c_dex = count << 1;
            __m128i src_y, src_u, src_v;
            __m128i u, v, uv, y_l, y_h;

            src_y = __lsx_vldx(buf0, i_dex);
            DUP2_ARG2(__lsx_vldx, ubuf0, c_dex, vbuf0, c_dex, src_u, src_v);
            src_y = __lsx_vsrari_h(src_y, 7);
            src_u = __lsx_vsrari_h(src_u, 7);
            src_v = __lsx_vsrari_h(src_v, 7);
            y_l   = __lsx_vsllwil_w_h(src_y, 0);
            y_h   = __lsx_vexth_w_h(src_y);
            uv    = __lsx_vilvl_h(src_v, src_u);
            u     = __lsx_vaddwev_w_h(uv, headroom);
            v     = __lsx_vaddwod_w_h(uv, headroom);
            WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 0, 1, 0, 0);
            WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 2, 3, 1, 1);
            WRITE_YUV2RGB_LSX(y_h, y_h, u, v, 0, 1, 2, 2);
            WRITE_YUV2RGB_LSX(y_h, y_h, u, v, 2, 3, 3, 3);
        }
        if (dstW - i >= 4) {
            int Y1, Y2, U, V;
            int i_dex = i << 1;
            __m128i src_y, src_u, src_v;
            __m128i y_l, u, v, uv;

            src_y = __lsx_vldx(buf0, i_dex);
            src_u = __lsx_vldrepl_d((ubuf0 + count), 0);
            src_v = __lsx_vldrepl_d((vbuf0 + count), 0);
            y_l   = __lsx_vsrari_h(src_y, 7);
            y_l   = __lsx_vsllwil_w_h(y_l, 0);
            uv    = __lsx_vilvl_h(src_v, src_u);
            uv    = __lsx_vsrari_h(uv, 7);
            u     = __lsx_vaddwev_w_h(uv, headroom);
            v     = __lsx_vaddwod_w_h(uv, headroom);
            WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 0, 1, 0, 0);
            WRITE_YUV2RGB_LSX(y_l, y_l, u, v, 2, 3, 1, 1);
            i += 4;
        }
        for (; count < len_count; count++) {
            int Y1 = (buf0[count * 2    ] + 64) >> 7;
            int Y2 = (buf0[count * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[count] + 64) >> 7;
            int V  = (vbuf0[count] + 64) >> 7;

            r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
            g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] +
                 c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
            b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
            yuv2rgb_write(dest, count, Y1, Y2, 0, 0,
                          r, g, b, y, target, 0);
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        int count = 0;
        int HEADROOM = YUVRGB_TABLE_HEADROOM;
        __m128i headroom = __lsx_vreplgr2vr_w(HEADROOM);

        for (i = 0; i < len; i += 8) {
            int Y1, Y2, U, V;
            int i_dex = i << 1;
            int c_dex = count << 1;
            __m128i src_y, src_u0, src_v0, src_u1, src_v1;
            __m128i y_l, y_h, u1, u2, v1, v2;

            DUP4_ARG2(__lsx_vldx, buf0, i_dex, ubuf0, c_dex, vbuf0, c_dex,
                      ubuf1, c_dex, src_y, src_u0, src_v0, src_u1);
            src_v1 = __lsx_vldx(vbuf1, c_dex);
            src_y  = __lsx_vsrari_h(src_y, 7);
            u1     = __lsx_vaddwev_w_h(src_u0, src_u1);
            v1     = __lsx_vaddwod_w_h(src_u0, src_u1);
            u2     = __lsx_vaddwev_w_h(src_v0, src_v1);
            v2     = __lsx_vaddwod_w_h(src_v0, src_v1);
            y_l    = __lsx_vsllwil_w_h(src_y, 0);
            y_h    = __lsx_vexth_w_h(src_y);
            u1     = __lsx_vsrari_w(u1, 8);
            v1     = __lsx_vsrari_w(v1, 8);
            u2     = __lsx_vsrari_w(u2, 8);
            v2     = __lsx_vsrari_w(v2, 8);
            u1     = __lsx_vadd_w(u1, headroom);
            v1     = __lsx_vadd_w(v1, headroom);
            u2     = __lsx_vadd_w(u2, headroom);
            v2     = __lsx_vadd_w(v2, headroom);
            WRITE_YUV2RGB_LSX(y_l, y_l, u1, v1, 0, 1, 0, 0);
            WRITE_YUV2RGB_LSX(y_l, y_l, u2, v2, 2, 3, 0, 0);
            WRITE_YUV2RGB_LSX(y_h, y_h, u1, v1, 0, 1, 1, 1);
            WRITE_YUV2RGB_LSX(y_h, y_h, u2, v2, 2, 3, 1, 1);
        }
        if (dstW - i >= 4) {
            int Y1, Y2, U, V;
            int i_dex = i << 1;
            __m128i src_y, src_u0, src_v0, src_u1, src_v1;
            __m128i uv;

            src_y  = __lsx_vldx(buf0, i_dex);
            src_u0 = __lsx_vldrepl_d((ubuf0 + count), 0);
            src_v0 = __lsx_vldrepl_d((vbuf0 + count), 0);
            src_u1 = __lsx_vldrepl_d((ubuf1 + count), 0);
            src_v1 = __lsx_vldrepl_d((vbuf1 + count), 0);

            src_u0 = __lsx_vilvl_h(src_u1, src_u0);
            src_v0 = __lsx_vilvl_h(src_v1, src_v0);
            src_y  = __lsx_vsrari_h(src_y, 7);
            src_y  = __lsx_vsllwil_w_h(src_y, 0);
            uv     = __lsx_vilvl_h(src_v0, src_u0);
            uv     = __lsx_vhaddw_w_h(uv, uv);
            uv     = __lsx_vsrari_w(uv, 8);
            uv     = __lsx_vadd_w(uv, headroom);
            WRITE_YUV2RGB_LSX(src_y, src_y, uv, uv, 0, 1, 0, 1);
            WRITE_YUV2RGB_LSX(src_y, src_y, uv, uv, 2, 3, 2, 3);
            i += 4;
        }
        for (; count < len_count; count++) {
            int Y1 = (buf0[count * 2    ] + 64) >> 7;
            int Y2 = (buf0[count * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[count] + ubuf1[count] + 128) >> 8;
            int V  = (vbuf0[count] + vbuf1[count] + 128) >> 8;

            r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
            g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] +
                 c->table_gV[V + YUVRGB_TABLE_HEADROOM]),
            b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
            yuv2rgb_write(dest, count, Y1, Y2, 0, 0,
                          r, g, b, y, target, 0);
        }
    }
}

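/* The three macros below stack: YUV2RGBWRAPPERX emits the _X (arbitrary
 * filter) entry point, YUV2RGBWRAPPERX2 adds _2 (bilinear), and
 * YUV2RGBWRAPPER adds _1 (single row), all for one output format. */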
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha)                              \
static void name ## ext ## _X_lsx(SwsContext *c, const int16_t *lumFilter,           \
                                  const int16_t **lumSrc, int lumFilterSize,         \
                                  const int16_t *chrFilter, const int16_t **chrUSrc, \
                                  const int16_t **chrVSrc, int chrFilterSize,        \
                                  const int16_t **alpSrc, uint8_t *dest, int dstW,   \
                                  int y)                                             \
{                                                                                    \
    name ## base ## _X_template_lsx(c, lumFilter, lumSrc, lumFilterSize,             \
                                    chrFilter, chrUSrc, chrVSrc, chrFilterSize,      \
                                    alpSrc, dest, dstW, y, fmt, hasAlpha);           \
}

#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha)                             \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha)                                      \
static void name ## ext ## _2_lsx(SwsContext *c, const int16_t *buf[2],             \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],    \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW,   \
                                  int yalpha, int uvalpha, int y)                    \
{                                                                                    \
    name ## base ## _2_template_lsx(c, buf, ubuf, vbuf, abuf, dest,                  \
                                    dstW, yalpha, uvalpha, y, fmt, hasAlpha);        \
}

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)                               \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha)                                     \
static void name ## ext ## _1_lsx(SwsContext *c, const int16_t *buf0,               \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],    \
                                  const int16_t *abuf0, uint8_t *dest, int dstW,     \
                                  int uvalpha, int y)                                \
{                                                                                    \
    name ## base ## _1_template_lsx(c, buf0, ubuf, vbuf, abuf0, dest,                \
                                    dstW, uvalpha, y, fmt, hasAlpha);                \
}

#if CONFIG_SMALL
#else
#if CONFIG_SWSCALE_ALPHA
#endif
YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
YUV2RGBWRAPPER(yuv2rgb,, x32,   AV_PIX_FMT_RGB32,   0)
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565,    0)
YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555,    0)
YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444,    0)
YUV2RGBWRAPPER(yuv2rgb,, 8,  AV_PIX_FMT_RGB8,      0)
YUV2RGBWRAPPER(yuv2rgb,, 4,  AV_PIX_FMT_RGB4,      0)
YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)

// This function is copied from libswscale/output.c
static av_always_inline void yuv2rgb_write_full(SwsContext *c,
    uint8_t *dest, int i, int R, int A, int G, int B,
    int y, enum AVPixelFormat target, int hasAlpha, int err[4])
{
    int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;

    if ((R | G | B) & 0xC0000000) {
        R = av_clip_uintp2(R, 30);
        G = av_clip_uintp2(G, 30);
        B = av_clip_uintp2(B, 30);
    }
    switch (target) {
    case AV_PIX_FMT_ARGB:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = R >> 22;
        dest[2] = G >> 22;
        dest[3] = B >> 22;
        break;
    case AV_PIX_FMT_RGB24:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        break;
    case AV_PIX_FMT_RGBA:
        dest[0] = R >> 22;
        dest[1] = G >> 22;
        dest[2] = B >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_ABGR:
        dest[0] = hasAlpha ? A : 255;
        dest[1] = B >> 22;
        dest[2] = G >> 22;
        dest[3] = R >> 22;
        break;
    case AV_PIX_FMT_BGR24:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        break;
    case AV_PIX_FMT_BGRA:
        dest[0] = B >> 22;
        dest[1] = G >> 22;
        dest[2] = R >> 22;
        dest[3] = hasAlpha ? A : 255;
        break;
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB8:
    {
        int r, g, b;

        switch (c->dither) {
        default:
        case SWS_DITHER_AUTO:
        case SWS_DITHER_ED:
            R >>= 22;
            G >>= 22;
            B >>= 22;
            R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
            G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
            B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
            c->dither_error[0][i] = err[0];
            c->dither_error[1][i] = err[1];
            c->dither_error[2][i] = err[2];
            r = R >> (isrgb8 ? 5 : 7);
            g = G >> (isrgb8 ? 5 : 6);
            b = B >> (isrgb8 ? 6 : 7);
            r = av_clip(r, 0, isrgb8 ? 7 : 1);
            g = av_clip(g, 0, isrgb8 ? 7 : 3);
            b = av_clip(b, 0, isrgb8 ? 3 : 1);
            err[0] = R - r*(isrgb8 ? 36 : 255);
            err[1] = G - g*(isrgb8 ? 36 : 85);
            err[2] = B - b*(isrgb8 ? 85 : 255);
            break;
        case SWS_DITHER_A_DITHER:
            if (isrgb8) {
                /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define A_DITHER(u,v)   (((((u)+((v)*236))*119)&0xff))
                r = (((R >> 19) + A_DITHER(i,y)        - 96) >> 8);
                g = (((G >> 19) + A_DITHER(i + 17,y)   - 96) >> 8);
                b = (((B >> 20) + A_DITHER(i + 17*2,y) - 96) >> 8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + A_DITHER(i,y)        - 256) >> 8);
                g = (((G >> 19) + A_DITHER(i + 17,y)   - 256) >> 8);
                b = (((B >> 21) + A_DITHER(i + 17*2,y) - 256) >> 8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }
            break;
        case SWS_DITHER_X_DITHER:
            if (isrgb8) {
                /* see http://pippin.gimp.org/a_dither/ for details/origin */
#define X_DITHER(u,v)   (((((u)^((v)*237))*181)&0x1ff)/2)
                r = (((R >> 19) + X_DITHER(i,y)        - 96) >> 8);
                g = (((G >> 19) + X_DITHER(i + 17,y)   - 96) >> 8);
                b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96) >> 8);
                r = av_clip_uintp2(r, 3);
                g = av_clip_uintp2(g, 3);
                b = av_clip_uintp2(b, 2);
            } else {
                r = (((R >> 21) + X_DITHER(i,y)        - 256) >> 8);
                g = (((G >> 19) + X_DITHER(i + 17,y)   - 256) >> 8);
                b = (((B >> 21) + X_DITHER(i + 17*2,y) - 256) >> 8);
                r = av_clip_uintp2(r, 1);
                g = av_clip_uintp2(g, 2);
                b = av_clip_uintp2(b, 1);
            }
            break;
        }
        if (target == AV_PIX_FMT_BGR4_BYTE) {
            dest[0] = r + 2*g + 8*b;
        } else if (target == AV_PIX_FMT_RGB4_BYTE) {
            dest[0] = b + 2*g + 8*r;
        } else if (target == AV_PIX_FMT_BGR8) {
            dest[0] = r + 8*g + 64*b;
        } else if (target == AV_PIX_FMT_RGB8) {
            dest[0] = b + 4*g + 32*r;
        } else
            av_assert2(0);
        break;
    }
    }
}

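/* Load the scaler's YUV->RGB matrix coefficients from the context and
 * splat each one into an LSX vector. */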
#define YUVTORGB_SETUP_LSX                             \
    int y_offset = c->yuv2rgb_y_offset;                \
    int y_coeff  = c->yuv2rgb_y_coeff;                 \
    int v2r_coe  = c->yuv2rgb_v2r_coeff;               \
    int v2g_coe  = c->yuv2rgb_v2g_coeff;               \
    int u2g_coe  = c->yuv2rgb_u2g_coeff;               \
    int u2b_coe  = c->yuv2rgb_u2b_coeff;               \
    __m128i offset = __lsx_vreplgr2vr_w(y_offset);     \
    __m128i coeff  = __lsx_vreplgr2vr_w(y_coeff);      \
    __m128i v2r    = __lsx_vreplgr2vr_w(v2r_coe);      \
    __m128i v2g    = __lsx_vreplgr2vr_w(v2g_coe);      \
    __m128i u2g    = __lsx_vreplgr2vr_w(u2g_coe);      \
    __m128i u2b    = __lsx_vreplgr2vr_w(u2b_coe);

#define YUVTORGB_LSX(y, u, v, R, G, B, offset, coeff,  \
                     y_temp, v2r, v2g, u2g, u2b)       \
{                                                      \
    y = __lsx_vsub_w(y, offset);                       \
    y = __lsx_vmul_w(y, coeff);                        \
    y = __lsx_vadd_w(y, y_temp);                       \
    R = __lsx_vmadd_w(y, v, v2r);                      \
    v = __lsx_vmadd_w(y, v, v2g);                      \
    G = __lsx_vmadd_w(v, u, u2g);                      \
    B = __lsx_vmadd_w(y, u, u2b);                      \
}

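/* Extract lane t1 of each R/G/B (and alpha) vector and hand the pixel at
 * index i + s to yuv2rgb_write_full(); dest advances by one 'step'. */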
#define WRITE_FULL_A_LSX(r, g, b, a, t1, s)                                  \
{                                                                            \
    R = __lsx_vpickve2gr_w(r, t1);                                           \
    G = __lsx_vpickve2gr_w(g, t1);                                           \
    B = __lsx_vpickve2gr_w(b, t1);                                           \
    A = __lsx_vpickve2gr_w(a, t1);                                           \
    if (A & 0x100)                                                           \
        A = av_clip_uint8(A);                                                \
    yuv2rgb_write_full(c, dest, i + s, R, A, G, B, y, target, hasAlpha, err);\
    dest += step;                                                            \
}

#define WRITE_FULL_LSX(r, g, b, t1, s)                                       \
{                                                                            \
    R = __lsx_vpickve2gr_w(r, t1);                                           \
    G = __lsx_vpickve2gr_w(g, t1);                                           \
    B = __lsx_vpickve2gr_w(b, t1);                                           \
    yuv2rgb_write_full(c, dest, i + s, R, 0, G, B, y, target, hasAlpha, err);\
    dest += step;                                                            \
}

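/* The *_full_* writers convert each pixel through the coefficient matrix
 * instead of the RGB lookup tables. Vertical filtering runs at 10-bit
 * intermediate precision before YUVTORGB_LSX applies the vectors set up
 * by YUVTORGB_SETUP_LSX. */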
static void
yuv2rgb_full_X_template_lsx(SwsContext *c, const int16_t *lumFilter,
                            const int16_t **lumSrc, int lumFilterSize,
                            const int16_t *chrFilter, const int16_t **chrUSrc,
                            const int16_t **chrVSrc, int chrFilterSize,
                            const int16_t **alpSrc, uint8_t *dest,
                            int dstW, int y, enum AVPixelFormat target,
                            int hasAlpha)
{
    int i, j, B, G, R, A;
    int step = (target == AV_PIX_FMT_RGB24 ||
                target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};
    int a_temp = 1 << 18;
    int templ = 1 << 9;
    int tempc = templ - (128 << 19);
    int ytemp = 1 << 21;
    int len = dstW - 7;
    __m128i y_temp = __lsx_vreplgr2vr_w(ytemp);
    YUVTORGB_SETUP_LSX

    if (target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
        || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
        step = 1;
    for (i = 0; i < len; i += 8) {
        __m128i l_src, u_src, v_src;
        __m128i y_ev, y_od, u_ev, u_od, v_ev, v_od, temp;
        __m128i R_ev, R_od, G_ev, G_od, B_ev, B_od;
        int n = i << 1;

        y_ev = y_od = __lsx_vreplgr2vr_w(templ);
        u_ev = u_od = v_ev = v_od = __lsx_vreplgr2vr_w(tempc);
        for (j = 0; j < lumFilterSize; j++) {
            temp  = __lsx_vldrepl_h((lumFilter + j), 0);
            l_src = __lsx_vldx(lumSrc[j], n);
            y_ev  = __lsx_vmaddwev_w_h(y_ev, l_src, temp);
            y_od  = __lsx_vmaddwod_w_h(y_od, l_src, temp);
        }
        for (j = 0; j < chrFilterSize; j++) {
            temp = __lsx_vldrepl_h((chrFilter + j), 0);
            DUP2_ARG2(__lsx_vldx, chrUSrc[j], n, chrVSrc[j], n,
                      u_src, v_src);
            DUP2_ARG3(__lsx_vmaddwev_w_h, u_ev, u_src, temp, v_ev,
                      v_src, temp, u_ev, v_ev);
            DUP2_ARG3(__lsx_vmaddwod_w_h, u_od, u_src, temp, v_od,
                      v_src, temp, u_od, v_od);
        }
        y_ev = __lsx_vsrai_w(y_ev, 10);
        y_od = __lsx_vsrai_w(y_od, 10);
        u_ev = __lsx_vsrai_w(u_ev, 10);
        u_od = __lsx_vsrai_w(u_od, 10);
        v_ev = __lsx_vsrai_w(v_ev, 10);
        v_od = __lsx_vsrai_w(v_od, 10);
        YUVTORGB_LSX(y_ev, u_ev, v_ev, R_ev, G_ev, B_ev, offset, coeff,
                     y_temp, v2r, v2g, u2g, u2b);
        YUVTORGB_LSX(y_od, u_od, v_od, R_od, G_od, B_od, offset, coeff,
                     y_temp, v2r, v2g, u2g, u2b);

        if (hasAlpha) {
            __m128i a_src, a_ev, a_od;

            a_ev = a_od = __lsx_vreplgr2vr_w(a_temp);
            for (j = 0; j < lumFilterSize; j++) {
                temp  = __lsx_vldrepl_h(lumFilter + j, 0);
                a_src = __lsx_vldx(alpSrc[j], n);
                a_ev  = __lsx_vmaddwev_w_h(a_ev, a_src, temp);
                a_od  = __lsx_vmaddwod_w_h(a_od, a_src, temp);
            }
            a_ev = __lsx_vsrai_w(a_ev, 19);
            a_od = __lsx_vsrai_w(a_od, 19);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 0, 0);
            WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 0, 1);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 1, 2);
            WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 1, 3);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 2, 4);
            WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 2, 5);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 3, 6);
            WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 3, 7);
        } else {
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 0, 0);
            WRITE_FULL_LSX(R_od, G_od, B_od, 0, 1);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 1, 2);
            WRITE_FULL_LSX(R_od, G_od, B_od, 1, 3);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 2, 4);
            WRITE_FULL_LSX(R_od, G_od, B_od, 2, 5);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 3, 6);
            WRITE_FULL_LSX(R_od, G_od, B_od, 3, 7);
        }
    }
    if (dstW - i >= 4) {
        __m128i l_src, u_src, v_src;
        __m128i y_ev, u_ev, v_ev, uv, temp;
        __m128i R_ev, G_ev, B_ev;
        int n = i << 1;

        y_ev = __lsx_vreplgr2vr_w(templ);
        u_ev = v_ev = __lsx_vreplgr2vr_w(tempc);
        for (j = 0; j < lumFilterSize; j++) {
            temp  = __lsx_vldrepl_h((lumFilter + j), 0);
            l_src = __lsx_vldx(lumSrc[j], n);
            l_src = __lsx_vilvl_h(l_src, l_src);
            y_ev  = __lsx_vmaddwev_w_h(y_ev, l_src, temp);
        }
        for (j = 0; j < chrFilterSize; j++) {
            temp = __lsx_vldrepl_h((chrFilter + j), 0);
            DUP2_ARG2(__lsx_vldx, chrUSrc[j], n, chrVSrc[j], n, u_src, v_src);
            uv   = __lsx_vilvl_h(v_src, u_src);
            u_ev = __lsx_vmaddwev_w_h(u_ev, uv, temp);
            v_ev = __lsx_vmaddwod_w_h(v_ev, uv, temp);
        }
        y_ev = __lsx_vsrai_w(y_ev, 10);
        u_ev = __lsx_vsrai_w(u_ev, 10);
        v_ev = __lsx_vsrai_w(v_ev, 10);
        YUVTORGB_LSX(y_ev, u_ev, v_ev, R_ev, G_ev, B_ev, offset, coeff,
                     y_temp, v2r, v2g, u2g, u2b);

        if (hasAlpha) {
            __m128i a_src, a_ev;

            a_ev = __lsx_vreplgr2vr_w(a_temp);
            for (j = 0; j < lumFilterSize; j++) {
                temp  = __lsx_vldrepl_h(lumFilter + j, 0);
                a_src = __lsx_vldx(alpSrc[j], n);
                a_src = __lsx_vilvl_h(a_src, a_src);
                a_ev  = __lsx_vmaddwev_w_h(a_ev, a_src, temp);
            }
            a_ev = __lsx_vsrai_w(a_ev, 19);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 0, 0);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 1, 1);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 2, 2);
            WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 3, 3);
        } else {
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 0, 0);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 1, 1);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 2, 2);
            WRITE_FULL_LSX(R_ev, G_ev, B_ev, 3, 3);
        }
        i += 4;
    }
    for (; i < dstW; i++) {
        int Y = templ;
        int V, U = V = tempc;

        A = 0;
        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        Y -= y_offset;
        Y *= y_coeff;
        Y += ytemp;
        R = (unsigned)Y + V * v2r_coe;
        G = (unsigned)Y + V * v2g_coe + U * u2g_coe;
        B = (unsigned)Y + U * u2b_coe;
        yuv2rgb_write_full(c, dest, i, R, A, G, B, y, target, hasAlpha, err);
        dest += step;
    }
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}

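/* Bilinear (two-row) variant of the full writer: blend the rows, remove
 * the 128 << 19 chroma bias, then run the matrix conversion. */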
  1071. static void
  1072. yuv2rgb_full_2_template_lsx(SwsContext *c, const int16_t *buf[2],
  1073. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1074. const int16_t *abuf[2], uint8_t *dest, int dstW,
  1075. int yalpha, int uvalpha, int y,
  1076. enum AVPixelFormat target, int hasAlpha)
  1077. {
  1078. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  1079. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
  1080. *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
  1081. *abuf0 = hasAlpha ? abuf[0] : NULL,
  1082. *abuf1 = hasAlpha ? abuf[1] : NULL;
  1083. int yalpha1 = 4096 - yalpha;
  1084. int uvalpha1 = 4096 - uvalpha;
  1085. int uvtemp = 128 << 19;
  1086. int atemp = 1 << 18;
  1087. int err[4] = {0};
  1088. int ytemp = 1 << 21;
  1089. int len = dstW - 7;
  1090. int i, R, G, B, A;
  1091. int step = (target == AV_PIX_FMT_RGB24 ||
  1092. target == AV_PIX_FMT_BGR24) ? 3 : 4;
  1093. __m128i v_uvalpha1 = __lsx_vreplgr2vr_w(uvalpha1);
  1094. __m128i v_yalpha1 = __lsx_vreplgr2vr_w(yalpha1);
  1095. __m128i v_uvalpha = __lsx_vreplgr2vr_w(uvalpha);
  1096. __m128i v_yalpha = __lsx_vreplgr2vr_w(yalpha);
  1097. __m128i uv = __lsx_vreplgr2vr_w(uvtemp);
  1098. __m128i a_bias = __lsx_vreplgr2vr_w(atemp);
  1099. __m128i y_temp = __lsx_vreplgr2vr_w(ytemp);
  1100. YUVTORGB_SETUP_LSX
  1101. av_assert2(yalpha <= 4096U);
  1102. av_assert2(uvalpha <= 4096U);
  1103. if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
  1104. || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
  1105. step = 1;
  1106. for (i = 0; i < len; i += 8) {
  1107. __m128i b0, b1, ub0, ub1, vb0, vb1;
  1108. __m128i y0_l, y0_h, y1_l, y1_h, u0_l, u0_h;
  1109. __m128i v0_l, v0_h, u1_l, u1_h, v1_l, v1_h;
  1110. __m128i y_l, y_h, v_l, v_h, u_l, u_h;
  1111. __m128i R_l, R_h, G_l, G_h, B_l, B_h;
  1112. int n = i << 1;
  1113. DUP4_ARG2(__lsx_vldx, buf0, n, buf1, n, ubuf0,
  1114. n, ubuf1, n, b0, b1, ub0, ub1);
  1115. DUP2_ARG2(__lsx_vldx, vbuf0, n, vbuf1, n, vb0 , vb1);
  1116. DUP2_ARG2(__lsx_vsllwil_w_h, b0, 0, b1, 0, y0_l, y1_l);
  1117. DUP4_ARG2(__lsx_vsllwil_w_h, ub0, 0, ub1, 0, vb0, 0, vb1, 0,
  1118. u0_l, u1_l, v0_l, v1_l);
  1119. DUP2_ARG1(__lsx_vexth_w_h, b0, b1, y0_h, y1_h);
  1120. DUP4_ARG1(__lsx_vexth_w_h, ub0, ub1, vb0, vb1,
  1121. u0_h, u1_h, v0_h, v1_h);
  1122. y0_l = __lsx_vmul_w(y0_l, v_yalpha1);
  1123. y0_h = __lsx_vmul_w(y0_h, v_yalpha1);
  1124. u0_l = __lsx_vmul_w(u0_l, v_uvalpha1);
  1125. u0_h = __lsx_vmul_w(u0_h, v_uvalpha1);
  1126. v0_l = __lsx_vmul_w(v0_l, v_uvalpha1);
  1127. v0_h = __lsx_vmul_w(v0_h, v_uvalpha1);
  1128. y_l = __lsx_vmadd_w(y0_l, v_yalpha, y1_l);
  1129. y_h = __lsx_vmadd_w(y0_h, v_yalpha, y1_h);
  1130. u_l = __lsx_vmadd_w(u0_l, v_uvalpha, u1_l);
  1131. u_h = __lsx_vmadd_w(u0_h, v_uvalpha, u1_h);
  1132. v_l = __lsx_vmadd_w(v0_l, v_uvalpha, v1_l);
  1133. v_h = __lsx_vmadd_w(v0_h, v_uvalpha, v1_h);
  1134. u_l = __lsx_vsub_w(u_l, uv);
  1135. u_h = __lsx_vsub_w(u_h, uv);
  1136. v_l = __lsx_vsub_w(v_l, uv);
  1137. v_h = __lsx_vsub_w(v_h, uv);
  1138. y_l = __lsx_vsrai_w(y_l, 10);
  1139. y_h = __lsx_vsrai_w(y_h, 10);
  1140. u_l = __lsx_vsrai_w(u_l, 10);
  1141. u_h = __lsx_vsrai_w(u_h, 10);
  1142. v_l = __lsx_vsrai_w(v_l, 10);
  1143. v_h = __lsx_vsrai_w(v_h, 10);
  1144. YUVTORGB_LSX(y_l, u_l, v_l, R_l, G_l, B_l, offset, coeff,
  1145. y_temp, v2r, v2g, u2g, u2b);
  1146. YUVTORGB_LSX(y_h, u_h, v_h, R_h, G_h, B_h, offset, coeff,
  1147. y_temp, v2r, v2g, u2g, u2b);
  1148. if (hasAlpha) {
  1149. __m128i a0, a1, a0_l, a0_h;
  1150. __m128i a_l, a_h, a1_l, a1_h;
  1151. DUP2_ARG2(__lsx_vldx, abuf0, n, abuf1, n, a0, a1);
  1152. DUP2_ARG2(__lsx_vsllwil_w_h, a0, 0, a1, 0, a0_l, a1_l);
  1153. DUP2_ARG1(__lsx_vexth_w_h, a0, a1, a0_h, a1_h);
  1154. a_l = __lsx_vmadd_w(a_bias, a0_l, v_yalpha1);
  1155. a_h = __lsx_vmadd_w(a_bias, a0_h, v_yalpha1);
  1156. a_l = __lsx_vmadd_w(a_l, v_yalpha, a1_l);
  1157. a_h = __lsx_vmadd_w(a_h, v_yalpha, a1_h);
  1158. a_l = __lsx_vsrai_w(a_l, 19);
  1159. a_h = __lsx_vsrai_w(a_h, 19);
  1160. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 0, 0);
  1161. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 1, 1);
  1162. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 2, 2);
  1163. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 3, 3);
  1164. WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 0, 4);
  1165. WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 1, 5);
  1166. WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 2, 6);
  1167. WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 3, 7);
  1168. } else {
  1169. WRITE_FULL_LSX(R_l, G_l, B_l, 0, 0);
  1170. WRITE_FULL_LSX(R_l, G_l, B_l, 1, 1);
  1171. WRITE_FULL_LSX(R_l, G_l, B_l, 2, 2);
  1172. WRITE_FULL_LSX(R_l, G_l, B_l, 3, 3);
  1173. WRITE_FULL_LSX(R_h, G_h, B_h, 0, 4);
  1174. WRITE_FULL_LSX(R_h, G_h, B_h, 1, 5);
  1175. WRITE_FULL_LSX(R_h, G_h, B_h, 2, 6);
  1176. WRITE_FULL_LSX(R_h, G_h, B_h, 3, 7);
  1177. }
  1178. }
  1179. if (dstW - i >= 4) {
  1180. __m128i b0, b1, ub0, ub1, vb0, vb1;
  1181. __m128i y0_l, y1_l, u0_l;
  1182. __m128i v0_l, u1_l, v1_l;
  1183. __m128i y_l, u_l, v_l;
  1184. __m128i R_l, G_l, B_l;
  1185. int n = i << 1;
  1186. DUP4_ARG2(__lsx_vldx, buf0, n, buf1, n, ubuf0, n,
  1187. ubuf1, n, b0, b1, ub0, ub1);
  1188. DUP2_ARG2(__lsx_vldx, vbuf0, n, vbuf1, n, vb0, vb1);
  1189. DUP2_ARG2(__lsx_vsllwil_w_h, b0, 0, b1, 0, y0_l, y1_l);
  1190. DUP4_ARG2(__lsx_vsllwil_w_h, ub0, 0, ub1, 0, vb0, 0, vb1, 0,
  1191. u0_l, u1_l, v0_l, v1_l);
  1192. y0_l = __lsx_vmul_w(y0_l, v_yalpha1);
  1193. u0_l = __lsx_vmul_w(u0_l, v_uvalpha1);
  1194. v0_l = __lsx_vmul_w(v0_l, v_uvalpha1);
  1195. y_l = __lsx_vmadd_w(y0_l, v_yalpha, y1_l);
  1196. u_l = __lsx_vmadd_w(u0_l, v_uvalpha, u1_l);
  1197. v_l = __lsx_vmadd_w(v0_l, v_uvalpha, v1_l);
  1198. u_l = __lsx_vsub_w(u_l, uv);
  1199. v_l = __lsx_vsub_w(v_l, uv);
  1200. y_l = __lsx_vsrai_w(y_l, 10);
  1201. u_l = __lsx_vsrai_w(u_l, 10);
  1202. v_l = __lsx_vsrai_w(v_l, 10);
  1203. YUVTORGB_LSX(y_l, u_l, v_l, R_l, G_l, B_l, offset, coeff,
  1204. y_temp, v2r, v2g, u2g, u2b);
  1205. if (hasAlpha) {
  1206. __m128i a0, a1, a0_l;
  1207. __m128i a_l, a1_l;
  1208. DUP2_ARG2(__lsx_vldx, abuf0, n, abuf1, n, a0, a1);
  1209. DUP2_ARG2(__lsx_vsllwil_w_h, a0, 0, a1, 0, a0_l, a1_l);
  1210. a_l = __lsx_vmadd_w(a_bias, a0_l, v_yalpha1);
  1211. a_l = __lsx_vmadd_w(a_l, v_yalpha, a1_l);
  1212. a_l = __lsx_vsrai_w(a_l, 19);
  1213. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 0, 0);
  1214. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 1, 1);
  1215. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 2, 2);
  1216. WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 3, 3);
  1217. } else {
  1218. WRITE_FULL_LSX(R_l, G_l, B_l, 0, 0);
  1219. WRITE_FULL_LSX(R_l, G_l, B_l, 1, 1);
  1220. WRITE_FULL_LSX(R_l, G_l, B_l, 2, 2);
  1221. WRITE_FULL_LSX(R_l, G_l, B_l, 3, 3);
  1222. }
  1223. i += 4;
  1224. }
    for (; i < dstW; i++) {
        int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 10;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - uvtemp) >> 10;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - uvtemp) >> 10;

        A = 0;
        if (hasAlpha) {
            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + atemp) >> 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        Y -= y_offset;
        Y *= y_coeff;
        Y += ytemp;
        R = (unsigned)Y + V * v2r_coe;
        G = (unsigned)Y + V * v2g_coe + U * u2g_coe;
        B = (unsigned)Y + U * u2b_coe;
        yuv2rgb_write_full(c, dest, i, R, A, G, B, y, target, hasAlpha, err);
        dest += step;
    }
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
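
/*
 * Single-input-row ("unscaled") full-chroma output.  A per-pixel sketch
 * of what the vector code below computes, taken from the scalar tails of
 * this function (rounding and clipping are handled inside YUVTORGB_LSX
 * and yuv2rgb_write_full; V is treated analogously to U):
 *
 *     Y = buf0[i] << 2;
 *     U = (ubuf0[i] - (128 << 7)) << 2;             // uvalpha < 2048
 *     U = (ubuf0[i] + ubuf1[i] - (128 << 8)) << 1;  // otherwise
 *     A = hasAlpha ? (abuf0[i] + 64) >> 7 : 0;      // clipped to 8 bits
 */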
static void
yuv2rgb_full_1_template_lsx(SwsContext *c, const int16_t *buf0,
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf0, uint8_t *dest, int dstW,
                            int uvalpha, int y, enum AVPixelFormat target,
                            int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
    int i, B, G, R, A;
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    int err[4] = {0};
    int ytemp = 1 << 21;
    int bias_int = 64;
    int len = dstW - 7;
    __m128i y_temp = __lsx_vreplgr2vr_w(ytemp);
    YUVTORGB_SETUP_LSX

    if (target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE ||
        target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
        step = 1;
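    /* A small uvalpha means the second chroma row carries little weight,
     * so only ubuf[0]/vbuf[0] are read; otherwise the two chroma rows are
     * summed, with one bit less of shift to keep the overall scale. */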
    if (uvalpha < 2048) {
        int uvtemp = 128 << 7;
        __m128i uv = __lsx_vreplgr2vr_w(uvtemp);
        __m128i bias = __lsx_vreplgr2vr_w(bias_int);

        for (i = 0; i < len; i += 8) {
            __m128i b, ub, vb, ub_l, ub_h, vb_l, vb_h;
            __m128i y_l, y_h, u_l, u_h, v_l, v_h;
            __m128i R_l, R_h, G_l, G_h, B_l, B_h;
            int n = i << 1;

            DUP2_ARG2(__lsx_vldx, buf0, n, ubuf0, n, b, ub);
            vb = __lsx_vldx(vbuf0, n);
            y_l = __lsx_vsllwil_w_h(b, 2);
            y_h = __lsx_vexth_w_h(b);
            DUP2_ARG2(__lsx_vsllwil_w_h, ub, 0, vb, 0, ub_l, vb_l);
            DUP2_ARG1(__lsx_vexth_w_h, ub, vb, ub_h, vb_h);
            y_h = __lsx_vslli_w(y_h, 2);
            u_l = __lsx_vsub_w(ub_l, uv);
            u_h = __lsx_vsub_w(ub_h, uv);
            v_l = __lsx_vsub_w(vb_l, uv);
            v_h = __lsx_vsub_w(vb_h, uv);
            u_l = __lsx_vslli_w(u_l, 2);
            u_h = __lsx_vslli_w(u_h, 2);
            v_l = __lsx_vslli_w(v_l, 2);
            v_h = __lsx_vslli_w(v_h, 2);
            YUVTORGB_LSX(y_l, u_l, v_l, R_l, G_l, B_l, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            YUVTORGB_LSX(y_h, u_h, v_h, R_h, G_h, B_h, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            if (hasAlpha) {
                __m128i a_src;
                __m128i a_l, a_h;

                a_src = __lsx_vld(abuf0 + i, 0);
                a_l = __lsx_vsllwil_w_h(a_src, 0);
                a_h = __lsx_vexth_w_h(a_src);
                a_l = __lsx_vadd_w(a_l, bias);
                a_h = __lsx_vadd_w(a_h, bias);
                a_l = __lsx_vsrai_w(a_l, 7);
                a_h = __lsx_vsrai_w(a_h, 7);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 0, 0);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 1, 1);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 2, 2);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 3, 3);
                WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 0, 4);
                WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 1, 5);
                WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 2, 6);
                WRITE_FULL_A_LSX(R_h, G_h, B_h, a_h, 3, 7);
            } else {
                WRITE_FULL_LSX(R_l, G_l, B_l, 0, 0);
                WRITE_FULL_LSX(R_l, G_l, B_l, 1, 1);
                WRITE_FULL_LSX(R_l, G_l, B_l, 2, 2);
                WRITE_FULL_LSX(R_l, G_l, B_l, 3, 3);
                WRITE_FULL_LSX(R_h, G_h, B_h, 0, 4);
                WRITE_FULL_LSX(R_h, G_h, B_h, 1, 5);
                WRITE_FULL_LSX(R_h, G_h, B_h, 2, 6);
                WRITE_FULL_LSX(R_h, G_h, B_h, 3, 7);
            }
        }
        if (dstW - i >= 4) {
            __m128i b, ub, vb, ub_l, vb_l;
            __m128i y_l, u_l, v_l;
            __m128i R_l, G_l, B_l;
            int n = i << 1;

            DUP2_ARG2(__lsx_vldx, buf0, n, ubuf0, n, b, ub);
            vb = __lsx_vldx(vbuf0, n);
            y_l = __lsx_vsllwil_w_h(b, 0);
            DUP2_ARG2(__lsx_vsllwil_w_h, ub, 0, vb, 0, ub_l, vb_l);
            y_l = __lsx_vslli_w(y_l, 2);
            u_l = __lsx_vsub_w(ub_l, uv);
            v_l = __lsx_vsub_w(vb_l, uv);
            u_l = __lsx_vslli_w(u_l, 2);
            v_l = __lsx_vslli_w(v_l, 2);
            YUVTORGB_LSX(y_l, u_l, v_l, R_l, G_l, B_l, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            if (hasAlpha) {
                __m128i a_src, a_l;

                a_src = __lsx_vldx(abuf0, n);
                a_src = __lsx_vsllwil_w_h(a_src, 0);
                a_l = __lsx_vadd_w(bias, a_src);
                a_l = __lsx_vsrai_w(a_l, 7);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 0, 0);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 1, 1);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 2, 2);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 3, 3);
            } else {
                WRITE_FULL_LSX(R_l, G_l, B_l, 0, 0);
                WRITE_FULL_LSX(R_l, G_l, B_l, 1, 1);
                WRITE_FULL_LSX(R_l, G_l, B_l, 2, 2);
                WRITE_FULL_LSX(R_l, G_l, B_l, 3, 3);
            }
            i += 4;
        }
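        /* Per-pixel tail, same math as the vector path above. */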
        for (; i < dstW; i++) {
            int Y = buf0[i] << 2;
            int U = (ubuf0[i] - uvtemp) << 2;
            int V = (vbuf0[i] - uvtemp) << 2;

            A = 0;
            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)
                    A = av_clip_uint8(A);
            }
            Y -= y_offset;
            Y *= y_coeff;
            Y += ytemp;
            R = (unsigned)Y + V * v2r_coe;
            G = (unsigned)Y + V * v2g_coe + U * u2g_coe;
            B = (unsigned)Y + U * u2b_coe;
            yuv2rgb_write_full(c, dest, i, R, A, G, B, y, target, hasAlpha, err);
            dest += step;
        }
    } else {
        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        int uvtemp = 128 << 8;
        __m128i uv = __lsx_vreplgr2vr_w(uvtemp);
        __m128i zero = __lsx_vldi(0);
        __m128i bias = __lsx_vreplgr2vr_h(bias_int);
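
        /* vaddwev/vaddwod widen the even and odd int16 lanes separately,
         * so converted pixels come out interleaved; the stores below
         * alternate between the _ev and _od registers to restore order. */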
        for (i = 0; i < len; i += 8) {
            __m128i b, ub0, ub1, vb0, vb1;
            __m128i y_ev, y_od, u_ev, u_od, v_ev, v_od;
            __m128i R_ev, R_od, G_ev, G_od, B_ev, B_od;
            int n = i << 1;

            DUP4_ARG2(__lsx_vldx, buf0, n, ubuf0, n, vbuf0, n,
                      ubuf1, n, b, ub0, vb0, ub1);
            vb1 = __lsx_vldx(vbuf1, n);
            y_ev = __lsx_vaddwev_w_h(b, zero);
            y_od = __lsx_vaddwod_w_h(b, zero);
            DUP2_ARG2(__lsx_vaddwev_w_h, ub0, ub1, vb0, vb1, u_ev, v_ev);
            DUP2_ARG2(__lsx_vaddwod_w_h, ub0, ub1, vb0, vb1, u_od, v_od);
            DUP2_ARG2(__lsx_vslli_w, y_ev, 2, y_od, 2, y_ev, y_od);
            DUP4_ARG2(__lsx_vsub_w, u_ev, uv, u_od, uv, v_ev, uv, v_od, uv,
                      u_ev, u_od, v_ev, v_od);
            DUP4_ARG2(__lsx_vslli_w, u_ev, 1, u_od, 1, v_ev, 1, v_od, 1,
                      u_ev, u_od, v_ev, v_od);
            YUVTORGB_LSX(y_ev, u_ev, v_ev, R_ev, G_ev, B_ev, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            YUVTORGB_LSX(y_od, u_od, v_od, R_od, G_od, B_od, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            if (hasAlpha) {
                __m128i a_src;
                __m128i a_ev, a_od;

                a_src = __lsx_vld(abuf0 + i, 0);
                a_ev = __lsx_vaddwev_w_h(bias, a_src);
                a_od = __lsx_vaddwod_w_h(bias, a_src);
                a_ev = __lsx_vsrai_w(a_ev, 7);
                a_od = __lsx_vsrai_w(a_od, 7);
                WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 0, 0);
                WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 0, 1);
                WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 1, 2);
                WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 1, 3);
                WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 2, 4);
                WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 2, 5);
                WRITE_FULL_A_LSX(R_ev, G_ev, B_ev, a_ev, 3, 6);
                WRITE_FULL_A_LSX(R_od, G_od, B_od, a_od, 3, 7);
            } else {
                WRITE_FULL_LSX(R_ev, G_ev, B_ev, 0, 0);
                WRITE_FULL_LSX(R_od, G_od, B_od, 0, 1);
                WRITE_FULL_LSX(R_ev, G_ev, B_ev, 1, 2);
                WRITE_FULL_LSX(R_od, G_od, B_od, 1, 3);
                WRITE_FULL_LSX(R_ev, G_ev, B_ev, 2, 4);
                WRITE_FULL_LSX(R_od, G_od, B_od, 2, 5);
                WRITE_FULL_LSX(R_ev, G_ev, B_ev, 3, 6);
                WRITE_FULL_LSX(R_od, G_od, B_od, 3, 7);
            }
        }
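        /* Tail group of 4: widen first, then sum the two chroma rows with
         * a plain 32-bit add before applying the common << 1 scaling. */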
        if (dstW - i >= 4) {
            __m128i b, ub0, ub1, vb0, vb1;
            __m128i y_l, u_l, v_l;
            __m128i R_l, G_l, B_l;
            int n = i << 1;

            DUP4_ARG2(__lsx_vldx, buf0, n, ubuf0, n, vbuf0, n,
                      ubuf1, n, b, ub0, vb0, ub1);
            vb1 = __lsx_vldx(vbuf1, n);
            y_l = __lsx_vsllwil_w_h(b, 0);
            y_l = __lsx_vslli_w(y_l, 2);
            DUP4_ARG2(__lsx_vsllwil_w_h, ub0, 0, vb0, 0, ub1, 0, vb1, 0,
                      ub0, vb0, ub1, vb1);
            DUP2_ARG2(__lsx_vadd_w, ub0, ub1, vb0, vb1, u_l, v_l);
            u_l = __lsx_vsub_w(u_l, uv);
            v_l = __lsx_vsub_w(v_l, uv);
            u_l = __lsx_vslli_w(u_l, 1);
            v_l = __lsx_vslli_w(v_l, 1);
            YUVTORGB_LSX(y_l, u_l, v_l, R_l, G_l, B_l, offset, coeff,
                         y_temp, v2r, v2g, u2g, u2b);
            if (hasAlpha) {
                __m128i a_src;
                __m128i a_l;

                a_src = __lsx_vld(abuf0 + i, 0);
                a_src = __lsx_vilvl_h(a_src, a_src);
                a_l = __lsx_vaddwev_w_h(bias, a_src);
                a_l = __lsx_vsrai_w(a_l, 7);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 0, 0);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 1, 1);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 2, 2);
                WRITE_FULL_A_LSX(R_l, G_l, B_l, a_l, 3, 3);
            } else {
                WRITE_FULL_LSX(R_l, G_l, B_l, 0, 0);
                WRITE_FULL_LSX(R_l, G_l, B_l, 1, 1);
                WRITE_FULL_LSX(R_l, G_l, B_l, 2, 2);
                WRITE_FULL_LSX(R_l, G_l, B_l, 3, 3);
            }
            i += 4;
        }
        for (; i < dstW; i++) {
            int Y = buf0[i] << 2;
            int U = (ubuf0[i] + ubuf1[i] - uvtemp) << 1;
            int V = (vbuf0[i] + vbuf1[i] - uvtemp) << 1;

            A = 0;
            if (hasAlpha) {
                A = (abuf0[i] + 64) >> 7;
                if (A & 0x100)
                    A = av_clip_uint8(A);
            }
            Y -= y_offset;
            Y *= y_coeff;
            Y += ytemp;
            R = (unsigned)Y + V * v2r_coe;
            G = (unsigned)Y + V * v2g_coe + U * u2g_coe;
            B = (unsigned)Y + U * u2b_coe;
            yuv2rgb_write_full(c, dest, i, R, A, G, B, y, target, hasAlpha, err);
            dest += step;
        }
    }
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
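
/*
 * Instantiate the per-format entry points (the *_X, *_2 and *_1 variants)
 * from the templates above; the alpha-capable variants are only built
 * when alpha support is configured.
 */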
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA,
               CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR,
               CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA,
               CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB,
               CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA, 1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB, 1)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full, AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full, AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full, AV_PIX_FMT_BGR8, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full, AV_PIX_FMT_RGB8, 0)
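
/*
 * Select the LSX output routines for the destination pixel format, split
 * between the full-chroma-interpolation path and the default packed path.
 */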
av_cold void ff_sws_init_output_lsx(SwsContext *c)
{
    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (c->dstFormat) {
        case AV_PIX_FMT_RGBA:
#if CONFIG_SMALL
            c->yuv2packedX = yuv2rgba32_full_X_lsx;
            c->yuv2packed2 = yuv2rgba32_full_2_lsx;
            c->yuv2packed1 = yuv2rgba32_full_1_lsx;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                c->yuv2packedX = yuv2rgba32_full_X_lsx;
                c->yuv2packed2 = yuv2rgba32_full_2_lsx;
                c->yuv2packed1 = yuv2rgba32_full_1_lsx;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packedX = yuv2rgbx32_full_X_lsx;
                c->yuv2packed2 = yuv2rgbx32_full_2_lsx;
                c->yuv2packed1 = yuv2rgbx32_full_1_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ARGB:
#if CONFIG_SMALL
            c->yuv2packedX = yuv2argb32_full_X_lsx;
            c->yuv2packed2 = yuv2argb32_full_2_lsx;
            c->yuv2packed1 = yuv2argb32_full_1_lsx;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                c->yuv2packedX = yuv2argb32_full_X_lsx;
                c->yuv2packed2 = yuv2argb32_full_2_lsx;
                c->yuv2packed1 = yuv2argb32_full_1_lsx;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packedX = yuv2xrgb32_full_X_lsx;
                c->yuv2packed2 = yuv2xrgb32_full_2_lsx;
                c->yuv2packed1 = yuv2xrgb32_full_1_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_BGRA:
#if CONFIG_SMALL
            c->yuv2packedX = yuv2bgra32_full_X_lsx;
            c->yuv2packed2 = yuv2bgra32_full_2_lsx;
            c->yuv2packed1 = yuv2bgra32_full_1_lsx;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                c->yuv2packedX = yuv2bgra32_full_X_lsx;
                c->yuv2packed2 = yuv2bgra32_full_2_lsx;
                c->yuv2packed1 = yuv2bgra32_full_1_lsx;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packedX = yuv2bgrx32_full_X_lsx;
                c->yuv2packed2 = yuv2bgrx32_full_2_lsx;
                c->yuv2packed1 = yuv2bgrx32_full_1_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_ABGR:
#if CONFIG_SMALL
            c->yuv2packedX = yuv2abgr32_full_X_lsx;
            c->yuv2packed2 = yuv2abgr32_full_2_lsx;
            c->yuv2packed1 = yuv2abgr32_full_1_lsx;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
                c->yuv2packedX = yuv2abgr32_full_X_lsx;
                c->yuv2packed2 = yuv2abgr32_full_2_lsx;
                c->yuv2packed1 = yuv2abgr32_full_1_lsx;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packedX = yuv2xbgr32_full_X_lsx;
                c->yuv2packed2 = yuv2xbgr32_full_2_lsx;
                c->yuv2packed1 = yuv2xbgr32_full_1_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB24:
            c->yuv2packedX = yuv2rgb24_full_X_lsx;
            c->yuv2packed2 = yuv2rgb24_full_2_lsx;
            c->yuv2packed1 = yuv2rgb24_full_1_lsx;
            break;
        case AV_PIX_FMT_BGR24:
            c->yuv2packedX = yuv2bgr24_full_X_lsx;
            c->yuv2packed2 = yuv2bgr24_full_2_lsx;
            c->yuv2packed1 = yuv2bgr24_full_1_lsx;
            break;
        case AV_PIX_FMT_BGR4_BYTE:
            c->yuv2packedX = yuv2bgr4_byte_full_X_lsx;
            c->yuv2packed2 = yuv2bgr4_byte_full_2_lsx;
            c->yuv2packed1 = yuv2bgr4_byte_full_1_lsx;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
            c->yuv2packedX = yuv2rgb4_byte_full_X_lsx;
            c->yuv2packed2 = yuv2rgb4_byte_full_2_lsx;
            c->yuv2packed1 = yuv2rgb4_byte_full_1_lsx;
            break;
        case AV_PIX_FMT_BGR8:
            c->yuv2packedX = yuv2bgr8_full_X_lsx;
            c->yuv2packed2 = yuv2bgr8_full_2_lsx;
            c->yuv2packed1 = yuv2bgr8_full_1_lsx;
            break;
        case AV_PIX_FMT_RGB8:
            c->yuv2packedX = yuv2rgb8_full_X_lsx;
            c->yuv2packed2 = yuv2rgb8_full_2_lsx;
            c->yuv2packed1 = yuv2rgb8_full_1_lsx;
            break;
        }
    } else {
        switch (c->dstFormat) {
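        /* Cases with empty branch bodies below assign nothing, so the
         * function pointers chosen earlier (presumably the generic C
         * versions) stay in effect for those configurations. */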
        case AV_PIX_FMT_RGB32:
        case AV_PIX_FMT_BGR32:
#if CONFIG_SMALL
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packed1 = yuv2rgbx32_1_lsx;
                c->yuv2packed2 = yuv2rgbx32_2_lsx;
                c->yuv2packedX = yuv2rgbx32_X_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB32_1:
        case AV_PIX_FMT_BGR32_1:
#if CONFIG_SMALL
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->needAlpha) {
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                c->yuv2packed1 = yuv2rgbx32_1_1_lsx;
                c->yuv2packed2 = yuv2rgbx32_1_2_lsx;
                c->yuv2packedX = yuv2rgbx32_1_X_lsx;
            }
#endif /* !CONFIG_SMALL */
            break;
        case AV_PIX_FMT_RGB24:
            c->yuv2packed1 = yuv2rgb24_1_lsx;
            c->yuv2packed2 = yuv2rgb24_2_lsx;
            c->yuv2packedX = yuv2rgb24_X_lsx;
            break;
        case AV_PIX_FMT_BGR24:
            c->yuv2packed1 = yuv2bgr24_1_lsx;
            c->yuv2packed2 = yuv2bgr24_2_lsx;
            c->yuv2packedX = yuv2bgr24_X_lsx;
            break;
        case AV_PIX_FMT_RGB565LE:
        case AV_PIX_FMT_RGB565BE:
        case AV_PIX_FMT_BGR565LE:
        case AV_PIX_FMT_BGR565BE:
            c->yuv2packed1 = yuv2rgb16_1_lsx;
            c->yuv2packed2 = yuv2rgb16_2_lsx;
            c->yuv2packedX = yuv2rgb16_X_lsx;
            break;
        case AV_PIX_FMT_RGB555LE:
        case AV_PIX_FMT_RGB555BE:
        case AV_PIX_FMT_BGR555LE:
        case AV_PIX_FMT_BGR555BE:
            c->yuv2packed1 = yuv2rgb15_1_lsx;
            c->yuv2packed2 = yuv2rgb15_2_lsx;
            c->yuv2packedX = yuv2rgb15_X_lsx;
            break;
        case AV_PIX_FMT_RGB444LE:
        case AV_PIX_FMT_RGB444BE:
        case AV_PIX_FMT_BGR444LE:
        case AV_PIX_FMT_BGR444BE:
            c->yuv2packed1 = yuv2rgb12_1_lsx;
            c->yuv2packed2 = yuv2rgb12_2_lsx;
            c->yuv2packedX = yuv2rgb12_X_lsx;
            break;
        case AV_PIX_FMT_RGB8:
        case AV_PIX_FMT_BGR8:
            c->yuv2packed1 = yuv2rgb8_1_lsx;
            c->yuv2packed2 = yuv2rgb8_2_lsx;
            c->yuv2packedX = yuv2rgb8_X_lsx;
            break;
        case AV_PIX_FMT_RGB4:
        case AV_PIX_FMT_BGR4:
            c->yuv2packed1 = yuv2rgb4_1_lsx;
            c->yuv2packed2 = yuv2rgb4_2_lsx;
            c->yuv2packedX = yuv2rgb4_X_lsx;
            break;
        case AV_PIX_FMT_RGB4_BYTE:
        case AV_PIX_FMT_BGR4_BYTE:
            c->yuv2packed1 = yuv2rgb4b_1_lsx;
            c->yuv2packed2 = yuv2rgb4b_2_lsx;
            c->yuv2packedX = yuv2rgb4b_X_lsx;
            break;
        }
    }
}