swscale_unscaled.c

/*
 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>

#include "config.h"
#include "swscale.h"
#include "swscale_internal.h"
#include "rgb2rgb.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem_internal.h"
#include "libavutil/bswap.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "libavutil/avconfig.h"

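/* Ordered-dither matrices of increasing amplitude, used by the unscaled
 * converters when reducing bit depth (the first index selects the dither
 * strength). */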
DECLARE_ALIGNED(8, static const uint8_t, dithers)[8][8][8] = {
{
  {  0,  1,  0,  1,  0,  1,  0,  1,},
  {  1,  0,  1,  0,  1,  0,  1,  0,},
  {  0,  1,  0,  1,  0,  1,  0,  1,},
  {  1,  0,  1,  0,  1,  0,  1,  0,},
  {  0,  1,  0,  1,  0,  1,  0,  1,},
  {  1,  0,  1,  0,  1,  0,  1,  0,},
  {  0,  1,  0,  1,  0,  1,  0,  1,},
  {  1,  0,  1,  0,  1,  0,  1,  0,},
},{
  {  1,  2,  1,  2,  1,  2,  1,  2,},
  {  3,  0,  3,  0,  3,  0,  3,  0,},
  {  1,  2,  1,  2,  1,  2,  1,  2,},
  {  3,  0,  3,  0,  3,  0,  3,  0,},
  {  1,  2,  1,  2,  1,  2,  1,  2,},
  {  3,  0,  3,  0,  3,  0,  3,  0,},
  {  1,  2,  1,  2,  1,  2,  1,  2,},
  {  3,  0,  3,  0,  3,  0,  3,  0,},
},{
  {  2,  4,  3,  5,  2,  4,  3,  5,},
  {  6,  0,  7,  1,  6,  0,  7,  1,},
  {  3,  5,  2,  4,  3,  5,  2,  4,},
  {  7,  1,  6,  0,  7,  1,  6,  0,},
  {  2,  4,  3,  5,  2,  4,  3,  5,},
  {  6,  0,  7,  1,  6,  0,  7,  1,},
  {  3,  5,  2,  4,  3,  5,  2,  4,},
  {  7,  1,  6,  0,  7,  1,  6,  0,},
},{
  {  4,  8,  7, 11,  4,  8,  7, 11,},
  { 12,  0, 15,  3, 12,  0, 15,  3,},
  {  6, 10,  5,  9,  6, 10,  5,  9,},
  { 14,  2, 13,  1, 14,  2, 13,  1,},
  {  4,  8,  7, 11,  4,  8,  7, 11,},
  { 12,  0, 15,  3, 12,  0, 15,  3,},
  {  6, 10,  5,  9,  6, 10,  5,  9,},
  { 14,  2, 13,  1, 14,  2, 13,  1,},
},{
  {  9, 17, 15, 23,  8, 16, 14, 22,},
  { 25,  1, 31,  7, 24,  0, 30,  6,},
  { 13, 21, 11, 19, 12, 20, 10, 18,},
  { 29,  5, 27,  3, 28,  4, 26,  2,},
  {  8, 16, 14, 22,  9, 17, 15, 23,},
  { 24,  0, 30,  6, 25,  1, 31,  7,},
  { 12, 20, 10, 18, 13, 21, 11, 19,},
  { 28,  4, 26,  2, 29,  5, 27,  3,},
},{
  { 18, 34, 30, 46, 17, 33, 29, 45,},
  { 50,  2, 62, 14, 49,  1, 61, 13,},
  { 26, 42, 22, 38, 25, 41, 21, 37,},
  { 58, 10, 54,  6, 57,  9, 53,  5,},
  { 16, 32, 28, 44, 19, 35, 31, 47,},
  { 48,  0, 60, 12, 51,  3, 63, 15,},
  { 24, 40, 20, 36, 27, 43, 23, 39,},
  { 56,  8, 52,  4, 59, 11, 55,  7,},
},{
  { 18, 34, 30, 46, 17, 33, 29, 45,},
  { 50,  2, 62, 14, 49,  1, 61, 13,},
  { 26, 42, 22, 38, 25, 41, 21, 37,},
  { 58, 10, 54,  6, 57,  9, 53,  5,},
  { 16, 32, 28, 44, 19, 35, 31, 47,},
  { 48,  0, 60, 12, 51,  3, 63, 15,},
  { 24, 40, 20, 36, 27, 43, 23, 39,},
  { 56,  8, 52,  4, 59, 11, 55,  7,},
},{
  { 36, 68, 60, 92, 34, 66, 58, 90,},
  {100,  4,124, 28, 98,  2,122, 26,},
  { 52, 84, 44, 76, 50, 82, 42, 74,},
  {116, 20,108, 12,114, 18,106, 10,},
  { 32, 64, 56, 88, 38, 70, 62, 94,},
  { 96,  0,120, 24,102,  6,126, 30,},
  { 48, 80, 40, 72, 54, 86, 46, 78,},
  {112, 16,104,  8,118, 22,110, 14,},
}};

static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
                      uint8_t val)
{
    int i;
    uint8_t *ptr = plane + stride * y;
    for (i = 0; i < height; i++) {
        memset(ptr, val, width);
        ptr += stride;
    }
}

void ff_copyPlane(const uint8_t *src, int srcStride,
                  int srcSliceY, int srcSliceH, int width,
                  uint8_t *dst, int dstStride)
{
    dst += dstStride * srcSliceY;
    if (dstStride == srcStride && srcStride > 0) {
        memcpy(dst, src, srcSliceH * dstStride);
    } else {
        int i;
        for (i = 0; i < srcSliceH; i++) {
            memcpy(dst, src, width);
            src += srcStride;
            dst += dstStride;
        }
    }
}

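/* Planar YUV420 <-> NV12/NV21 wrappers: the luma plane is copied as-is and
 * the two chroma planes are interleaved into (or deinterleaved out of) the
 * packed UV plane. */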
static int planarToNv12Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *const dstParam[],
                               const int dstStride[])
{
    uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
                 dstParam[0], dstStride[0]);
    if (c->opts.dst_format == AV_PIX_FMT_NV12)
        interleaveBytes(src[1], src[2], dst, c->chrSrcW, (srcSliceH + 1) / 2,
                        srcStride[1], srcStride[2], dstStride[1]);
    else
        interleaveBytes(src[2], src[1], dst, c->chrSrcW, (srcSliceH + 1) / 2,
                        srcStride[2], srcStride[1], dstStride[1]);
    return srcSliceH;
}

static int nv12ToPlanarWrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *const dstParam[],
                               const int dstStride[])
{
    uint8_t *dst1 = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *dst2 = dstParam[2] + dstStride[2] * srcSliceY / 2;
    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
                 dstParam[0], dstStride[0]);
    if (c->opts.src_format == AV_PIX_FMT_NV12)
        deinterleaveBytes(src[1], dst1, dst2, c->chrSrcW, (srcSliceH + 1) / 2,
                          srcStride[1], dstStride[1], dstStride[2]);
    else
        deinterleaveBytes(src[1], dst2, dst1, c->chrSrcW, (srcSliceH + 1) / 2,
                          srcStride[1], dstStride[2], dstStride[1]);
    return srcSliceH;
}

static int planarToNv24Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *const dstParam[],
                               const int dstStride[])
{
    uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY;
    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
                 dstParam[0], dstStride[0]);
    if (c->opts.dst_format == AV_PIX_FMT_NV24)
        interleaveBytes(src[1], src[2], dst, c->chrSrcW, srcSliceH,
                        srcStride[1], srcStride[2], dstStride[1]);
    else
        interleaveBytes(src[2], src[1], dst, c->chrSrcW, srcSliceH,
                        srcStride[2], srcStride[1], dstStride[1]);
    return srcSliceH;
}

static int nv24ToPlanarWrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *const dstParam[],
                               const int dstStride[])
{
    uint8_t *dst1 = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *dst2 = dstParam[2] + dstStride[2] * srcSliceY;
    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
                 dstParam[0], dstStride[0]);
    if (c->opts.src_format == AV_PIX_FMT_NV24)
        deinterleaveBytes(src[1], dst1, dst2, c->chrSrcW, srcSliceH,
                          srcStride[1], dstStride[1], dstStride[2]);
    else
        deinterleaveBytes(src[1], dst2, dst1, c->chrSrcW, srcSliceH,
                          srcStride[1], dstStride[2], dstStride[1]);
    return srcSliceH;
}

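/* Downsample NV24's full-resolution interleaved chroma to 4:2:0 by averaging
 * each 2x2 block of U and V samples. */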
static void nv24_to_yuv420p_chroma(uint8_t *dst1, int dstStride1,
                                   uint8_t *dst2, int dstStride2,
                                   const uint8_t *src, int srcStride,
                                   int w, int h)
{
    const uint8_t *src1 = src;
    const uint8_t *src2 = src + srcStride;
    // average 4 pixels into 1 (interleaved U and V)
    for (int y = 0; y < h; y += 2) {
        if (y + 1 == h)
            src2 = src1;
        for (int x = 0; x < w; x++) {
            dst1[x] = (src1[4 * x + 0] + src1[4 * x + 2] +
                       src2[4 * x + 0] + src2[4 * x + 2]) >> 2;
            dst2[x] = (src1[4 * x + 1] + src1[4 * x + 3] +
                       src2[4 * x + 1] + src2[4 * x + 3]) >> 2;
        }
        src1 += srcStride * 2;
        src2 += srcStride * 2;
        dst1 += dstStride1;
        dst2 += dstStride2;
    }
}

static int nv24ToYuv420Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *dst1 = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *dst2 = dstParam[2] + dstStride[2] * srcSliceY / 2;
    ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
                 dstParam[0], dstStride[0]);
    if (c->opts.src_format == AV_PIX_FMT_NV24)
        nv24_to_yuv420p_chroma(dst1, dstStride[1], dst2, dstStride[2],
                               src[1], srcStride[1], c->opts.src_w / 2, srcSliceH);
    else
        nv24_to_yuv420p_chroma(dst2, dstStride[2], dst1, dstStride[1],
                               src[1], srcStride[1], c->opts.src_w / 2, srcSliceH);
    return srcSliceH;
}

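/* Convert high-bit-depth planar YUV420 to P010/P016-style semi-planar
 * layouts: luma samples are shifted to the destination's alignment and the
 * chroma planes are interleaved into one UV plane. */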
static int planarToP01xWrapper(SwsInternal *c, const uint8_t *const src8[],
                               const int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *const dstParam8[],
                               const int dstStride[])
{
    const AVPixFmtDescriptor *src_format = av_pix_fmt_desc_get(c->opts.src_format);
    const AVPixFmtDescriptor *dst_format = av_pix_fmt_desc_get(c->opts.dst_format);
    const uint16_t **src = (const uint16_t**)src8;
    uint16_t *dstY = (uint16_t*)(dstParam8[0] + dstStride[0] * srcSliceY);
    uint16_t *dstUV = (uint16_t*)(dstParam8[1] + dstStride[1] * srcSliceY / 2);
    int x, y;
    /* Calculate net shift required for values. */
    const int shift[3] = {
        dst_format->comp[0].depth + dst_format->comp[0].shift -
        src_format->comp[0].depth - src_format->comp[0].shift,
        dst_format->comp[1].depth + dst_format->comp[1].shift -
        src_format->comp[1].depth - src_format->comp[1].shift,
        dst_format->comp[2].depth + dst_format->comp[2].shift -
        src_format->comp[2].depth - src_format->comp[2].shift,
    };
    av_assert0(!(srcStride[0] % 2 || srcStride[1] % 2 || srcStride[2] % 2 ||
                 dstStride[0] % 2 || dstStride[1] % 2));
    for (y = 0; y < srcSliceH; y++) {
        uint16_t *tdstY = dstY;
        const uint16_t *tsrc0 = src[0];
        for (x = c->opts.src_w; x > 0; x--) {
            *tdstY++ = *tsrc0++ << shift[0];
        }
        src[0] += srcStride[0] / 2;
        dstY += dstStride[0] / 2;
        if (!(y & 1)) {
            uint16_t *tdstUV = dstUV;
            const uint16_t *tsrc1 = src[1];
            const uint16_t *tsrc2 = src[2];
            for (x = c->opts.src_w / 2; x > 0; x--) {
                *tdstUV++ = *tsrc1++ << shift[1];
                *tdstUV++ = *tsrc2++ << shift[2];
            }
            src[1] += srcStride[1] / 2;
            src[2] += srcStride[2] / 2;
            dstUV += dstStride[1] / 2;
        }
    }
    return srcSliceH;
}

#if AV_HAVE_BIGENDIAN
#define output_pixel(p, v) do { \
        uint16_t *pp = (p); \
        AV_WL16(pp, (v)); \
    } while(0)
#else
#define output_pixel(p, v) (*p) = (v)
#endif

static int planar8ToP01xleWrapper(SwsInternal *c, const uint8_t *const src[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dstParam8[],
                                  const int dstStride[])
{
    const uint8_t *src0 = src[0], *src1 = src[1], *src2 = src[2];
    uint16_t *dstY = (uint16_t*)(dstParam8[0] + dstStride[0] * srcSliceY);
    uint16_t *dstUV = (uint16_t*)(dstParam8[1] + dstStride[1] * srcSliceY / 2);
    int x, y, t;
    av_assert0(!(dstStride[0] % 2 || dstStride[1] % 2));
    for (y = 0; y < srcSliceH; y++) {
        uint16_t *tdstY = dstY;
        const uint8_t *tsrc0 = src0;
        for (x = c->opts.src_w; x > 0; x--) {
            t = *tsrc0++;
            output_pixel(tdstY++, t << 8);
        }
        src0 += srcStride[0];
        dstY += dstStride[0] / 2;
        if (!(y & 1)) {
            uint16_t *tdstUV = dstUV;
            const uint8_t *tsrc1 = src1;
            const uint8_t *tsrc2 = src2;
            for (x = c->opts.src_w / 2; x > 0; x--) {
                t = *tsrc1++;
                output_pixel(tdstUV++, t << 8);
                t = *tsrc2++;
                output_pixel(tdstUV++, t << 8);
            }
            src1 += srcStride[1];
            src2 += srcStride[2];
            dstUV += dstStride[1] / 2;
        }
    }
    return srcSliceH;
}

#undef output_pixel

static int planarToYuy2Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
    yv12toyuy2(src[0], src[1], src[2], dst, c->opts.src_w, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);
    return srcSliceH;
}

static int planarToUyvyWrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
    yv12touyvy(src[0], src[1], src[2], dst, c->opts.src_w, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);
    return srcSliceH;
}

static int yuv422pToYuy2Wrapper(SwsInternal *c, const uint8_t *const src[],
                                const int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
    yuv422ptoyuy2(src[0], src[1], src[2], dst, c->opts.src_w, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);
    return srcSliceH;
}

static int yuv422pToUyvyWrapper(SwsInternal *c, const uint8_t *const src[],
                                const int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;
    yuv422ptouyvy(src[0], src[1], src[2], dst, c->opts.src_w, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);
    return srcSliceH;
}

static int yuyvToYuv420Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;
    yuyvtoyuv420(ydst, udst, vdst, src[0], c->opts.src_w, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);
    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->opts.src_w, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

static int yuyvToYuv422Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;
    yuyvtoyuv422(ydst, udst, vdst, src[0], c->opts.src_w, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);
    return srcSliceH;
}

static int uyvyToYuv420Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;
    uyvytoyuv420(ydst, udst, vdst, src[0], c->opts.src_w, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);
    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->opts.src_w, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

static int uyvyToYuv422Wrapper(SwsInternal *c, const uint8_t *const src[],
                               const int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *const dstParam[], const int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;
    uyvytoyuv422(ydst, udst, vdst, src[0], c->opts.src_w, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);
    return srcSliceH;
}

static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] | (src[(i << 1) + 1] << 24);
}

static void gray8aToPacked32_1(const uint8_t *src, uint8_t *dst, int num_pixels,
                               const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] | src[(i << 1) + 1];
}

static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++) {
        //FIXME slow?
        dst[0] = palette[src[i << 1] * 4 + 0];
        dst[1] = palette[src[i << 1] * 4 + 1];
        dst[2] = palette[src[i << 1] * 4 + 2];
        dst += 3;
    }
}

static void gray8aToPlanar8(const uint8_t *src, uint8_t *dst0, uint8_t *dst1,
                            uint8_t *dst2, uint8_t *dstA, int num_pixels,
                            const uint8_t *palette)
{
    for (int i = 0; i < num_pixels; i++) {
        const uint8_t *rgb = &palette[src[i << 1] * 4];
        dst0[i] = rgb[0];
        dst1[i] = rgb[1];
        dst2[i] = rgb[2];
        if (dstA)
            dstA[i] = src[(i << 1) + 1];
    }
}

static void pal8ToPlanar8(const uint8_t *src, uint8_t *dst0, uint8_t *dst1,
                          uint8_t *dst2, uint8_t *dstA, int num_pixels,
                          const uint8_t *palette)
{
    for (int i = 0; i < num_pixels; i++) {
        const uint8_t *rgba = &palette[src[i] * 4];
        dst0[i] = rgba[0];
        dst1[i] = rgba[1];
        dst2[i] = rgba[2];
        if (dstA)
            dstA[i] = rgba[3];
    }
}

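/* Endianness conversion passes: byte-swap every 16-bit (or 32-bit) sample
 * while copying each plane from src to dst. */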
static int bswap_16bpc(SwsInternal *c, const uint8_t *const src[],
                       const int srcStride[], int srcSliceY, int srcSliceH,
                       uint8_t *const dst[], const int dstStride[])
{
    int i, j, p;
    for (p = 0; p < 4; p++) {
        int srcstr = srcStride[p] / 2;
        int dststr = dstStride[p] / 2;
        uint16_t *dstPtr = (uint16_t *) dst[p];
        const uint16_t *srcPtr = (const uint16_t *) src[p];
        int min_stride = FFMIN(FFABS(srcstr), FFABS(dststr));
        if (!dstPtr || !srcPtr)
            continue;
        dstPtr += (srcSliceY >> c->chrDstVSubSample) * dststr;
        for (i = 0; i < (srcSliceH >> c->chrDstVSubSample); i++) {
            for (j = 0; j < min_stride; j++) {
                dstPtr[j] = av_bswap16(srcPtr[j]);
            }
            srcPtr += srcstr;
            dstPtr += dststr;
        }
    }
    return srcSliceH;
}

static int bswap_32bpc(SwsInternal *c, const uint8_t *const src[],
                       const int srcStride[], int srcSliceY, int srcSliceH,
                       uint8_t *const dst[], const int dstStride[])
{
    int i, j, p;
    for (p = 0; p < 4; p++) {
        int srcstr = srcStride[p] / 4;
        int dststr = dstStride[p] / 4;
        uint32_t *dstPtr = (uint32_t *) dst[p];
        const uint32_t *srcPtr = (const uint32_t *) src[p];
        int min_stride = FFMIN(FFABS(srcstr), FFABS(dststr));
        if (!dstPtr || !srcPtr)
            continue;
        dstPtr += (srcSliceY >> c->chrDstVSubSample) * dststr;
        for (i = 0; i < (srcSliceH >> c->chrDstVSubSample); i++) {
            for (j = 0; j < min_stride; j++) {
                dstPtr[j] = av_bswap32(srcPtr[j]);
            }
            srcPtr += srcstr;
            dstPtr += dststr;
        }
    }
    return srcSliceH;
}

static int palToRgbWrapper(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
                           int srcSliceY, int srcSliceH, uint8_t *const dst[],
                           const int dstStride[])
{
    const enum AVPixelFormat srcFormat = c->opts.src_format;
    const enum AVPixelFormat dstFormat = c->opts.dst_format;
    void (*conv)(const uint8_t *src, uint8_t *dst, int num_pixels,
                 const uint8_t *palette) = NULL;
    int i;
    uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
    const uint8_t *srcPtr = src[0];
    if (srcFormat == AV_PIX_FMT_YA8) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB32  : conv = gray8aToPacked32;   break;
        case AV_PIX_FMT_BGR32  : conv = gray8aToPacked32;   break;
        case AV_PIX_FMT_BGR32_1: conv = gray8aToPacked32_1; break;
        case AV_PIX_FMT_RGB32_1: conv = gray8aToPacked32_1; break;
        case AV_PIX_FMT_RGB24  : conv = gray8aToPacked24;   break;
        case AV_PIX_FMT_BGR24  : conv = gray8aToPacked24;   break;
        }
    } else if (usePal(srcFormat)) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB32  : conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_BGR32  : conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_BGR32_1: conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_RGB32_1: conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_RGB24  : conv = sws_convertPalette8ToPacked24; break;
        case AV_PIX_FMT_BGR24  : conv = sws_convertPalette8ToPacked24; break;
        }
    }
    if (!conv)
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
    else {
        for (i = 0; i < srcSliceH; i++) {
            conv(srcPtr, dstPtr, c->opts.src_w, (uint8_t *) c->pal_rgb);
            srcPtr += srcStride[0];
            dstPtr += dstStride[0];
        }
    }
    return srcSliceH;
}

static int palToGbrpWrapper(SwsInternal *c, const uint8_t *const src[],
                            const int srcStride[], int srcSliceY, int srcSliceH,
                            uint8_t *const dst[], const int dstStride[])
{
    const enum AVPixelFormat srcFormat = c->opts.src_format;
    const enum AVPixelFormat dstFormat = c->opts.dst_format;
    void (*conv)(const uint8_t *src, uint8_t *dstG, uint8_t *dstB, uint8_t *dstR,
                 uint8_t *dstA, int num_pixels, const uint8_t *palette) = NULL;
    const int num_planes = isALPHA(dstFormat) ? 4 : 3;
    const uint8_t *srcPtr = src[0];
    uint8_t *dstPtr[4] = {0};
    for (int i = 0; i < num_planes; i++)
        dstPtr[i] = dst[i] + dstStride[i] * srcSliceY;
    if (srcFormat == AV_PIX_FMT_YA8) {
        switch (dstFormat) {
        case AV_PIX_FMT_GBRP:  conv = gray8aToPlanar8; break;
        case AV_PIX_FMT_GBRAP: conv = gray8aToPlanar8; break;
        }
    } else if (usePal(srcFormat)) {
        switch (dstFormat) {
        case AV_PIX_FMT_GBRP:  conv = pal8ToPlanar8; break;
        case AV_PIX_FMT_GBRAP: conv = pal8ToPlanar8; break;
        }
    }
    av_assert1(conv);
    for (int y = 0; y < srcSliceH; y++) {
        conv(srcPtr, dstPtr[0], dstPtr[1], dstPtr[2], dstPtr[3], c->opts.src_w,
             (uint8_t *) c->pal_rgb);
        srcPtr += srcStride[0];
        for (int i = 0; i < num_planes; i++)
            dstPtr[i] += dstStride[i];
    }
    return srcSliceH;
}

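/* Unpack packed 16-bit-per-component RGB(A) into 16-bit planar GBR(A).
 * 'swap' encodes which side needs byte swapping (bit 0: source, bit 1:
 * destination); 'shift' right-shifts the 16-bit samples down to the
 * destination bit depth. */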
static void packed16togbra16(const uint8_t *src, int srcStride,
                             uint16_t *dst[], const int dstStride[], int srcSliceH,
                             int src_alpha, int swap, int shift, int width)
{
    int x, h, i;
    int dst_alpha = dst[3] != NULL;
    for (h = 0; h < srcSliceH; h++) {
        uint16_t *src_line = (uint16_t *)(src + srcStride * h);
        switch (swap) {
        case 3:
            if (src_alpha && dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[1][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[2][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[3][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                }
            } else if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[1][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[2][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[3][x] = 0xFFFF;
                }
            } else if (src_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[1][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[2][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[1][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                    dst[2][x] = av_bswap16(av_bswap16(*src_line++) >> shift);
                }
            }
            break;
        case 2:
            if (src_alpha && dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++ >> shift);
                    dst[1][x] = av_bswap16(*src_line++ >> shift);
                    dst[2][x] = av_bswap16(*src_line++ >> shift);
                    dst[3][x] = av_bswap16(*src_line++ >> shift);
                }
            } else if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++ >> shift);
                    dst[1][x] = av_bswap16(*src_line++ >> shift);
                    dst[2][x] = av_bswap16(*src_line++ >> shift);
                    dst[3][x] = 0xFFFF;
                }
            } else if (src_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++ >> shift);
                    dst[1][x] = av_bswap16(*src_line++ >> shift);
                    dst[2][x] = av_bswap16(*src_line++ >> shift);
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++ >> shift);
                    dst[1][x] = av_bswap16(*src_line++ >> shift);
                    dst[2][x] = av_bswap16(*src_line++ >> shift);
                }
            }
            break;
        case 1:
            if (src_alpha && dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++) >> shift;
                    dst[1][x] = av_bswap16(*src_line++) >> shift;
                    dst[2][x] = av_bswap16(*src_line++) >> shift;
                    dst[3][x] = av_bswap16(*src_line++) >> shift;
                }
            } else if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++) >> shift;
                    dst[1][x] = av_bswap16(*src_line++) >> shift;
                    dst[2][x] = av_bswap16(*src_line++) >> shift;
                    dst[3][x] = 0xFFFF;
                }
            } else if (src_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++) >> shift;
                    dst[1][x] = av_bswap16(*src_line++) >> shift;
                    dst[2][x] = av_bswap16(*src_line++) >> shift;
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    dst[0][x] = av_bswap16(*src_line++) >> shift;
                    dst[1][x] = av_bswap16(*src_line++) >> shift;
                    dst[2][x] = av_bswap16(*src_line++) >> shift;
                }
            }
            break;
        default:
            if (src_alpha && dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = *src_line++ >> shift;
                    dst[1][x] = *src_line++ >> shift;
                    dst[2][x] = *src_line++ >> shift;
                    dst[3][x] = *src_line++ >> shift;
                }
            } else if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = *src_line++ >> shift;
                    dst[1][x] = *src_line++ >> shift;
                    dst[2][x] = *src_line++ >> shift;
                    dst[3][x] = 0xFFFF;
                }
            } else if (src_alpha) {
                for (x = 0; x < width; x++) {
                    dst[0][x] = *src_line++ >> shift;
                    dst[1][x] = *src_line++ >> shift;
                    dst[2][x] = *src_line++ >> shift;
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    dst[0][x] = *src_line++ >> shift;
                    dst[1][x] = *src_line++ >> shift;
                    dst[2][x] = *src_line++ >> shift;
                }
            }
        }
        for (i = 0; i < 4; i++)
            dst[i] += dstStride[i] >> 1;
    }
}

static void packed30togbra10(const uint8_t *src, int srcStride,
                             uint16_t *dst[], const int dstStride[], int srcSliceH,
                             int swap, int bpc, int width)
{
    int x, h, i;
    int dst_alpha = dst[3] != NULL;
    int scale_high = bpc - 10, scale_low = 10 - scale_high;
    for (h = 0; h < srcSliceH; h++) {
        uint32_t *src_line = (uint32_t *)(src + srcStride * h);
        unsigned component;
        switch (swap) {
        case 3:
        case 2:
            if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    unsigned p = AV_RL32(src_line);
                    component = (p >> 20) & 0x3FF;
                    dst[0][x] = av_bswap16(component << scale_high | component >> scale_low);
                    component = (p >> 10) & 0x3FF;
                    dst[1][x] = av_bswap16(component << scale_high | component >> scale_low);
                    component = p & 0x3FF;
                    dst[2][x] = av_bswap16(component << scale_high | component >> scale_low);
                    dst[3][x] = 0xFFFF;
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    unsigned p = AV_RL32(src_line);
                    component = (p >> 20) & 0x3FF;
                    dst[0][x] = av_bswap16(component << scale_high | component >> scale_low);
                    component = (p >> 10) & 0x3FF;
                    dst[1][x] = av_bswap16(component << scale_high | component >> scale_low);
                    component = p & 0x3FF;
                    dst[2][x] = av_bswap16(component << scale_high | component >> scale_low);
                    src_line++;
                }
            }
            break;
        default:
            if (dst_alpha) {
                for (x = 0; x < width; x++) {
                    unsigned p = AV_RL32(src_line);
                    component = (p >> 20) & 0x3FF;
                    dst[0][x] = component << scale_high | component >> scale_low;
                    component = (p >> 10) & 0x3FF;
                    dst[1][x] = component << scale_high | component >> scale_low;
                    component = p & 0x3FF;
                    dst[2][x] = component << scale_high | component >> scale_low;
                    dst[3][x] = 0xFFFF;
                    src_line++;
                }
            } else {
                for (x = 0; x < width; x++) {
                    unsigned p = AV_RL32(src_line);
                    component = (p >> 20) & 0x3FF;
                    dst[0][x] = component << scale_high | component >> scale_low;
                    component = (p >> 10) & 0x3FF;
                    dst[1][x] = component << scale_high | component >> scale_low;
                    component = p & 0x3FF;
                    dst[2][x] = component << scale_high | component >> scale_low;
                    src_line++;
                }
            }
            break;
        }
        for (i = 0; i < 4; i++)
            dst[i] += dstStride[i] >> 1;
    }
}

static int Rgb16ToPlanarRgb16Wrapper(SwsInternal *c, const uint8_t *const src[],
                                     const int srcStride[], int srcSliceY, int srcSliceH,
                                     uint8_t *const dst[], const int dstStride[])
{
    uint16_t *dst2013[] = { (uint16_t *)dst[2], (uint16_t *)dst[0], (uint16_t *)dst[1], (uint16_t *)dst[3] };
    uint16_t *dst1023[] = { (uint16_t *)dst[1], (uint16_t *)dst[0], (uint16_t *)dst[2], (uint16_t *)dst[3] };
    int stride2013[] = { dstStride[2], dstStride[0], dstStride[1], dstStride[3] };
    int stride1023[] = { dstStride[1], dstStride[0], dstStride[2], dstStride[3] };
    const AVPixFmtDescriptor *src_format = av_pix_fmt_desc_get(c->opts.src_format);
    const AVPixFmtDescriptor *dst_format = av_pix_fmt_desc_get(c->opts.dst_format);
    int bpc = dst_format->comp[0].depth;
    int alpha = src_format->flags & AV_PIX_FMT_FLAG_ALPHA;
    int swap = 0;
    int i;
    if ( HAVE_BIGENDIAN && !(src_format->flags & AV_PIX_FMT_FLAG_BE) ||
        !HAVE_BIGENDIAN &&   src_format->flags & AV_PIX_FMT_FLAG_BE)
        swap++;
    if ( HAVE_BIGENDIAN && !(dst_format->flags & AV_PIX_FMT_FLAG_BE) ||
        !HAVE_BIGENDIAN &&   dst_format->flags & AV_PIX_FMT_FLAG_BE)
        swap += 2;
    if ((dst_format->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) !=
        (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB) || bpc < 9) {
        av_log(c, AV_LOG_ERROR, "unsupported conversion to planar RGB %s -> %s\n",
               src_format->name, dst_format->name);
        return srcSliceH;
    }
    for(i=0; i<4; i++) {
        dst2013[i] += stride2013[i] * srcSliceY / 2;
        dst1023[i] += stride1023[i] * srcSliceY / 2;
    }
    switch (c->opts.src_format) {
    case AV_PIX_FMT_RGB48LE:
    case AV_PIX_FMT_RGB48BE:
    case AV_PIX_FMT_RGBA64LE:
    case AV_PIX_FMT_RGBA64BE:
        packed16togbra16(src[0], srcStride[0],
                         dst2013, stride2013, srcSliceH, alpha, swap,
                         16 - bpc, c->opts.src_w);
        break;
    case AV_PIX_FMT_X2RGB10LE:
        av_assert0(bpc >= 10);
        packed30togbra10(src[0], srcStride[0],
                         dst2013, stride2013, srcSliceH, swap,
                         bpc, c->opts.src_w);
        break;
    case AV_PIX_FMT_BGR48LE:
    case AV_PIX_FMT_BGR48BE:
    case AV_PIX_FMT_BGRA64LE:
    case AV_PIX_FMT_BGRA64BE:
        packed16togbra16(src[0], srcStride[0],
                         dst1023, stride1023, srcSliceH, alpha, swap,
                         16 - bpc, c->opts.src_w);
        break;
    case AV_PIX_FMT_X2BGR10LE:
        av_assert0(bpc >= 10);
        packed30togbra10(src[0], srcStride[0],
                         dst1023, stride1023, srcSliceH, swap,
                         bpc, c->opts.src_w);
        break;
    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported conversion to planar RGB %s -> %s\n",
               src_format->name, dst_format->name);
    }
    return srcSliceH;
}

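/* Pack planar 16-bit GBR(A) into packed 16-bit-per-component RGB(A)/BGR(A).
 * 'swap' has the same source/destination byte-swap meaning as above; 'bpp' is
 * the source depth and samples are scaled up to the full 16-bit range by bit
 * replication. */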
static void gbr16ptopacked16(const uint16_t *src[], const int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int alpha, int swap, int bpp, int width)
{
    int x, h, i;
    int src_alpha = src[3] != NULL;
    int scale_high = 16 - bpp, scale_low = (bpp - 8) * 2;
    for (h = 0; h < srcSliceH; h++) {
        uint16_t *dest = (uint16_t *)(dst + dstStride * h);
        uint16_t component;
        switch(swap) {
        case 3:
            if (alpha && !src_alpha) {
                for (x = 0; x < width; x++) {
                    component = av_bswap16(src[0][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[1][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[2][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    *dest++ = 0xffff;
                }
            } else if (alpha && src_alpha) {
                for (x = 0; x < width; x++) {
                    component = av_bswap16(src[0][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[1][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[2][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[3][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                }
            } else {
                for (x = 0; x < width; x++) {
                    component = av_bswap16(src[0][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[1][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                    component = av_bswap16(src[2][x]);
                    *dest++ = av_bswap16(component << scale_high | component >> scale_low);
                }
            }
            break;
        case 2:
            if (alpha && !src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x] << scale_high | src[0][x] >> scale_low);
                    *dest++ = av_bswap16(src[1][x] << scale_high | src[1][x] >> scale_low);
                    *dest++ = av_bswap16(src[2][x] << scale_high | src[2][x] >> scale_low);
                    *dest++ = 0xffff;
                }
            } else if (alpha && src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x] << scale_high | src[0][x] >> scale_low);
                    *dest++ = av_bswap16(src[1][x] << scale_high | src[1][x] >> scale_low);
                    *dest++ = av_bswap16(src[2][x] << scale_high | src[2][x] >> scale_low);
                    *dest++ = av_bswap16(src[3][x] << scale_high | src[3][x] >> scale_low);
                }
            } else {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x] << scale_high | src[0][x] >> scale_low);
                    *dest++ = av_bswap16(src[1][x] << scale_high | src[1][x] >> scale_low);
                    *dest++ = av_bswap16(src[2][x] << scale_high | src[2][x] >> scale_low);
                }
            }
            break;
        case 1:
            if (alpha && !src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x]) << scale_high | av_bswap16(src[0][x]) >> scale_low;
                    *dest++ = av_bswap16(src[1][x]) << scale_high | av_bswap16(src[1][x]) >> scale_low;
                    *dest++ = av_bswap16(src[2][x]) << scale_high | av_bswap16(src[2][x]) >> scale_low;
                    *dest++ = 0xffff;
                }
            } else if (alpha && src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x]) << scale_high | av_bswap16(src[0][x]) >> scale_low;
                    *dest++ = av_bswap16(src[1][x]) << scale_high | av_bswap16(src[1][x]) >> scale_low;
                    *dest++ = av_bswap16(src[2][x]) << scale_high | av_bswap16(src[2][x]) >> scale_low;
                    *dest++ = av_bswap16(src[3][x]) << scale_high | av_bswap16(src[3][x]) >> scale_low;
                }
            } else {
                for (x = 0; x < width; x++) {
                    *dest++ = av_bswap16(src[0][x]) << scale_high | av_bswap16(src[0][x]) >> scale_low;
                    *dest++ = av_bswap16(src[1][x]) << scale_high | av_bswap16(src[1][x]) >> scale_low;
                    *dest++ = av_bswap16(src[2][x]) << scale_high | av_bswap16(src[2][x]) >> scale_low;
                }
            }
            break;
        default:
            if (alpha && !src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = src[0][x] << scale_high | src[0][x] >> scale_low;
                    *dest++ = src[1][x] << scale_high | src[1][x] >> scale_low;
                    *dest++ = src[2][x] << scale_high | src[2][x] >> scale_low;
                    *dest++ = 0xffff;
                }
            } else if (alpha && src_alpha) {
                for (x = 0; x < width; x++) {
                    *dest++ = src[0][x] << scale_high | src[0][x] >> scale_low;
                    *dest++ = src[1][x] << scale_high | src[1][x] >> scale_low;
                    *dest++ = src[2][x] << scale_high | src[2][x] >> scale_low;
                    *dest++ = src[3][x] << scale_high | src[3][x] >> scale_low;
                }
            } else {
                for (x = 0; x < width; x++) {
                    *dest++ = src[0][x] << scale_high | src[0][x] >> scale_low;
                    *dest++ = src[1][x] << scale_high | src[1][x] >> scale_low;
                    *dest++ = src[2][x] << scale_high | src[2][x] >> scale_low;
                }
            }
        }
        for (i = 0; i < 3 + src_alpha; i++)
            src[i] += srcStride[i] >> 1;
    }
}

static void gbr16ptopacked30(const uint16_t *src[], const int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int swap, int bpp, int width)
{
    int x, h, i;
    int shift = bpp - 10;
    av_assert0(bpp >= 0);
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        switch(swap) {
        case 3:
        case 1:
            for (x = 0; x < width; x++) {
                unsigned C0 = av_bswap16(src[0][x]) >> shift;
                unsigned C1 = av_bswap16(src[1][x]) >> shift;
                unsigned C2 = av_bswap16(src[2][x]) >> shift;
                AV_WL32(dest + 4 * x, (3U << 30) + (C0 << 20) + (C1 << 10) + C2);
            }
            break;
        default:
            for (x = 0; x < width; x++) {
                unsigned C0 = src[0][x] >> shift;
                unsigned C1 = src[1][x] >> shift;
                unsigned C2 = src[2][x] >> shift;
                AV_WL32(dest + 4 * x, (3U << 30) + (C0 << 20) + (C1 << 10) + C2);
            }
            break;
        }
        for (i = 0; i < 3; i++)
            src[i] += srcStride[i] >> 1;
    }
}

static int planarRgb16ToRgb16Wrapper(SwsInternal *c, const uint8_t *const src[],
                                     const int srcStride[], int srcSliceY, int srcSliceH,
                                     uint8_t *const dst[], const int dstStride[])
{
    const uint16_t *src102[] = { (uint16_t *)src[1], (uint16_t *)src[0], (uint16_t *)src[2], (uint16_t *)src[3] };
    const uint16_t *src201[] = { (uint16_t *)src[2], (uint16_t *)src[0], (uint16_t *)src[1], (uint16_t *)src[3] };
    int stride102[] = { srcStride[1], srcStride[0], srcStride[2], srcStride[3] };
    int stride201[] = { srcStride[2], srcStride[0], srcStride[1], srcStride[3] };
    const AVPixFmtDescriptor *src_format = av_pix_fmt_desc_get(c->opts.src_format);
    const AVPixFmtDescriptor *dst_format = av_pix_fmt_desc_get(c->opts.dst_format);
    int bits_per_sample = src_format->comp[0].depth;
    int swap = 0;
    if ( HAVE_BIGENDIAN && !(src_format->flags & AV_PIX_FMT_FLAG_BE) ||
        !HAVE_BIGENDIAN &&   src_format->flags & AV_PIX_FMT_FLAG_BE)
        swap++;
    if ( HAVE_BIGENDIAN && !(dst_format->flags & AV_PIX_FMT_FLAG_BE) ||
        !HAVE_BIGENDIAN &&   dst_format->flags & AV_PIX_FMT_FLAG_BE)
        swap += 2;
    if ((src_format->flags & (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB)) !=
        (AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB) ||
        bits_per_sample <= 8) {
        av_log(c, AV_LOG_ERROR, "unsupported planar RGB conversion %s -> %s\n",
               src_format->name, dst_format->name);
        return srcSliceH;
    }
    switch (c->opts.dst_format) {
    case AV_PIX_FMT_BGR48LE:
    case AV_PIX_FMT_BGR48BE:
        gbr16ptopacked16(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, 0, swap, bits_per_sample, c->opts.src_w);
        break;
    case AV_PIX_FMT_RGB48LE:
    case AV_PIX_FMT_RGB48BE:
        gbr16ptopacked16(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, 0, swap, bits_per_sample, c->opts.src_w);
        break;
    case AV_PIX_FMT_RGBA64LE:
    case AV_PIX_FMT_RGBA64BE:
        gbr16ptopacked16(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, 1, swap, bits_per_sample, c->opts.src_w);
        break;
    case AV_PIX_FMT_BGRA64LE:
    case AV_PIX_FMT_BGRA64BE:
        gbr16ptopacked16(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, 1, swap, bits_per_sample, c->opts.src_w);
        break;
    case AV_PIX_FMT_X2RGB10LE:
        gbr16ptopacked30(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, swap, bits_per_sample, c->opts.src_w);
        break;
    case AV_PIX_FMT_X2BGR10LE:
        gbr16ptopacked30(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, swap, bits_per_sample, c->opts.src_w);
        break;
    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported planar RGB conversion %s -> %s\n",
               src_format->name, dst_format->name);
    }
    return srcSliceH;
}

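/* Interleave 8-bit planar GBR(A) into packed 24-bit or 32-bit RGB/BGR
 * layouts, writing either an opaque alpha byte or the source alpha plane. */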
static void gbr24ptopacked24(const uint8_t *src[], const int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        for (x = 0; x < width; x++) {
            *dest++ = src[0][x];
            *dest++ = src[1][x];
            *dest++ = src[2][x];
        }
        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static void gbr24ptopacked32(const uint8_t *src[], const int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int alpha_first, int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        if (alpha_first) {
            for (x = 0; x < width; x++) {
                *dest++ = 0xff;
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
            }
        } else {
            for (x = 0; x < width; x++) {
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
                *dest++ = 0xff;
            }
        }
        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static void gbraptopacked32(const uint8_t *src[], const int srcStride[],
                            uint8_t *dst, int dstStride, int srcSliceH,
                            int alpha_first, int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        if (alpha_first) {
            for (x = 0; x < width; x++) {
                *dest++ = src[3][x];
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
            }
        } else {
            for (x = 0; x < width; x++) {
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
                *dest++ = src[3][x];
            }
        }
        for (i = 0; i < 4; i++)
            src[i] += srcStride[i];
    }
}

static int planarRgbaToRgbWrapper(SwsInternal *c, const uint8_t *const src[],
                                  const int srcStride[], int srcSliceY, int srcSliceH,
                                  uint8_t *const dst[], const int dstStride[])
{
    int alpha_first = 0;
    const uint8_t *src102[] = { src[1], src[0], src[2], src[3] };
    const uint8_t *src201[] = { src[2], src[0], src[1], src[3] };
    int stride102[] = { srcStride[1], srcStride[0], srcStride[2], srcStride[3] };
    int stride201[] = { srcStride[2], srcStride[0], srcStride[1], srcStride[3] };
    if (c->opts.src_format != AV_PIX_FMT_GBRAP) {
        av_log(c, AV_LOG_ERROR, "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->opts.src_format),
               av_get_pix_fmt_name(c->opts.dst_format));
        return srcSliceH;
    }
    switch (c->opts.dst_format) {
    case AV_PIX_FMT_BGR24:
        gbr24ptopacked24(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->opts.src_w);
        break;
    case AV_PIX_FMT_RGB24:
        gbr24ptopacked24(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->opts.src_w);
        break;
    case AV_PIX_FMT_ARGB:
        alpha_first = 1;
    case AV_PIX_FMT_RGBA:
        gbraptopacked32(src201, stride201,
                        dst[0] + srcSliceY * dstStride[0], dstStride[0],
                        srcSliceH, alpha_first, c->opts.src_w);
        break;
    case AV_PIX_FMT_ABGR:
        alpha_first = 1;
    case AV_PIX_FMT_BGRA:
        gbraptopacked32(src102, stride102,
                        dst[0] + srcSliceY * dstStride[0], dstStride[0],
                        srcSliceH, alpha_first, c->opts.src_w);
        break;
    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->opts.src_format),
               av_get_pix_fmt_name(c->opts.dst_format));
    }
    return srcSliceH;
}

static int planarRgbToRgbWrapper(SwsInternal *c, const uint8_t *const src[],
                                 const int srcStride[], int srcSliceY, int srcSliceH,
                                 uint8_t *const dst[], const int dstStride[])
{
    int alpha_first = 0;
    const uint8_t *src102[] = { src[1], src[0], src[2] };
    const uint8_t *src201[] = { src[2], src[0], src[1] };
    int stride102[] = { srcStride[1], srcStride[0], srcStride[2] };
    int stride201[] = { srcStride[2], srcStride[0], srcStride[1] };
    if (c->opts.src_format != AV_PIX_FMT_GBRP) {
        av_log(c, AV_LOG_ERROR, "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->opts.src_format),
               av_get_pix_fmt_name(c->opts.dst_format));
        return srcSliceH;
    }
    switch (c->opts.dst_format) {
    case AV_PIX_FMT_BGR24:
        gbr24ptopacked24(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->opts.src_w);
        break;
    case AV_PIX_FMT_RGB24:
        gbr24ptopacked24(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->opts.src_w);
        break;
    case AV_PIX_FMT_ARGB:
        alpha_first = 1;
    case AV_PIX_FMT_RGBA:
        gbr24ptopacked32(src201, stride201,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->opts.src_w);
        break;
    case AV_PIX_FMT_ABGR:
        alpha_first = 1;
    case AV_PIX_FMT_BGRA:
        gbr24ptopacked32(src102, stride102,
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->opts.src_w);
        break;
    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->opts.src_format),
               av_get_pix_fmt_name(c->opts.dst_format));
    }
    return srcSliceH;
}

  1239. static int planarRgbToplanarRgbWrapper(SwsInternal *c,
  1240. const uint8_t *const src[], const int srcStride[],
  1241. int srcSliceY, int srcSliceH,
  1242. uint8_t *const dst[], const int dstStride[])
  1243. {
  1244. ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
  1245. dst[0], dstStride[0]);
  1246. ff_copyPlane(src[1], srcStride[1], srcSliceY, srcSliceH, c->opts.src_w,
  1247. dst[1], dstStride[1]);
  1248. ff_copyPlane(src[2], srcStride[2], srcSliceY, srcSliceH, c->opts.src_w,
  1249. dst[2], dstStride[2]);
  1250. if (dst[3])
  1251. fillPlane(dst[3], dstStride[3], c->opts.src_w, srcSliceH, srcSliceY, 255);
  1252. return srcSliceH;
  1253. }
  1254. static void packedtogbr24p(const uint8_t *src, int srcStride,
  1255. uint8_t *const dst[], const int dstStride[], int srcSliceH,
  1256. int alpha_first, int inc_size, int width)
  1257. {
  1258. uint8_t *dest[3];
  1259. int x, h;
  1260. dest[0] = dst[0];
  1261. dest[1] = dst[1];
  1262. dest[2] = dst[2];
  1263. if (alpha_first)
  1264. src++;
  1265. for (h = 0; h < srcSliceH; h++) {
  1266. for (x = 0; x < width; x++) {
  1267. dest[0][x] = src[0];
  1268. dest[1][x] = src[1];
  1269. dest[2][x] = src[2];
  1270. src += inc_size;
  1271. }
  1272. src += srcStride - width * inc_size;
  1273. dest[0] += dstStride[0];
  1274. dest[1] += dstStride[1];
  1275. dest[2] += dstStride[2];
  1276. }
  1277. }
  1278. static int rgbToPlanarRgbWrapper(SwsInternal *c, const uint8_t *const src[],
  1279. const int srcStride[], int srcSliceY, int srcSliceH,
  1280. uint8_t *const dst[], const int dstStride[])
  1281. {
  1282. int alpha_first = 0;
  1283. int stride102[] = { dstStride[1], dstStride[0], dstStride[2] };
  1284. int stride201[] = { dstStride[2], dstStride[0], dstStride[1] };
  1285. uint8_t *dst102[] = { dst[1] + srcSliceY * dstStride[1],
  1286. dst[0] + srcSliceY * dstStride[0],
  1287. dst[2] + srcSliceY * dstStride[2] };
  1288. uint8_t *dst201[] = { dst[2] + srcSliceY * dstStride[2],
  1289. dst[0] + srcSliceY * dstStride[0],
  1290. dst[1] + srcSliceY * dstStride[1] };
  1291. switch (c->opts.src_format) {
  1292. case AV_PIX_FMT_RGB24:
  1293. packedtogbr24p((const uint8_t *) src[0], srcStride[0], dst201,
  1294. stride201, srcSliceH, alpha_first, 3, c->opts.src_w);
  1295. break;
  1296. case AV_PIX_FMT_BGR24:
  1297. packedtogbr24p((const uint8_t *) src[0], srcStride[0], dst102,
  1298. stride102, srcSliceH, alpha_first, 3, c->opts.src_w);
  1299. break;
  1300. case AV_PIX_FMT_ARGB:
  1301. alpha_first = 1;
  1302. case AV_PIX_FMT_RGBA:
  1303. packedtogbr24p((const uint8_t *) src[0], srcStride[0], dst201,
  1304. stride201, srcSliceH, alpha_first, 4, c->opts.src_w);
  1305. break;
  1306. case AV_PIX_FMT_ABGR:
  1307. alpha_first = 1;
  1308. case AV_PIX_FMT_BGRA:
  1309. packedtogbr24p((const uint8_t *) src[0], srcStride[0], dst102,
  1310. stride102, srcSliceH, alpha_first, 4, c->opts.src_w);
  1311. break;
  1312. default:
  1313. av_log(c, AV_LOG_ERROR,
  1314. "unsupported planar RGB conversion %s -> %s\n",
  1315. av_get_pix_fmt_name(c->opts.src_format),
  1316. av_get_pix_fmt_name(c->opts.dst_format));
  1317. }
  1318. return srcSliceH;
  1319. }
  1320. static void packed24togbrap(const uint8_t *src, int srcStride,
  1321. uint8_t *const dst[], const int dstStride[],
  1322. int srcSliceH, int width)
  1323. {
  1324. uint8_t *dest[4];
  1325. int x, h;
  1326. dest[0] = dst[0];
  1327. dest[1] = dst[1];
  1328. dest[2] = dst[2];
  1329. dest[3] = dst[3];
  1330. for (h = 0; h < srcSliceH; h++) {
  1331. for (x = 0; x < width; x++) {
  1332. dest[0][x] = src[x * 3 + 0];
  1333. dest[1][x] = src[x * 3 + 1];
  1334. dest[2][x] = src[x * 3 + 2];
  1335. dest[3][x] = 0xff;
  1336. }
  1337. src += srcStride;
  1338. dest[0] += dstStride[0];
  1339. dest[1] += dstStride[1];
  1340. dest[2] += dstStride[2];
  1341. dest[3] += dstStride[3];
  1342. }
  1343. }
  1344. static void packed32togbrap(const uint8_t *src, int srcStride,
  1345. uint8_t *const dst[], const int dstStride[],
  1346. int srcSliceH, int alpha_first, int width)
  1347. {
  1348. uint8_t *dest[4];
  1349. int x, h;
  1350. dest[0] = dst[0];
  1351. dest[1] = dst[1];
  1352. dest[2] = dst[2];
  1353. dest[3] = dst[3];
  1354. for (h = 0; h < srcSliceH; h++) {
  1355. if (alpha_first) {
  1356. for (x = 0; x < width; x++) {
  1357. dest[0][x] = src[x * 4 + 1];
  1358. dest[1][x] = src[x * 4 + 2];
  1359. dest[2][x] = src[x * 4 + 3];
  1360. dest[3][x] = src[x * 4 + 0];
  1361. }
  1362. } else {
  1363. for (x = 0; x < width; x++) {
  1364. dest[0][x] = src[x * 4 + 0];
  1365. dest[1][x] = src[x * 4 + 1];
  1366. dest[2][x] = src[x * 4 + 2];
  1367. dest[3][x] = src[x * 4 + 3];
  1368. }
  1369. }
  1370. src += srcStride;
  1371. dest[0] += dstStride[0];
  1372. dest[1] += dstStride[1];
  1373. dest[2] += dstStride[2];
  1374. dest[3] += dstStride[3];
  1375. }
  1376. }
  1377. static int rgbToPlanarRgbaWrapper(SwsInternal *c, const uint8_t *const src[],
  1378. const int srcStride[], int srcSliceY, int srcSliceH,
  1379. uint8_t *const dst[], const int dstStride[])
  1380. {
  1381. int alpha_first = 0;
  1382. int stride102[] = { dstStride[1], dstStride[0], dstStride[2], dstStride[3] };
  1383. int stride201[] = { dstStride[2], dstStride[0], dstStride[1], dstStride[3] };
  1384. uint8_t *dst102[] = { dst[1] + srcSliceY * dstStride[1],
  1385. dst[0] + srcSliceY * dstStride[0],
  1386. dst[2] + srcSliceY * dstStride[2],
  1387. dst[3] + srcSliceY * dstStride[3] };
  1388. uint8_t *dst201[] = { dst[2] + srcSliceY * dstStride[2],
  1389. dst[0] + srcSliceY * dstStride[0],
  1390. dst[1] + srcSliceY * dstStride[1],
  1391. dst[3] + srcSliceY * dstStride[3] };
  1392. switch (c->opts.src_format) {
  1393. case AV_PIX_FMT_RGB24:
  1394. packed24togbrap((const uint8_t *) src[0], srcStride[0], dst201,
  1395. stride201, srcSliceH, c->opts.src_w);
  1396. break;
  1397. case AV_PIX_FMT_BGR24:
  1398. packed24togbrap((const uint8_t *) src[0], srcStride[0], dst102,
  1399. stride102, srcSliceH, c->opts.src_w);
  1400. break;
  1401. case AV_PIX_FMT_ARGB:
  1402. alpha_first = 1;
  1403. case AV_PIX_FMT_RGBA:
  1404. packed32togbrap((const uint8_t *) src[0], srcStride[0], dst201,
  1405. stride201, srcSliceH, alpha_first, c->opts.src_w);
  1406. break;
  1407. case AV_PIX_FMT_ABGR:
  1408. alpha_first = 1;
  1409. case AV_PIX_FMT_BGRA:
  1410. packed32togbrap((const uint8_t *) src[0], srcStride[0], dst102,
  1411. stride102, srcSliceH, alpha_first, c->opts.src_w);
  1412. break;
  1413. default:
  1414. av_log(c, AV_LOG_ERROR,
  1415. "unsupported planar RGB conversion %s -> %s\n",
  1416. av_get_pix_fmt_name(c->opts.src_format),
  1417. av_get_pix_fmt_name(c->opts.dst_format));
  1418. }
  1419. return srcSliceH;
  1420. }
  1421. #define BAYER_GBRG
  1422. #define BAYER_8
  1423. #define BAYER_RENAME(x) bayer_gbrg8_to_##x
  1424. #include "bayer_template.c"
  1425. #define BAYER_GBRG
  1426. #define BAYER_16LE
  1427. #define BAYER_RENAME(x) bayer_gbrg16le_to_##x
  1428. #include "bayer_template.c"
  1429. #define BAYER_GBRG
  1430. #define BAYER_16BE
  1431. #define BAYER_RENAME(x) bayer_gbrg16be_to_##x
  1432. #include "bayer_template.c"
  1433. #define BAYER_GRBG
  1434. #define BAYER_8
  1435. #define BAYER_RENAME(x) bayer_grbg8_to_##x
  1436. #include "bayer_template.c"
  1437. #define BAYER_GRBG
  1438. #define BAYER_16LE
  1439. #define BAYER_RENAME(x) bayer_grbg16le_to_##x
  1440. #include "bayer_template.c"
  1441. #define BAYER_GRBG
  1442. #define BAYER_16BE
  1443. #define BAYER_RENAME(x) bayer_grbg16be_to_##x
  1444. #include "bayer_template.c"
  1445. #define BAYER_BGGR
  1446. #define BAYER_8
  1447. #define BAYER_RENAME(x) bayer_bggr8_to_##x
  1448. #include "bayer_template.c"
  1449. #define BAYER_BGGR
  1450. #define BAYER_16LE
  1451. #define BAYER_RENAME(x) bayer_bggr16le_to_##x
  1452. #include "bayer_template.c"
  1453. #define BAYER_BGGR
  1454. #define BAYER_16BE
  1455. #define BAYER_RENAME(x) bayer_bggr16be_to_##x
  1456. #include "bayer_template.c"
  1457. #define BAYER_RGGB
  1458. #define BAYER_8
  1459. #define BAYER_RENAME(x) bayer_rggb8_to_##x
  1460. #include "bayer_template.c"
  1461. #define BAYER_RGGB
  1462. #define BAYER_16LE
  1463. #define BAYER_RENAME(x) bayer_rggb16le_to_##x
  1464. #include "bayer_template.c"
  1465. #define BAYER_RGGB
  1466. #define BAYER_16BE
  1467. #define BAYER_RENAME(x) bayer_rggb16be_to_##x
  1468. #include "bayer_template.c"
  1469. static int bayer_to_rgb24_wrapper(SwsInternal *c, const uint8_t *const src[],
  1470. const int srcStride[], int srcSliceY, int srcSliceH,
  1471. uint8_t *const dst[], const int dstStride[])
  1472. {
  1473. uint8_t *dstPtr= dst[0] + srcSliceY * dstStride[0];
  1474. const uint8_t *srcPtr= src[0];
  1475. int i;
  1476. void (*copy) (const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int width);
  1477. void (*interpolate)(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int width);
  1478. switch(c->opts.src_format) {
  1479. #define CASE(pixfmt, prefix) \
  1480. case pixfmt: copy = bayer_##prefix##_to_rgb24_copy; \
  1481. interpolate = bayer_##prefix##_to_rgb24_interpolate; \
  1482. break;
  1483. CASE(AV_PIX_FMT_BAYER_BGGR8, bggr8)
  1484. CASE(AV_PIX_FMT_BAYER_BGGR16LE, bggr16le)
  1485. CASE(AV_PIX_FMT_BAYER_BGGR16BE, bggr16be)
  1486. CASE(AV_PIX_FMT_BAYER_RGGB8, rggb8)
  1487. CASE(AV_PIX_FMT_BAYER_RGGB16LE, rggb16le)
  1488. CASE(AV_PIX_FMT_BAYER_RGGB16BE, rggb16be)
  1489. CASE(AV_PIX_FMT_BAYER_GBRG8, gbrg8)
  1490. CASE(AV_PIX_FMT_BAYER_GBRG16LE, gbrg16le)
  1491. CASE(AV_PIX_FMT_BAYER_GBRG16BE, gbrg16be)
  1492. CASE(AV_PIX_FMT_BAYER_GRBG8, grbg8)
  1493. CASE(AV_PIX_FMT_BAYER_GRBG16LE, grbg16le)
  1494. CASE(AV_PIX_FMT_BAYER_GRBG16BE, grbg16be)
  1495. #undef CASE
  1496. default: return 0;
  1497. }
  1498. av_assert0(srcSliceH > 1);
  1499. copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1500. srcPtr += 2 * srcStride[0];
  1501. dstPtr += 2 * dstStride[0];
  1502. for (i = 2; i < srcSliceH - 2; i += 2) {
  1503. interpolate(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1504. srcPtr += 2 * srcStride[0];
  1505. dstPtr += 2 * dstStride[0];
  1506. }
  1507. if (i + 1 == srcSliceH) {
  1508. copy(srcPtr, -srcStride[0], dstPtr, -dstStride[0], c->opts.src_w);
  1509. } else if (i < srcSliceH)
  1510. copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1511. return srcSliceH;
  1512. }
  1513. static int bayer_to_rgb48_wrapper(SwsInternal *c, const uint8_t *const src[],
  1514. const int srcStride[], int srcSliceY, int srcSliceH,
  1515. uint8_t *const dst[], const int dstStride[])
  1516. {
  1517. uint8_t *dstPtr= dst[0] + srcSliceY * dstStride[0];
  1518. const uint8_t *srcPtr= src[0];
  1519. int i;
  1520. void (*copy) (const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int width);
  1521. void (*interpolate)(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int width);
  1522. switch(c->opts.src_format) {
  1523. #define CASE(pixfmt, prefix) \
  1524. case pixfmt: copy = bayer_##prefix##_to_rgb48_copy; \
  1525. interpolate = bayer_##prefix##_to_rgb48_interpolate; \
  1526. break;
  1527. CASE(AV_PIX_FMT_BAYER_BGGR8, bggr8)
  1528. CASE(AV_PIX_FMT_BAYER_BGGR16LE, bggr16le)
  1529. CASE(AV_PIX_FMT_BAYER_BGGR16BE, bggr16be)
  1530. CASE(AV_PIX_FMT_BAYER_RGGB8, rggb8)
  1531. CASE(AV_PIX_FMT_BAYER_RGGB16LE, rggb16le)
  1532. CASE(AV_PIX_FMT_BAYER_RGGB16BE, rggb16be)
  1533. CASE(AV_PIX_FMT_BAYER_GBRG8, gbrg8)
  1534. CASE(AV_PIX_FMT_BAYER_GBRG16LE, gbrg16le)
  1535. CASE(AV_PIX_FMT_BAYER_GBRG16BE, gbrg16be)
  1536. CASE(AV_PIX_FMT_BAYER_GRBG8, grbg8)
  1537. CASE(AV_PIX_FMT_BAYER_GRBG16LE, grbg16le)
  1538. CASE(AV_PIX_FMT_BAYER_GRBG16BE, grbg16be)
  1539. #undef CASE
  1540. default: return 0;
  1541. }
  1542. av_assert0(srcSliceH > 1);
  1543. copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1544. srcPtr += 2 * srcStride[0];
  1545. dstPtr += 2 * dstStride[0];
  1546. for (i = 2; i < srcSliceH - 2; i += 2) {
  1547. interpolate(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1548. srcPtr += 2 * srcStride[0];
  1549. dstPtr += 2 * dstStride[0];
  1550. }
  1551. if (i + 1 == srcSliceH) {
  1552. copy(srcPtr, -srcStride[0], dstPtr, -dstStride[0], c->opts.src_w);
  1553. } else if (i < srcSliceH)
  1554. copy(srcPtr, srcStride[0], dstPtr, dstStride[0], c->opts.src_w);
  1555. return srcSliceH;
  1556. }
  1557. static int bayer_to_yv12_wrapper(SwsInternal *c, const uint8_t *const src[],
  1558. const int srcStride[], int srcSliceY, int srcSliceH,
  1559. uint8_t *const dst[], const int dstStride[])
  1560. {
  1561. const uint8_t *srcPtr= src[0];
  1562. uint8_t *dstY= dst[0] + srcSliceY * dstStride[0];
  1563. uint8_t *dstU= dst[1] + srcSliceY * dstStride[1] / 2;
  1564. uint8_t *dstV= dst[2] + srcSliceY * dstStride[2] / 2;
  1565. int i;
  1566. void (*copy) (const uint8_t *src, int src_stride, uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, int luma_stride, int width, const int32_t *rgb2yuv);
  1567. void (*interpolate)(const uint8_t *src, int src_stride, uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, int luma_stride, int width, const int32_t *rgb2yuv);
  1568. switch(c->opts.src_format) {
  1569. #define CASE(pixfmt, prefix) \
  1570. case pixfmt: copy = bayer_##prefix##_to_yv12_copy; \
  1571. interpolate = bayer_##prefix##_to_yv12_interpolate; \
  1572. break;
  1573. CASE(AV_PIX_FMT_BAYER_BGGR8, bggr8)
  1574. CASE(AV_PIX_FMT_BAYER_BGGR16LE, bggr16le)
  1575. CASE(AV_PIX_FMT_BAYER_BGGR16BE, bggr16be)
  1576. CASE(AV_PIX_FMT_BAYER_RGGB8, rggb8)
  1577. CASE(AV_PIX_FMT_BAYER_RGGB16LE, rggb16le)
  1578. CASE(AV_PIX_FMT_BAYER_RGGB16BE, rggb16be)
  1579. CASE(AV_PIX_FMT_BAYER_GBRG8, gbrg8)
  1580. CASE(AV_PIX_FMT_BAYER_GBRG16LE, gbrg16le)
  1581. CASE(AV_PIX_FMT_BAYER_GBRG16BE, gbrg16be)
  1582. CASE(AV_PIX_FMT_BAYER_GRBG8, grbg8)
  1583. CASE(AV_PIX_FMT_BAYER_GRBG16LE, grbg16le)
  1584. CASE(AV_PIX_FMT_BAYER_GRBG16BE, grbg16be)
  1585. #undef CASE
  1586. default: return 0;
  1587. }
  1588. av_assert0(srcSliceH > 1);
  1589. copy(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->opts.src_w, c->input_rgb2yuv_table);
  1590. srcPtr += 2 * srcStride[0];
  1591. dstY += 2 * dstStride[0];
  1592. dstU += dstStride[1];
  1593. dstV += dstStride[1];
  1594. for (i = 2; i < srcSliceH - 2; i += 2) {
  1595. interpolate(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->opts.src_w, c->input_rgb2yuv_table);
  1596. srcPtr += 2 * srcStride[0];
  1597. dstY += 2 * dstStride[0];
  1598. dstU += dstStride[1];
  1599. dstV += dstStride[1];
  1600. }
  1601. if (i + 1 == srcSliceH) {
  1602. copy(srcPtr, -srcStride[0], dstY, dstU, dstV, -dstStride[0], c->opts.src_w, c->input_rgb2yuv_table);
  1603. } else if (i < srcSliceH)
  1604. copy(srcPtr, srcStride[0], dstY, dstU, dstV, dstStride[0], c->opts.src_w, c->input_rgb2yuv_table);
  1605. return srcSliceH;
  1606. }
  1607. #define isRGBA32(x) ( \
  1608. (x) == AV_PIX_FMT_ARGB \
  1609. || (x) == AV_PIX_FMT_RGBA \
  1610. || (x) == AV_PIX_FMT_BGRA \
  1611. || (x) == AV_PIX_FMT_ABGR \
  1612. )
  1613. #define isRGBA64(x) ( \
  1614. (x) == AV_PIX_FMT_RGBA64LE \
  1615. || (x) == AV_PIX_FMT_RGBA64BE \
  1616. || (x) == AV_PIX_FMT_BGRA64LE \
  1617. || (x) == AV_PIX_FMT_BGRA64BE \
  1618. )
  1619. #define isRGB48(x) ( \
  1620. (x) == AV_PIX_FMT_RGB48LE \
  1621. || (x) == AV_PIX_FMT_RGB48BE \
  1622. || (x) == AV_PIX_FMT_BGR48LE \
  1623. || (x) == AV_PIX_FMT_BGR48BE \
  1624. )
  1625. #define isAYUV(x) ( \
  1626. (x) == AV_PIX_FMT_AYUV \
  1627. || (x) == AV_PIX_FMT_VUYA \
  1628. || (x) == AV_PIX_FMT_VUYX \
  1629. || (x) == AV_PIX_FMT_UYVA \
  1630. )
  1631. #define isX2RGB(x) ( \
  1632. (x) == AV_PIX_FMT_X2RGB10LE \
  1633. || (x) == AV_PIX_FMT_X2BGR10LE \
  1634. )
  1635. /* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
  1636. typedef void (* rgbConvFn) (const uint8_t *, uint8_t *, int);
  1637. static rgbConvFn findRgbConvFn(SwsInternal *c)
  1638. {
  1639. const enum AVPixelFormat srcFormat = c->opts.src_format;
  1640. const enum AVPixelFormat dstFormat = c->opts.dst_format;
  1641. const int srcId = c->srcFormatBpp;
  1642. const int dstId = c->dstFormatBpp;
  1643. rgbConvFn conv = NULL;
  1644. #define IS_NOT_NE(bpp, desc) \
  1645. (((bpp + 7) >> 3) == 2 && \
  1646. (!(desc->flags & AV_PIX_FMT_FLAG_BE) != !HAVE_BIGENDIAN))
  1647. #define CONV_IS(src, dst) (srcFormat == AV_PIX_FMT_##src && dstFormat == AV_PIX_FMT_##dst)
  1648. if (isRGBA32(srcFormat) && isRGBA32(dstFormat)) {
  1649. if ( CONV_IS(ABGR, RGBA)
  1650. || CONV_IS(ARGB, BGRA)
  1651. || CONV_IS(BGRA, ARGB)
  1652. || CONV_IS(RGBA, ABGR)) conv = shuffle_bytes_3210;
  1653. else if (CONV_IS(ABGR, ARGB)
  1654. || CONV_IS(ARGB, ABGR)) conv = shuffle_bytes_0321;
  1655. else if (CONV_IS(ABGR, BGRA)
  1656. || CONV_IS(ARGB, RGBA)) conv = shuffle_bytes_1230;
  1657. else if (CONV_IS(BGRA, RGBA)
  1658. || CONV_IS(RGBA, BGRA)) conv = shuffle_bytes_2103;
  1659. else if (CONV_IS(BGRA, ABGR)
  1660. || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012;
  1661. } else if (isRGB48(srcFormat) && isRGB48(dstFormat)) {
  1662. if (CONV_IS(RGB48LE, BGR48LE)
  1663. || CONV_IS(BGR48LE, RGB48LE)
  1664. || CONV_IS(RGB48BE, BGR48BE)
  1665. || CONV_IS(BGR48BE, RGB48BE)) conv = rgb48tobgr48_nobswap;
  1666. else if (CONV_IS(RGB48LE, BGR48BE)
  1667. || CONV_IS(BGR48LE, RGB48BE)
  1668. || CONV_IS(RGB48BE, BGR48LE)
  1669. || CONV_IS(BGR48BE, RGB48LE)) conv = rgb48tobgr48_bswap;
  1670. } else if (isRGB48(srcFormat) && isRGBA64(dstFormat)) {
  1671. if (CONV_IS(RGB48LE, BGRA64LE)
  1672. || CONV_IS(BGR48LE, RGBA64LE)
  1673. || CONV_IS(RGB48BE, BGRA64BE)
  1674. || CONV_IS(BGR48BE, RGBA64BE)) conv = rgb48tobgr64_nobswap;
  1675. else if (CONV_IS(RGB48LE, BGRA64BE)
  1676. || CONV_IS(BGR48LE, RGBA64BE)
  1677. || CONV_IS(RGB48BE, BGRA64LE)
  1678. || CONV_IS(BGR48BE, RGBA64LE)) conv = rgb48tobgr64_bswap;
  1679. if (CONV_IS(RGB48LE, RGBA64LE)
  1680. || CONV_IS(BGR48LE, BGRA64LE)
  1681. || CONV_IS(RGB48BE, RGBA64BE)
  1682. || CONV_IS(BGR48BE, BGRA64BE)) conv = rgb48to64_nobswap;
  1683. else if (CONV_IS(RGB48LE, RGBA64BE)
  1684. || CONV_IS(BGR48LE, BGRA64BE)
  1685. || CONV_IS(RGB48BE, RGBA64LE)
  1686. || CONV_IS(BGR48BE, BGRA64LE)) conv = rgb48to64_bswap;
  1687. } else if (isRGBA64(srcFormat) && isRGB48(dstFormat)) {
  1688. if (CONV_IS(RGBA64LE, BGR48LE)
  1689. || CONV_IS(BGRA64LE, RGB48LE)
  1690. || CONV_IS(RGBA64BE, BGR48BE)
  1691. || CONV_IS(BGRA64BE, RGB48BE)) conv = rgb64tobgr48_nobswap;
  1692. else if (CONV_IS(RGBA64LE, BGR48BE)
  1693. || CONV_IS(BGRA64LE, RGB48BE)
  1694. || CONV_IS(RGBA64BE, BGR48LE)
  1695. || CONV_IS(BGRA64BE, RGB48LE)) conv = rgb64tobgr48_bswap;
  1696. else if (CONV_IS(RGBA64LE, RGB48LE)
  1697. || CONV_IS(BGRA64LE, BGR48LE)
  1698. || CONV_IS(RGBA64BE, RGB48BE)
  1699. || CONV_IS(BGRA64BE, BGR48BE)) conv = rgb64to48_nobswap;
  1700. else if (CONV_IS(RGBA64LE, RGB48BE)
  1701. || CONV_IS(BGRA64LE, BGR48BE)
  1702. || CONV_IS(RGBA64BE, RGB48LE)
  1703. || CONV_IS(BGRA64BE, BGR48LE)) conv = rgb64to48_bswap;
  1704. } else if (isX2RGB(srcFormat) && isRGB48(dstFormat)) {
  1705. if (CONV_IS(X2RGB10LE, RGB48LE)
  1706. || CONV_IS(X2BGR10LE, BGR48LE)) conv = HAVE_BIGENDIAN ? x2rgb10to48_bswap
  1707. : x2rgb10to48_nobswap;
  1708. else if (CONV_IS(X2RGB10LE, RGB48BE)
  1709. || CONV_IS(X2BGR10LE, BGR48BE)) conv = HAVE_BIGENDIAN ? x2rgb10to48_nobswap
  1710. : x2rgb10to48_bswap;
  1711. else if (CONV_IS(X2RGB10LE, BGR48LE)
  1712. || CONV_IS(X2BGR10LE, RGB48LE)) conv = HAVE_BIGENDIAN ? x2rgb10tobgr48_bswap
  1713. : x2rgb10tobgr48_nobswap;
  1714. else if (CONV_IS(X2RGB10LE, BGR48BE)
  1715. || CONV_IS(X2BGR10LE, RGB48BE)) conv = HAVE_BIGENDIAN ? x2rgb10tobgr48_nobswap
  1716. : x2rgb10tobgr48_bswap;
  1717. else if (CONV_IS(X2RGB10LE, RGBA64LE)
  1718. || CONV_IS(X2BGR10LE, BGRA64LE)) conv = HAVE_BIGENDIAN ? x2rgb10to64_bswap
  1719. : x2rgb10to64_nobswap;
  1720. else if (CONV_IS(X2RGB10LE, RGBA64BE)
  1721. || CONV_IS(X2BGR10LE, BGRA64BE)) conv = HAVE_BIGENDIAN ? x2rgb10to64_nobswap
  1722. : x2rgb10to64_bswap;
  1723. else if (CONV_IS(X2RGB10LE, BGRA64LE)
  1724. || CONV_IS(X2BGR10LE, RGBA64LE)) conv = HAVE_BIGENDIAN ? x2rgb10tobgr64_bswap
  1725. : x2rgb10tobgr64_nobswap;
  1726. else if (CONV_IS(X2RGB10LE, BGRA64BE)
  1727. || CONV_IS(X2BGR10LE, RGBA64BE)) conv = HAVE_BIGENDIAN ? x2rgb10tobgr64_nobswap
  1728. : x2rgb10tobgr64_bswap;
  1729. } else if (isAYUV(srcFormat) && isAYUV(dstFormat)) {
  1730. /* VUYX only for dst, to avoid copying undefined bytes */
  1731. if ( CONV_IS(AYUV, VUYA)
  1732. || CONV_IS(AYUV, VUYX)
  1733. || CONV_IS(VUYA, AYUV)) conv = shuffle_bytes_3210;
  1734. else if (CONV_IS(AYUV, UYVA)) conv = shuffle_bytes_2130;
  1735. else if (CONV_IS(VUYA, UYVA)) conv = shuffle_bytes_1203;
  1736. else if (CONV_IS(UYVA, AYUV)) conv = shuffle_bytes_3102;
  1737. else if (CONV_IS(UYVA, VUYA)
  1738. || CONV_IS(UYVA, VUYX)) conv = shuffle_bytes_2013;
  1739. } else
  1740. /* BGR -> BGR */
  1741. if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) ||
  1742. (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
  1743. switch (srcId | (dstId << 16)) {
  1744. case 0x000F000C: conv = rgb12to15; break;
  1745. case 0x000F0010: conv = rgb16to15; break;
  1746. case 0x000F0018: conv = rgb24to15; break;
  1747. case 0x000F0020: conv = rgb32to15; break;
  1748. case 0x0010000F: conv = rgb15to16; break;
  1749. case 0x00100018: conv = rgb24to16; break;
  1750. case 0x00100020: conv = rgb32to16; break;
  1751. case 0x0018000F: conv = rgb15to24; break;
  1752. case 0x00180010: conv = rgb16to24; break;
  1753. case 0x00180020: conv = rgb32to24; break;
  1754. case 0x0020000F: conv = rgb15to32; break;
  1755. case 0x00200010: conv = rgb16to32; break;
  1756. case 0x00200018: conv = rgb24to32; break;
  1757. }
  1758. } else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) ||
  1759. (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
  1760. switch (srcId | (dstId << 16)) {
  1761. case 0x000C000C: conv = rgb12tobgr12; break;
  1762. case 0x000F000F: conv = rgb15tobgr15; break;
  1763. case 0x000F0010: conv = rgb16tobgr15; break;
  1764. case 0x000F0018: conv = rgb24tobgr15; break;
  1765. case 0x000F0020: conv = rgb32tobgr15; break;
  1766. case 0x0010000F: conv = rgb15tobgr16; break;
  1767. case 0x00100010: conv = rgb16tobgr16; break;
  1768. case 0x00100018: conv = rgb24tobgr16; break;
  1769. case 0x00100020: conv = rgb32tobgr16; break;
  1770. case 0x0018000F: conv = rgb15tobgr24; break;
  1771. case 0x00180010: conv = rgb16tobgr24; break;
  1772. case 0x00180018: conv = rgb24tobgr24; break;
  1773. case 0x00180020: conv = rgb32tobgr24; break;
  1774. case 0x0020000F: conv = rgb15tobgr32; break;
  1775. case 0x00200010: conv = rgb16tobgr32; break;
  1776. case 0x00200018: conv = rgb24tobgr32; break;
  1777. }
  1778. }
  1779. if ((dstFormat == AV_PIX_FMT_RGB32_1 || dstFormat == AV_PIX_FMT_BGR32_1) && !isRGBA32(srcFormat) && ALT32_CORR<0)
  1780. return NULL;
  1781. // Maintain symmetry between endianness
  1782. if (c->opts.flags & SWS_BITEXACT)
  1783. if ((dstFormat == AV_PIX_FMT_RGB32 || dstFormat == AV_PIX_FMT_BGR32 ) && !isRGBA32(srcFormat) && ALT32_CORR>0)
  1784. return NULL;
  1785. return conv;
  1786. }
  1787. /* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
  1788. static int rgbToRgbWrapper(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
  1789. int srcSliceY, int srcSliceH, uint8_t *const dst[],
  1790. const int dstStride[])
  1791. {
  1792. const enum AVPixelFormat srcFormat = c->opts.src_format;
  1793. const enum AVPixelFormat dstFormat = c->opts.dst_format;
  1794. const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(c->opts.src_format);
  1795. const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(c->opts.dst_format);
  1796. const int srcBpp = (c->srcFormatBpp + 7) >> 3;
  1797. const int dstBpp = (c->dstFormatBpp + 7) >> 3;
  1798. rgbConvFn conv = findRgbConvFn(c);
  1799. if (!conv) {
  1800. av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
  1801. av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
  1802. } else {
  1803. const uint8_t *srcPtr = src[0];
  1804. uint8_t *dstPtr = dst[0];
  1805. int src_bswap = IS_NOT_NE(c->srcFormatBpp, desc_src);
  1806. int dst_bswap = IS_NOT_NE(c->dstFormatBpp, desc_dst);
  1807. if ((srcFormat == AV_PIX_FMT_RGB32_1 || srcFormat == AV_PIX_FMT_BGR32_1) &&
  1808. !isRGBA32(dstFormat))
  1809. srcPtr += ALT32_CORR;
  1810. if ((dstFormat == AV_PIX_FMT_RGB32_1 || dstFormat == AV_PIX_FMT_BGR32_1) &&
  1811. !isRGBA32(srcFormat)) {
  1812. int i;
  1813. av_assert0(ALT32_CORR == 1);
  1814. for (i = 0; i < srcSliceH; i++)
  1815. dstPtr[dstStride[0] * (srcSliceY + i)] = 255;
  1816. dstPtr += ALT32_CORR;
  1817. }
  1818. if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 &&
  1819. !(srcStride[0] % srcBpp) && !dst_bswap && !src_bswap)
  1820. conv(srcPtr, dstPtr + dstStride[0] * srcSliceY,
  1821. (srcSliceH - 1) * srcStride[0] + c->opts.src_w * srcBpp);
  1822. else {
  1823. int i, j;
  1824. dstPtr += dstStride[0] * srcSliceY;
  1825. for (i = 0; i < srcSliceH; i++) {
  1826. if(src_bswap) {
  1827. for(j=0; j<c->opts.src_w; j++)
  1828. ((uint16_t*)c->formatConvBuffer)[j] = av_bswap16(((uint16_t*)srcPtr)[j]);
  1829. conv(c->formatConvBuffer, dstPtr, c->opts.src_w * srcBpp);
  1830. }else
  1831. conv(srcPtr, dstPtr, c->opts.src_w * srcBpp);
  1832. if(dst_bswap)
  1833. for(j=0; j<c->opts.src_w; j++)
  1834. ((uint16_t*)dstPtr)[j] = av_bswap16(((uint16_t*)dstPtr)[j]);
  1835. srcPtr += srcStride[0];
  1836. dstPtr += dstStride[0];
  1837. }
  1838. }
  1839. }
  1840. return srcSliceH;
  1841. }
  1842. static int bgr24ToYv12Wrapper(SwsInternal *c, const uint8_t *const src[],
  1843. const int srcStride[], int srcSliceY, int srcSliceH,
  1844. uint8_t *const dst[], const int dstStride[])
  1845. {
  1846. ff_rgb24toyv12(
  1847. src[0],
  1848. dst[0] + srcSliceY * dstStride[0],
  1849. dst[1] + (srcSliceY >> 1) * dstStride[1],
  1850. dst[2] + (srcSliceY >> 1) * dstStride[2],
  1851. c->opts.src_w, srcSliceH,
  1852. dstStride[0], dstStride[1], srcStride[0],
  1853. c->input_rgb2yuv_table);
  1854. if (dst[3])
  1855. fillPlane(dst[3], dstStride[3], c->opts.src_w, srcSliceH, srcSliceY, 255);
  1856. return srcSliceH;
  1857. }
  1858. static int yvu9ToYv12Wrapper(SwsInternal *c, const uint8_t *const src[],
  1859. const int srcStride[], int srcSliceY, int srcSliceH,
  1860. uint8_t *const dst[], const int dstStride[])
  1861. {
  1862. ff_copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->opts.src_w,
  1863. dst[0], dstStride[0]);
  1864. planar2x(src[1], dst[1] + dstStride[1] * (srcSliceY >> 1), c->chrSrcW,
  1865. srcSliceH >> 2, srcStride[1], dstStride[1]);
  1866. planar2x(src[2], dst[2] + dstStride[2] * (srcSliceY >> 1), c->chrSrcW,
  1867. srcSliceH >> 2, srcStride[2], dstStride[2]);
  1868. if (dst[3])
  1869. fillPlane(dst[3], dstStride[3], c->opts.src_w, srcSliceH, srcSliceY, 255);
  1870. return srcSliceH;
  1871. }
  1872. static int uint_y_to_float_y_wrapper(SwsInternal *c, const uint8_t *const src[],
  1873. const int srcStride[], int srcSliceY,
  1874. int srcSliceH, uint8_t *const dst[], const int dstStride[])
  1875. {
  1876. int y, x;
  1877. ptrdiff_t dstStrideFloat = dstStride[0] >> 2;
  1878. const uint8_t *srcPtr = src[0];
  1879. float *dstPtr = (float *)(dst[0] + dstStride[0] * srcSliceY);
  1880. for (y = 0; y < srcSliceH; ++y){
  1881. for (x = 0; x < c->opts.src_w; ++x){
  1882. dstPtr[x] = c->uint2float_lut[srcPtr[x]];
  1883. }
  1884. srcPtr += srcStride[0];
  1885. dstPtr += dstStrideFloat;
  1886. }
  1887. return srcSliceH;
  1888. }
  1889. static int float_y_to_uint_y_wrapper(SwsInternal *c,
  1890. const uint8_t *const src[],
  1891. const int srcStride[], int srcSliceY,
  1892. int srcSliceH, uint8_t *const dst[],
  1893. const int dstStride[])
  1894. {
  1895. int y, x;
  1896. ptrdiff_t srcStrideFloat = srcStride[0] >> 2;
  1897. const float *srcPtr = (const float *)src[0];
  1898. uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
  1899. for (y = 0; y < srcSliceH; ++y){
  1900. for (x = 0; x < c->opts.src_w; ++x){
  1901. dstPtr[x] = av_clip_uint8(lrintf(255.0f * srcPtr[x]));
  1902. }
  1903. srcPtr += srcStrideFloat;
  1904. dstPtr += dstStride[0];
  1905. }
  1906. return srcSliceH;
  1907. }
  1908. /* unscaled copy like stuff (assumes nearly identical formats) */
  1909. static int packedCopyWrapper(SwsInternal *c, const uint8_t *const src[],
  1910. const int srcStride[], int srcSliceY, int srcSliceH,
  1911. uint8_t *const dst[], const int dstStride[])
  1912. {
  1913. if (dstStride[0] == srcStride[0] && srcStride[0] > 0)
  1914. memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], srcSliceH * dstStride[0]);
  1915. else {
  1916. int i;
  1917. const uint8_t *srcPtr = src[0];
  1918. uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
  1919. int length = 0;
  1920. /* universal length finder */
  1921. while (length + c->opts.src_w <= FFABS(dstStride[0]) &&
  1922. length + c->opts.src_w <= FFABS(srcStride[0]))
  1923. length += c->opts.src_w;
  1924. av_assert1(length != 0);
  1925. for (i = 0; i < srcSliceH; i++) {
  1926. memcpy(dstPtr, srcPtr, length);
  1927. srcPtr += srcStride[0];
  1928. dstPtr += dstStride[0];
  1929. }
  1930. }
  1931. return srcSliceH;
  1932. }
  1933. #define DITHER_COPY(dst, dstStride, src, srcStride, bswap, dbswap)\
  1934. unsigned shift= src_depth-dst_depth, tmp;\
  1935. unsigned bias = 1 << (shift - 1);\
  1936. if (c->opts.dither == SWS_DITHER_NONE) {\
  1937. for (i = 0; i < height; i++) {\
  1938. for (j = 0; j < length-7; j+=8) {\
  1939. tmp = (bswap(src[j+0]) + bias)>>shift; dst[j+0] = dbswap(tmp - (tmp>>dst_depth));\
  1940. tmp = (bswap(src[j+1]) + bias)>>shift; dst[j+1] = dbswap(tmp - (tmp>>dst_depth));\
  1941. tmp = (bswap(src[j+2]) + bias)>>shift; dst[j+2] = dbswap(tmp - (tmp>>dst_depth));\
  1942. tmp = (bswap(src[j+3]) + bias)>>shift; dst[j+3] = dbswap(tmp - (tmp>>dst_depth));\
  1943. tmp = (bswap(src[j+4]) + bias)>>shift; dst[j+4] = dbswap(tmp - (tmp>>dst_depth));\
  1944. tmp = (bswap(src[j+5]) + bias)>>shift; dst[j+5] = dbswap(tmp - (tmp>>dst_depth));\
  1945. tmp = (bswap(src[j+6]) + bias)>>shift; dst[j+6] = dbswap(tmp - (tmp>>dst_depth));\
  1946. tmp = (bswap(src[j+7]) + bias)>>shift; dst[j+7] = dbswap(tmp - (tmp>>dst_depth));\
  1947. }\
  1948. for (; j < length; j++) {\
  1949. tmp = (bswap(src[j]) + bias)>>shift; dst[j] = dbswap(tmp - (tmp>>dst_depth));\
  1950. }\
  1951. dst += dstStride;\
  1952. src += srcStride;\
  1953. }\
  1954. } else if (shiftonly) {\
  1955. for (i = 0; i < height; i++) {\
  1956. const uint8_t *dither= dithers[shift-1][i&7];\
  1957. for (j = 0; j < length-7; j+=8) {\
  1958. tmp = (bswap(src[j+0]) + dither[0])>>shift; dst[j+0] = dbswap(tmp - (tmp>>dst_depth));\
  1959. tmp = (bswap(src[j+1]) + dither[1])>>shift; dst[j+1] = dbswap(tmp - (tmp>>dst_depth));\
  1960. tmp = (bswap(src[j+2]) + dither[2])>>shift; dst[j+2] = dbswap(tmp - (tmp>>dst_depth));\
  1961. tmp = (bswap(src[j+3]) + dither[3])>>shift; dst[j+3] = dbswap(tmp - (tmp>>dst_depth));\
  1962. tmp = (bswap(src[j+4]) + dither[4])>>shift; dst[j+4] = dbswap(tmp - (tmp>>dst_depth));\
  1963. tmp = (bswap(src[j+5]) + dither[5])>>shift; dst[j+5] = dbswap(tmp - (tmp>>dst_depth));\
  1964. tmp = (bswap(src[j+6]) + dither[6])>>shift; dst[j+6] = dbswap(tmp - (tmp>>dst_depth));\
  1965. tmp = (bswap(src[j+7]) + dither[7])>>shift; dst[j+7] = dbswap(tmp - (tmp>>dst_depth));\
  1966. }\
  1967. for (; j < length; j++) {\
  1968. tmp = (bswap(src[j]) + dither[j&7])>>shift; dst[j] = dbswap(tmp - (tmp>>dst_depth));\
  1969. }\
  1970. dst += dstStride;\
  1971. src += srcStride;\
  1972. }\
  1973. } else {\
  1974. for (i = 0; i < height; i++) {\
  1975. const uint8_t *dither= dithers[shift-1][i&7];\
  1976. for (j = 0; j < length-7; j+=8) {\
  1977. tmp = bswap(src[j+0]); dst[j+0] = dbswap((tmp - (tmp>>dst_depth) + dither[0])>>shift);\
  1978. tmp = bswap(src[j+1]); dst[j+1] = dbswap((tmp - (tmp>>dst_depth) + dither[1])>>shift);\
  1979. tmp = bswap(src[j+2]); dst[j+2] = dbswap((tmp - (tmp>>dst_depth) + dither[2])>>shift);\
  1980. tmp = bswap(src[j+3]); dst[j+3] = dbswap((tmp - (tmp>>dst_depth) + dither[3])>>shift);\
  1981. tmp = bswap(src[j+4]); dst[j+4] = dbswap((tmp - (tmp>>dst_depth) + dither[4])>>shift);\
  1982. tmp = bswap(src[j+5]); dst[j+5] = dbswap((tmp - (tmp>>dst_depth) + dither[5])>>shift);\
  1983. tmp = bswap(src[j+6]); dst[j+6] = dbswap((tmp - (tmp>>dst_depth) + dither[6])>>shift);\
  1984. tmp = bswap(src[j+7]); dst[j+7] = dbswap((tmp - (tmp>>dst_depth) + dither[7])>>shift);\
  1985. }\
  1986. for (; j < length; j++) {\
  1987. tmp = bswap(src[j]); dst[j] = dbswap((tmp - (tmp>>dst_depth) + dither[j&7])>>shift);\
  1988. }\
  1989. dst += dstStride;\
  1990. src += srcStride;\
  1991. }\
  1992. }
  1993. static int planarCopyWrapper(SwsInternal *c, const uint8_t *const src[],
  1994. const int srcStride[], int srcSliceY, int srcSliceH,
  1995. uint8_t *const dst[], const int dstStride[])
  1996. {
  1997. const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(c->opts.src_format);
  1998. const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(c->opts.dst_format);
  1999. int plane, i, j;
  2000. for (plane = 0; plane < 4 && dst[plane] != NULL; plane++) {
  2001. int length = (plane == 0 || plane == 3) ? c->opts.src_w : AV_CEIL_RSHIFT(c->opts.src_w, c->chrDstHSubSample);
  2002. int y = (plane == 0 || plane == 3) ? srcSliceY: AV_CEIL_RSHIFT(srcSliceY, c->chrDstVSubSample);
  2003. int height = (plane == 0 || plane == 3) ? srcSliceH: AV_CEIL_RSHIFT(srcSliceH, c->chrDstVSubSample);
  2004. const uint8_t *srcPtr = src[plane];
  2005. uint8_t *dstPtr = dst[plane] + dstStride[plane] * y;
  2006. int shiftonly = plane == 1 || plane == 2 || (!c->opts.src_range && plane == 0);
  2007. if (plane == 1 && isSemiPlanarYUV(c->opts.dst_format))
  2008. length *= 2;
  2009. // ignore palette for GRAY8
  2010. if (plane == 1 && desc_dst->nb_components < 3) continue;
  2011. if (!src[plane] || (plane == 1 && desc_src->nb_components < 3)) {
  2012. if (is16BPS(c->opts.dst_format) || isNBPS(c->opts.dst_format)) {
  2013. fillPlane16(dst[plane], dstStride[plane], length, height, y,
  2014. plane == 3, desc_dst->comp[plane].depth,
  2015. isBE(c->opts.dst_format));
  2016. } else {
  2017. fillPlane(dst[plane], dstStride[plane], length, height, y,
  2018. (plane == 3) ? 255 : 128);
  2019. }
  2020. } else {
  2021. if(isNBPS(c->opts.src_format) || isNBPS(c->opts.dst_format)
  2022. || (is16BPS(c->opts.src_format) != is16BPS(c->opts.dst_format))
  2023. ) {
  2024. const int src_depth = desc_src->comp[plane].depth;
  2025. const int dst_depth = desc_dst->comp[plane].depth;
  2026. const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;
  2027. uint16_t *dstPtr2 = (uint16_t*)dstPtr;
  2028. if (dst_depth == 8) {
  2029. av_assert1(src_depth > 8);
  2030. if(isBE(c->opts.src_format) == HAVE_BIGENDIAN){
  2031. DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
  2032. } else {
  2033. DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, )
  2034. }
  2035. } else if (src_depth == 8) {
  2036. for (i = 0; i < height; i++) {
  2037. #define COPY816(w)\
  2038. if (shiftonly) {\
  2039. for (j = 0; j < length; j++)\
  2040. w(&dstPtr2[j], srcPtr[j]<<(dst_depth-8));\
  2041. } else {\
  2042. for (j = 0; j < length; j++)\
  2043. w(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |\
  2044. (srcPtr[j]>>(2*8-dst_depth)));\
  2045. }
  2046. if(isBE(c->opts.dst_format)){
  2047. COPY816(AV_WB16)
  2048. } else {
  2049. COPY816(AV_WL16)
  2050. }
  2051. dstPtr2 += dstStride[plane]/2;
  2052. srcPtr += srcStride[plane];
  2053. }
  2054. } else if (src_depth <= dst_depth) {
  2055. for (i = 0; i < height; i++) {
  2056. j = 0;
  2057. if(isBE(c->opts.src_format) == HAVE_BIGENDIAN &&
  2058. isBE(c->opts.dst_format) == HAVE_BIGENDIAN &&
  2059. shiftonly) {
  2060. unsigned shift = dst_depth - src_depth;
  2061. #if HAVE_FAST_64BIT
  2062. #define FAST_COPY_UP(shift) \
  2063. for (; j < length - 3; j += 4) { \
  2064. uint64_t v = AV_RN64A(srcPtr2 + j); \
  2065. AV_WN64A(dstPtr2 + j, v << shift); \
  2066. }
  2067. #else
  2068. #define FAST_COPY_UP(shift) \
  2069. for (; j < length - 1; j += 2) { \
  2070. uint32_t v = AV_RN32A(srcPtr2 + j); \
  2071. AV_WN32A(dstPtr2 + j, v << shift); \
  2072. }
  2073. #endif
  2074. switch (shift)
  2075. {
  2076. case 6: FAST_COPY_UP(6); break;
  2077. case 7: FAST_COPY_UP(7); break;
  2078. }
  2079. }
  2080. #define COPY_UP(r,w) \
  2081. if(shiftonly){\
  2082. for (; j < length; j++){ \
  2083. unsigned int v= r(&srcPtr2[j]);\
  2084. w(&dstPtr2[j], v<<(dst_depth-src_depth));\
  2085. }\
  2086. }else{\
  2087. for (; j < length; j++){ \
  2088. unsigned int v= r(&srcPtr2[j]);\
  2089. w(&dstPtr2[j], (v<<(dst_depth-src_depth)) | \
  2090. (v>>(2*src_depth-dst_depth)));\
  2091. }\
  2092. }
  2093. if(isBE(c->opts.src_format)){
  2094. if(isBE(c->opts.dst_format)){
  2095. COPY_UP(AV_RB16, AV_WB16)
  2096. } else {
  2097. COPY_UP(AV_RB16, AV_WL16)
  2098. }
  2099. } else {
  2100. if(isBE(c->opts.dst_format)){
  2101. COPY_UP(AV_RL16, AV_WB16)
  2102. } else {
  2103. COPY_UP(AV_RL16, AV_WL16)
  2104. }
  2105. }
  2106. dstPtr2 += dstStride[plane]/2;
  2107. srcPtr2 += srcStride[plane]/2;
  2108. }
  2109. } else { /* src_depth > dst_depth */
  2110. if(isBE(c->opts.src_format) == HAVE_BIGENDIAN){
  2111. if(isBE(c->opts.dst_format) == HAVE_BIGENDIAN){
  2112. DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , )
  2113. } else {
  2114. DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16)
  2115. }
  2116. }else{
  2117. if(isBE(c->opts.dst_format) == HAVE_BIGENDIAN){
  2118. DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, )
  2119. } else {
  2120. DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16)
  2121. }
  2122. }
  2123. }
  2124. } else if (is16BPS(c->opts.src_format) && is16BPS(c->opts.dst_format) &&
  2125. isBE(c->opts.src_format) != isBE(c->opts.dst_format)) {
  2126. for (i = 0; i < height; i++) {
  2127. for (j = 0; j < length; j++)
  2128. ((uint16_t *) dstPtr)[j] = av_bswap16(((const uint16_t *) srcPtr)[j]);
  2129. srcPtr += srcStride[plane];
  2130. dstPtr += dstStride[plane];
  2131. }
  2132. } else if (isFloat(c->opts.src_format) && isFloat(c->opts.dst_format) &&
  2133. isBE(c->opts.src_format) != isBE(c->opts.dst_format)) { /* swap float plane */
  2134. for (i = 0; i < height; i++) {
  2135. for (j = 0; j < length; j++)
  2136. ((uint32_t *) dstPtr)[j] = av_bswap32(((const uint32_t *) srcPtr)[j]);
  2137. srcPtr += srcStride[plane];
  2138. dstPtr += dstStride[plane];
  2139. }
  2140. } else if (dstStride[plane] == srcStride[plane] &&
  2141. srcStride[plane] > 0 && srcStride[plane] == length) {
  2142. memcpy(dst[plane] + dstStride[plane] * y, src[plane],
  2143. height * dstStride[plane]);
  2144. } else {
  2145. if (is16BPS(c->opts.src_format) && is16BPS(c->opts.dst_format))
  2146. length *= 2;
  2147. else if (desc_src->comp[0].depth == 1)
  2148. length >>= 3; // monowhite/black
  2149. for (i = 0; i < height; i++) {
  2150. memcpy(dstPtr, srcPtr, length);
  2151. srcPtr += srcStride[plane];
  2152. dstPtr += dstStride[plane];
  2153. }
  2154. }
  2155. }
  2156. }
  2157. return srcSliceH;
  2158. }
  2159. #define IS_DIFFERENT_ENDIANESS(src_fmt, dst_fmt, pix_fmt) \
  2160. ((src_fmt == pix_fmt ## BE && dst_fmt == pix_fmt ## LE) || \
  2161. (src_fmt == pix_fmt ## LE && dst_fmt == pix_fmt ## BE))
  2162. void ff_get_unscaled_swscale(SwsInternal *c)
  2163. {
  2164. const enum AVPixelFormat srcFormat = c->opts.src_format;
  2165. const enum AVPixelFormat dstFormat = c->opts.dst_format;
  2166. const int flags = c->opts.flags;
  2167. const int dstH = c->opts.dst_h;
  2168. const int dstW = c->opts.dst_w;
  2169. int needsDither;
  2170. needsDither = isAnyRGB(dstFormat) &&
  2171. c->dstFormatBpp < 24 &&
  2172. (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));
  2173. /* yv12_to_nv12 */
  2174. if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUVA420P) &&
  2175. (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)) {
  2176. c->convert_unscaled = planarToNv12Wrapper;
  2177. }
  2178. /* yv24_to_nv24 */
  2179. if ((srcFormat == AV_PIX_FMT_YUV444P || srcFormat == AV_PIX_FMT_YUVA444P) &&
  2180. (dstFormat == AV_PIX_FMT_NV24 || dstFormat == AV_PIX_FMT_NV42)) {
  2181. c->convert_unscaled = planarToNv24Wrapper;
  2182. }
  2183. /* nv12_to_yv12 */
  2184. if (dstFormat == AV_PIX_FMT_YUV420P &&
  2185. (srcFormat == AV_PIX_FMT_NV12 || srcFormat == AV_PIX_FMT_NV21)) {
  2186. c->convert_unscaled = nv12ToPlanarWrapper;
  2187. }
  2188. /* nv24_to_yv24 */
  2189. if (dstFormat == AV_PIX_FMT_YUV444P &&
  2190. (srcFormat == AV_PIX_FMT_NV24 || srcFormat == AV_PIX_FMT_NV42)) {
  2191. c->convert_unscaled = nv24ToPlanarWrapper;
  2192. }
  2193. /* yuv2bgr */
  2194. if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUV422P ||
  2195. srcFormat == AV_PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) &&
  2196. !(flags & SWS_ACCURATE_RND) && (c->opts.dither == SWS_DITHER_BAYER || c->opts.dither == SWS_DITHER_AUTO) && !(dstH & 1)) {
  2197. c->convert_unscaled = ff_yuv2rgb_get_func_ptr(c);
  2198. c->dst_slice_align = 2;
  2199. }
  2200. /* yuv420p1x_to_p01x */
  2201. if ((srcFormat == AV_PIX_FMT_YUV420P10 || srcFormat == AV_PIX_FMT_YUVA420P10 ||
  2202. srcFormat == AV_PIX_FMT_YUV420P12 ||
  2203. srcFormat == AV_PIX_FMT_YUV420P14 ||
  2204. srcFormat == AV_PIX_FMT_YUV420P16 || srcFormat == AV_PIX_FMT_YUVA420P16) &&
  2205. (dstFormat == AV_PIX_FMT_P010 || dstFormat == AV_PIX_FMT_P016)) {
  2206. c->convert_unscaled = planarToP01xWrapper;
  2207. }
  2208. /* yuv420p_to_p01xle */
  2209. if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUVA420P) &&
  2210. (dstFormat == AV_PIX_FMT_P010LE || dstFormat == AV_PIX_FMT_P016LE)) {
  2211. c->convert_unscaled = planar8ToP01xleWrapper;
  2212. }
  2213. if (srcFormat == AV_PIX_FMT_YUV410P && !(dstH & 3) &&
  2214. (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) &&
  2215. !(flags & SWS_BITEXACT)) {
  2216. c->convert_unscaled = yvu9ToYv12Wrapper;
  2217. c->dst_slice_align = 4;
  2218. }
  2219. /* bgr24toYV12 */
  2220. if (srcFormat == AV_PIX_FMT_BGR24 &&
  2221. (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) &&
  2222. !(flags & SWS_ACCURATE_RND) && !(dstW&1))
  2223. c->convert_unscaled = bgr24ToYv12Wrapper;
  2224. /* AYUV/VUYA/UYVA -> AYUV/VUYA/UYVA */
  2225. if (isAYUV(srcFormat) && isAYUV(dstFormat) && findRgbConvFn(c))
  2226. c->convert_unscaled = rgbToRgbWrapper;
  2227. /* RGB/BGR -> RGB/BGR (no dither needed forms) */
  2228. if (isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && findRgbConvFn(c)
  2229. && (!needsDither || (c->opts.flags&(SWS_FAST_BILINEAR|SWS_POINT))))
  2230. c->convert_unscaled = rgbToRgbWrapper;
  2231. /* RGB to planar RGB */
  2232. if ((srcFormat == AV_PIX_FMT_GBRP && dstFormat == AV_PIX_FMT_GBRAP) ||
  2233. (srcFormat == AV_PIX_FMT_GBRP10 && dstFormat == AV_PIX_FMT_GBRAP10) ||
  2234. (srcFormat == AV_PIX_FMT_GBRP12 && dstFormat == AV_PIX_FMT_GBRAP12) ||
  2235. (srcFormat == AV_PIX_FMT_GBRP14 && dstFormat == AV_PIX_FMT_GBRAP14) ||
  2236. (srcFormat == AV_PIX_FMT_GBRP16 && dstFormat == AV_PIX_FMT_GBRAP16) ||
  2237. (srcFormat == AV_PIX_FMT_GBRAP && dstFormat == AV_PIX_FMT_GBRP) ||
  2238. (srcFormat == AV_PIX_FMT_GBRAP10 && dstFormat == AV_PIX_FMT_GBRP10) ||
  2239. (srcFormat == AV_PIX_FMT_GBRAP12 && dstFormat == AV_PIX_FMT_GBRP12) ||
  2240. (srcFormat == AV_PIX_FMT_GBRAP14 && dstFormat == AV_PIX_FMT_GBRP14) ||
  2241. (srcFormat == AV_PIX_FMT_GBRAP16 && dstFormat == AV_PIX_FMT_GBRP16))
  2242. c->convert_unscaled = planarRgbToplanarRgbWrapper;
  2243. #define isByteRGB(f) ( \
  2244. f == AV_PIX_FMT_RGB32 || \
  2245. f == AV_PIX_FMT_RGB32_1 || \
  2246. f == AV_PIX_FMT_RGB24 || \
  2247. f == AV_PIX_FMT_BGR32 || \
  2248. f == AV_PIX_FMT_BGR32_1 || \
  2249. f == AV_PIX_FMT_BGR24)
  2250. if (srcFormat == AV_PIX_FMT_GBRP && isPlanar(srcFormat) && isByteRGB(dstFormat))
  2251. c->convert_unscaled = planarRgbToRgbWrapper;
  2252. if (srcFormat == AV_PIX_FMT_GBRAP && isByteRGB(dstFormat))
  2253. c->convert_unscaled = planarRgbaToRgbWrapper;
  2254. if ((srcFormat == AV_PIX_FMT_RGB48LE || srcFormat == AV_PIX_FMT_RGB48BE ||
  2255. srcFormat == AV_PIX_FMT_BGR48LE || srcFormat == AV_PIX_FMT_BGR48BE ||
  2256. srcFormat == AV_PIX_FMT_RGBA64LE || srcFormat == AV_PIX_FMT_RGBA64BE ||
  2257. srcFormat == AV_PIX_FMT_BGRA64LE || srcFormat == AV_PIX_FMT_BGRA64BE) &&
  2258. (dstFormat == AV_PIX_FMT_GBRP9LE || dstFormat == AV_PIX_FMT_GBRP9BE ||
  2259. dstFormat == AV_PIX_FMT_GBRP10LE || dstFormat == AV_PIX_FMT_GBRP10BE ||
  2260. dstFormat == AV_PIX_FMT_GBRP12LE || dstFormat == AV_PIX_FMT_GBRP12BE ||
  2261. dstFormat == AV_PIX_FMT_GBRP14LE || dstFormat == AV_PIX_FMT_GBRP14BE ||
  2262. dstFormat == AV_PIX_FMT_GBRP16LE || dstFormat == AV_PIX_FMT_GBRP16BE ||
  2263. dstFormat == AV_PIX_FMT_GBRAP10LE || dstFormat == AV_PIX_FMT_GBRAP10BE ||
  2264. dstFormat == AV_PIX_FMT_GBRAP12LE || dstFormat == AV_PIX_FMT_GBRAP12BE ||
  2265. dstFormat == AV_PIX_FMT_GBRAP14LE || dstFormat == AV_PIX_FMT_GBRAP14BE ||
  2266. dstFormat == AV_PIX_FMT_GBRAP16LE || dstFormat == AV_PIX_FMT_GBRAP16BE ))
  2267. c->convert_unscaled = Rgb16ToPlanarRgb16Wrapper;
  2268. if (av_pix_fmt_desc_get(dstFormat)->comp[0].depth >= 10 &&
  2269. isPlanarRGB(dstFormat) && !isFloat(dstFormat) &&
  2270. (srcFormat == AV_PIX_FMT_X2RGB10LE || srcFormat == AV_PIX_FMT_X2BGR10LE))
  2271. c->convert_unscaled = Rgb16ToPlanarRgb16Wrapper;
  2272. if ((srcFormat == AV_PIX_FMT_GBRP9LE || srcFormat == AV_PIX_FMT_GBRP9BE ||
  2273. srcFormat == AV_PIX_FMT_GBRP16LE || srcFormat == AV_PIX_FMT_GBRP16BE ||
  2274. srcFormat == AV_PIX_FMT_GBRP10LE || srcFormat == AV_PIX_FMT_GBRP10BE ||
  2275. srcFormat == AV_PIX_FMT_GBRP12LE || srcFormat == AV_PIX_FMT_GBRP12BE ||
  2276. srcFormat == AV_PIX_FMT_GBRP14LE || srcFormat == AV_PIX_FMT_GBRP14BE ||
  2277. srcFormat == AV_PIX_FMT_GBRAP10LE || srcFormat == AV_PIX_FMT_GBRAP10BE ||
  2278. srcFormat == AV_PIX_FMT_GBRAP12LE || srcFormat == AV_PIX_FMT_GBRAP12BE ||
  2279. srcFormat == AV_PIX_FMT_GBRAP14LE || srcFormat == AV_PIX_FMT_GBRAP14BE ||
  2280. srcFormat == AV_PIX_FMT_GBRAP16LE || srcFormat == AV_PIX_FMT_GBRAP16BE) &&
  2281. (dstFormat == AV_PIX_FMT_RGB48LE || dstFormat == AV_PIX_FMT_RGB48BE ||
  2282. dstFormat == AV_PIX_FMT_BGR48LE || dstFormat == AV_PIX_FMT_BGR48BE ||
  2283. dstFormat == AV_PIX_FMT_RGBA64LE || dstFormat == AV_PIX_FMT_RGBA64BE ||
  2284. dstFormat == AV_PIX_FMT_BGRA64LE || dstFormat == AV_PIX_FMT_BGRA64BE))
  2285. c->convert_unscaled = planarRgb16ToRgb16Wrapper;
  2286. if (av_pix_fmt_desc_get(srcFormat)->comp[0].depth >= 10 &&
  2287. isPlanarRGB(srcFormat) && !isFloat(srcFormat) &&
  2288. (dstFormat == AV_PIX_FMT_X2RGB10LE || dstFormat == AV_PIX_FMT_X2BGR10LE))
  2289. c->convert_unscaled = planarRgb16ToRgb16Wrapper;
  2290. if (av_pix_fmt_desc_get(srcFormat)->comp[0].depth == 8 &&
  2291. isPackedRGB(srcFormat) && dstFormat == AV_PIX_FMT_GBRP)
  2292. c->convert_unscaled = rgbToPlanarRgbWrapper;
  2293. if (av_pix_fmt_desc_get(srcFormat)->comp[0].depth == 8 &&
  2294. isPackedRGB(srcFormat) && dstFormat == AV_PIX_FMT_GBRAP)
  2295. c->convert_unscaled = rgbToPlanarRgbaWrapper;
  2296. if (isBayer(srcFormat)) {
  2297. c->dst_slice_align = 2;
  2298. if (dstFormat == AV_PIX_FMT_RGB24)
  2299. c->convert_unscaled = bayer_to_rgb24_wrapper;
  2300. else if (dstFormat == AV_PIX_FMT_RGB48)
  2301. c->convert_unscaled = bayer_to_rgb48_wrapper;
  2302. else if (dstFormat == AV_PIX_FMT_YUV420P)
  2303. c->convert_unscaled = bayer_to_yv12_wrapper;
  2304. else if (!isBayer(dstFormat)) {
  2305. av_log(c, AV_LOG_ERROR, "unsupported bayer conversion\n");
  2306. av_assert0(0);
  2307. }
  2308. }
  2309. /* bswap 16 bits per pixel/component packed formats */
  2310. if (IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BAYER_BGGR16) ||
  2311. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BAYER_RGGB16) ||
  2312. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BAYER_GBRG16) ||
  2313. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BAYER_GRBG16) ||
  2314. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR444) ||
  2315. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR48) ||
  2316. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR555) ||
  2317. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR565) ||
  2318. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGRA64) ||
  2319. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY9) ||
  2320. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY10) ||
  2321. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY12) ||
  2322. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY14) ||
  2323. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY16) ||
  2324. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YA16) ||
  2325. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_AYUV64) ||
  2326. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRP9) ||
  2327. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRP10) ||
  2328. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRP12) ||
  2329. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRP14) ||
  2330. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRP16) ||
  2331. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRAP10) ||
  2332. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRAP12) ||
  2333. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRAP14) ||
  2334. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRAP16) ||
  2335. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB444) ||
  2336. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB48) ||
  2337. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB555) ||
  2338. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB565) ||
  2339. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGBA64) ||
  2340. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_XV36) ||
  2341. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_XV48) ||
  2342. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_XYZ12) ||
  2343. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV420P9) ||
  2344. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV420P10) ||
  2345. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV420P12) ||
  2346. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV420P14) ||
  2347. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV420P16) ||
  2348. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV422P9) ||
  2349. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV422P10) ||
  2350. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV422P12) ||
  2351. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV422P14) ||
  2352. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV422P16) ||
  2353. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV440P10) ||
  2354. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV440P12) ||
  2355. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P9) ||
  2356. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P10) ||
  2357. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P12) ||
  2358. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P14) ||
  2359. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P16))
  2360. c->convert_unscaled = bswap_16bpc;
  2361. /* bswap 32 bits per pixel/component formats */
  2362. if (IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRPF32) ||
  2363. IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GBRAPF32))
  2364. c->convert_unscaled = bswap_32bpc;
  2365. if (usePal(srcFormat)) {
  2366. switch (dstFormat) {
  2367. case AV_PIX_FMT_GBRP:
  2368. case AV_PIX_FMT_GBRAP:
  2369. c->convert_unscaled = palToGbrpWrapper;
  2370. break;
  2371. default:
  2372. if (isByteRGB(dstFormat))
  2373. c->convert_unscaled = palToRgbWrapper;
  2374. break;
  2375. }
  2376. }
  2377. if (srcFormat == AV_PIX_FMT_YUV422P) {
  2378. if (dstFormat == AV_PIX_FMT_YUYV422)
  2379. c->convert_unscaled = yuv422pToYuy2Wrapper;
  2380. else if (dstFormat == AV_PIX_FMT_UYVY422)
  2381. c->convert_unscaled = yuv422pToUyvyWrapper;
  2382. }
  2383. /* uint Y to float Y */
  2384. if (srcFormat == AV_PIX_FMT_GRAY8 && dstFormat == AV_PIX_FMT_GRAYF32){
  2385. c->convert_unscaled = uint_y_to_float_y_wrapper;
  2386. }
  2387. /* float Y to uint Y */
  2388. if (srcFormat == AV_PIX_FMT_GRAYF32 && dstFormat == AV_PIX_FMT_GRAY8){
  2389. c->convert_unscaled = float_y_to_uint_y_wrapper;
  2390. }
  2391. /* LQ converters if -sws 0 or -sws 4*/
  2392. if (c->opts.flags&(SWS_FAST_BILINEAR|SWS_POINT)) {
  2393. /* yv12_to_yuy2 */
  2394. if (srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUVA420P) {
  2395. if (dstFormat == AV_PIX_FMT_YUYV422)
  2396. c->convert_unscaled = planarToYuy2Wrapper;
  2397. else if (dstFormat == AV_PIX_FMT_UYVY422)
  2398. c->convert_unscaled = planarToUyvyWrapper;
  2399. }
  2400. }
  2401. if (srcFormat == AV_PIX_FMT_YUYV422 &&
  2402. (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))
  2403. c->convert_unscaled = yuyvToYuv420Wrapper;
  2404. if (srcFormat == AV_PIX_FMT_UYVY422 &&
  2405. (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))
  2406. c->convert_unscaled = uyvyToYuv420Wrapper;
  2407. if (srcFormat == AV_PIX_FMT_YUYV422 && dstFormat == AV_PIX_FMT_YUV422P)
  2408. c->convert_unscaled = yuyvToYuv422Wrapper;
  2409. if (srcFormat == AV_PIX_FMT_UYVY422 && dstFormat == AV_PIX_FMT_YUV422P)
  2410. c->convert_unscaled = uyvyToYuv422Wrapper;
  2411. if (dstFormat == AV_PIX_FMT_YUV420P &&
  2412. (srcFormat == AV_PIX_FMT_NV24 || srcFormat == AV_PIX_FMT_NV42))
  2413. c->convert_unscaled = nv24ToYuv420Wrapper;
  2414. #define isPlanarGray(x) (isGray(x) && (x) != AV_PIX_FMT_YA8 && (x) != AV_PIX_FMT_YA16LE && (x) != AV_PIX_FMT_YA16BE)
  2415. /* simple copy */
  2416. if ( srcFormat == dstFormat ||
  2417. (srcFormat == AV_PIX_FMT_YUVA420P && dstFormat == AV_PIX_FMT_YUV420P) ||
  2418. (srcFormat == AV_PIX_FMT_YUV420P && dstFormat == AV_PIX_FMT_YUVA420P) ||
  2419. (isFloat(srcFormat) == isFloat(dstFormat) && isFloat16(srcFormat) == isFloat16(dstFormat)) && ((isPlanarYUV(srcFormat) && isPlanarGray(dstFormat)) ||
  2420. (isPlanarYUV(dstFormat) && isPlanarGray(srcFormat)) ||
  2421. (isPlanarGray(dstFormat) && isPlanarGray(srcFormat)) ||
  2422. (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) &&
  2423. c->chrDstHSubSample == c->chrSrcHSubSample &&
  2424. c->chrDstVSubSample == c->chrSrcVSubSample &&
  2425. isSemiPlanarYUV(srcFormat) == isSemiPlanarYUV(dstFormat))))
  2426. {
  2427. if (isPacked(c->opts.src_format))
  2428. c->convert_unscaled = packedCopyWrapper;
  2429. else /* Planar YUV or gray */
  2430. c->convert_unscaled = planarCopyWrapper;
  2431. }
  2432. #if ARCH_PPC
  2433. ff_get_unscaled_swscale_ppc(c);
  2434. #elif ARCH_ARM
  2435. ff_get_unscaled_swscale_arm(c);
  2436. #elif ARCH_AARCH64
  2437. ff_get_unscaled_swscale_aarch64(c);
  2438. #endif
  2439. }
  2440. /* Convert the palette to the same packed 32-bit format as the palette */
  2441. void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst,
  2442. int num_pixels, const uint8_t *palette)
  2443. {
  2444. int i;
  2445. for (i = 0; i < num_pixels; i++)
  2446. ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]];
  2447. }
  2448. /* Palette format: ABCD -> dst format: ABC */
  2449. void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst,
  2450. int num_pixels, const uint8_t *palette)
  2451. {
  2452. int i;
  2453. for (i = 0; i < num_pixels; i++) {
  2454. //FIXME slow?
  2455. dst[0] = palette[src[i] * 4 + 0];
  2456. dst[1] = palette[src[i] * 4 + 1];
  2457. dst[2] = palette[src[i] * 4 + 2];
  2458. dst += 3;
  2459. }
  2460. }