/* swscale_template.c */

/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#else
#define PREFETCH " # nop"
#endif
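/* PREFETCH resolves to the strongest data-prefetch hint the compiled-in
 * instruction set offers: 3DNow!'s "prefetch", MMX2's "prefetchnta", or a
 * plain nop when neither is available. */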
#if COMPILE_TEMPLATE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif COMPILE_TEMPLATE_AMD3DNOW
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#if COMPILE_TEMPLATE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
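/* On MMX2, MOVNTQ is a non-temporal store that bypasses the cache (the
 * scaled output will not be read again soon); elsewhere it degrades to a
 * plain movq. The REAL_/wrapper pair makes sure macro arguments are expanded
 * before "#" stringizes them into the asm text. */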
#if COMPILE_TEMPLATE_ALTIVEC
#include "ppc/swscale_altivec_template.c"
#endif

#define YSCALEYUV2YV12X(x, offset, dest, width) \
__asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
"movq %%mm3, %%mm4 \n\t"\
"lea " offset "(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
ASMALIGN(4) /* FIXME Unroll? */\
"1: \n\t"\
"movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
"movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
"movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
"add $16, %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
" jnz 1b \n\t"\
"psraw $3, %%mm3 \n\t"\
"psraw $3, %%mm4 \n\t"\
"packuswb %%mm4, %%mm3 \n\t"\
MOVNTQ(%%mm3, (%1, %%REGa))\
"add $8, %%"REG_a" \n\t"\
"cmp %2, %%"REG_a" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
"movq %%mm3, %%mm4 \n\t"\
"lea " offset "(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"jb 1b \n\t"\
:: "r" (&c->redDither),\
"r" (dest), "g" (width)\
: "%"REG_a, "%"REG_d, "%"REG_S\
);
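/* In outline, YSCALEYUV2YV12X walks the (coefficient, source pointer) filter
 * list at "offset", multiply-accumulating two quadwords per source with
 * pmulhw into mm3/mm4 until a NULL source pointer terminates the list, then
 * shifts, packs to unsigned bytes and stores 8 output pixels per iteration. */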
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
__asm__ volatile(\
"lea " offset "(%0), %%"REG_d" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"pxor %%mm5, %%mm5 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
ASMALIGN(4) \
"1: \n\t"\
"movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
"movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
"mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
"movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
"movq %%mm0, %%mm3 \n\t"\
"punpcklwd %%mm1, %%mm0 \n\t"\
"punpckhwd %%mm1, %%mm3 \n\t"\
"movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
"pmaddwd %%mm1, %%mm0 \n\t"\
"pmaddwd %%mm1, %%mm3 \n\t"\
"paddd %%mm0, %%mm4 \n\t"\
"paddd %%mm3, %%mm5 \n\t"\
"movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
"mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
"add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"punpcklwd %%mm3, %%mm2 \n\t"\
"punpckhwd %%mm3, %%mm0 \n\t"\
"pmaddwd %%mm1, %%mm2 \n\t"\
"pmaddwd %%mm1, %%mm0 \n\t"\
"paddd %%mm2, %%mm6 \n\t"\
"paddd %%mm0, %%mm7 \n\t"\
" jnz 1b \n\t"\
"psrad $16, %%mm4 \n\t"\
"psrad $16, %%mm5 \n\t"\
"psrad $16, %%mm6 \n\t"\
"psrad $16, %%mm7 \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
"packssdw %%mm5, %%mm4 \n\t"\
"packssdw %%mm7, %%mm6 \n\t"\
"paddw %%mm0, %%mm4 \n\t"\
"paddw %%mm0, %%mm6 \n\t"\
"psraw $3, %%mm4 \n\t"\
"psraw $3, %%mm6 \n\t"\
"packuswb %%mm6, %%mm4 \n\t"\
MOVNTQ(%%mm4, (%1, %%REGa))\
"add $8, %%"REG_a" \n\t"\
"cmp %2, %%"REG_a" \n\t"\
"lea " offset "(%0), %%"REG_d" \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"pxor %%mm5, %%mm5 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"jb 1b \n\t"\
:: "r" (&c->redDither),\
"r" (dest), "g" (width)\
: "%"REG_a, "%"REG_d, "%"REG_S\
);
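/* The _ACCURATE variant interleaves two sources with punpck*wd and uses
 * pmaddwd, so the accumulation runs at 32-bit precision; this is the slower,
 * better-rounded path selected by SWS_ACCURATE_RND. */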
#define YSCALEYUV2YV121 \
"mov %2, %%"REG_a" \n\t"\
ASMALIGN(4) /* FIXME Unroll? */\
"1: \n\t"\
"movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
"movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
MOVNTQ(%%mm0, (%1, %%REGa))\
"add $8, %%"REG_a" \n\t"\
"jnc 1b \n\t"

#define YSCALEYUV2YV121_ACCURATE \
"mov %2, %%"REG_a" \n\t"\
"pcmpeqw %%mm7, %%mm7 \n\t"\
"psrlw $15, %%mm7 \n\t"\
"psllw $6, %%mm7 \n\t"\
ASMALIGN(4) /* FIXME Unroll? */\
"1: \n\t"\
"movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
"movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
"paddsw %%mm7, %%mm0 \n\t"\
"paddsw %%mm7, %%mm1 \n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
MOVNTQ(%%mm0, (%1, %%REGa))\
"add $8, %%"REG_a" \n\t"\
"jnc 1b \n\t"
/*
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX_UV \
__asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\
ASMALIGN(4)\
"nop \n\t"\
"1: \n\t"\
"lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
"movq %%mm3, %%mm4 \n\t"\
ASMALIGN(4)\
"2: \n\t"\
"movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
"movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
"movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
"add $16, %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
" jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
"lea "offset"(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
"movq "#dst1", "#dst2" \n\t"\
ASMALIGN(4)\
"2: \n\t"\
"movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
"movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
"movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
"add $16, %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"pmulhw "#coeff", "#src1" \n\t"\
"pmulhw "#coeff", "#src2" \n\t"\
"paddw "#src1", "#dst1" \n\t"\
"paddw "#src2", "#dst2" \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
" jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX \
YSCALEYUV2PACKEDX_UV \
YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \

#define YSCALEYUV2PACKEDX_END \
:: "r" (&c->redDither), \
"m" (dummy), "m" (dummy), "m" (dummy),\
"r" (dest), "m" (dstW) \
: "%"REG_a, "%"REG_d, "%"REG_S \
);
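/* The packed-output scalers are split into a _UV half (loops over the chroma
 * filter, leaving the U sum in mm3 and the V sum in mm4) and a _YA half (the
 * same loop run over the luma filter, or over the alpha filter when an alpha
 * plane is emitted), so the Y code can be reused for alpha. */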
#define YSCALEYUV2PACKEDX_ACCURATE_UV \
__asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\
ASMALIGN(4)\
"nop \n\t"\
"1: \n\t"\
"lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"pxor %%mm5, %%mm5 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
ASMALIGN(4)\
"2: \n\t"\
"movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
"movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
"mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
"movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
"movq %%mm0, %%mm3 \n\t"\
"punpcklwd %%mm1, %%mm0 \n\t"\
"punpckhwd %%mm1, %%mm3 \n\t"\
"movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
"pmaddwd %%mm1, %%mm0 \n\t"\
"pmaddwd %%mm1, %%mm3 \n\t"\
"paddd %%mm0, %%mm4 \n\t"\
"paddd %%mm3, %%mm5 \n\t"\
"movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
"mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
"add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"punpcklwd %%mm3, %%mm2 \n\t"\
"punpckhwd %%mm3, %%mm0 \n\t"\
"pmaddwd %%mm1, %%mm2 \n\t"\
"pmaddwd %%mm1, %%mm0 \n\t"\
"paddd %%mm2, %%mm6 \n\t"\
"paddd %%mm0, %%mm7 \n\t"\
" jnz 2b \n\t"\
"psrad $16, %%mm4 \n\t"\
"psrad $16, %%mm5 \n\t"\
"psrad $16, %%mm6 \n\t"\
"psrad $16, %%mm7 \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
"packssdw %%mm5, %%mm4 \n\t"\
"packssdw %%mm7, %%mm6 \n\t"\
"paddw %%mm0, %%mm4 \n\t"\
"paddw %%mm0, %%mm6 \n\t"\
"movq %%mm4, "U_TEMP"(%0) \n\t"\
"movq %%mm6, "V_TEMP"(%0) \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
"lea "offset"(%0), %%"REG_d" \n\t"\
"mov (%%"REG_d"), %%"REG_S" \n\t"\
"pxor %%mm1, %%mm1 \n\t"\
"pxor %%mm5, %%mm5 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
ASMALIGN(4)\
"2: \n\t"\
"movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
"movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
"mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
"movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
"movq %%mm0, %%mm3 \n\t"\
"punpcklwd %%mm4, %%mm0 \n\t"\
"punpckhwd %%mm4, %%mm3 \n\t"\
"movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
"pmaddwd %%mm4, %%mm0 \n\t"\
"pmaddwd %%mm4, %%mm3 \n\t"\
"paddd %%mm0, %%mm1 \n\t"\
"paddd %%mm3, %%mm5 \n\t"\
"movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
"mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
"add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
"test %%"REG_S", %%"REG_S" \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"punpcklwd %%mm3, %%mm2 \n\t"\
"punpckhwd %%mm3, %%mm0 \n\t"\
"pmaddwd %%mm4, %%mm2 \n\t"\
"pmaddwd %%mm4, %%mm0 \n\t"\
"paddd %%mm2, %%mm7 \n\t"\
"paddd %%mm0, %%mm6 \n\t"\
" jnz 2b \n\t"\
"psrad $16, %%mm1 \n\t"\
"psrad $16, %%mm5 \n\t"\
"psrad $16, %%mm7 \n\t"\
"psrad $16, %%mm6 \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
"packssdw %%mm5, %%mm1 \n\t"\
"packssdw %%mm6, %%mm7 \n\t"\
"paddw %%mm0, %%mm1 \n\t"\
"paddw %%mm0, %%mm7 \n\t"\
"movq "U_TEMP"(%0), %%mm3 \n\t"\
"movq "V_TEMP"(%0), %%mm4 \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE \
YSCALEYUV2PACKEDX_ACCURATE_UV \
YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
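/* The accurate UV pass spills its results to U_TEMP/V_TEMP in the context
 * because the YA pass that follows needs nearly every MMX register; the YA
 * pass reloads them into mm3/mm4 at its end. */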
#define YSCALEYUV2RGBX \
"psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
"pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
"pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
"psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
"pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\

#define REAL_YSCALEYUV2PACKED(index, c) \
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
"movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
"psraw $3, %%mm0 \n\t"\
"psraw $3, %%mm1 \n\t"\
"movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
"movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $7, %%mm3 \n\t" /* uvbuf1[eax] >>7*/\
"psraw $7, %%mm4 \n\t" /* uvbuf1[eax+2048] >>7*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $7, %%mm1 \n\t" /* buf1[eax] >>7*/\
"psraw $7, %%mm7 \n\t" /* buf1[eax] >>7*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
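/* The two-tap vertical blend above computes buf1 + (buf0 - buf1) * alpha,
 * i.e. buf0 * alpha + buf1 * (1 - alpha), with psubw/pmulhw/paddw; the
 * filter coefficients are pre-shifted right by 3 at entry so that, in
 * effect, the pmulhw product lands in the same fixed-point range as the
 * >>7-shifted source term it is added to. */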
#define REAL_YSCALEYUV2RGB_UV(index, c) \
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 + uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 + uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
"movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define REAL_YSCALEYUV2RGB_COEFF(c) \
"pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
#define YSCALEYUV2RGB(index, c) \
REAL_YSCALEYUV2RGB_UV(index, c) \
REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
REAL_YSCALEYUV2RGB_COEFF(c)
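/* Together these pieces implement the usual fixed-point YUV->RGB matrix:
 * U/V are bias-removed and scaled into ub/ug/vg/vr terms, Y is offset and
 * scaled, green sums the U and V contributions, and each channel is added
 * to Y and saturated to unsigned bytes with packuswb. */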
#define REAL_YSCALEYUV2PACKED1(index, c) \
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $7, %%mm3 \n\t" \
"psraw $7, %%mm4 \n\t" \
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
"pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $8, %%mm3 \n\t" \
"psrlw $8, %%mm4 \n\t" \
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
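/* The *1 variants handle the unblended case, reading a single buf0/uvbuf0
 * line directly; the *1b variants also average uvbuf0 and uvbuf1 so chroma
 * is still interpolated vertically between two input lines. */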
// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
"xor "#index", "#index" \n\t"\
ASMALIGN(4)\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
"psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
"pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)

#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
"movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
"movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
"psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
"psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
"packuswb %%mm1, %%mm7 \n\t"
#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
"movq "#b", "#q2" \n\t" /* B */\
"movq "#r", "#t" \n\t" /* R */\
"punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
"punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
"punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
"punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
"movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
"movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
"punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
"punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
"punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
"punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
\
MOVNTQ( q0, (dst, index, 4))\
MOVNTQ( b, 8(dst, index, 4))\
MOVNTQ( q2, 16(dst, index, 4))\
MOVNTQ( q3, 24(dst, index, 4))\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
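/* WRITEBGR32 interleaves the four byte-packed channel registers in two
 * punpck rounds (bytes into GB/AR word pairs, then words into ARGB dwords)
 * and streams four quadwords, i.e. 8 pixels, per loop iteration. */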
#define REAL_WRITERGB16(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $3, %%mm3 \n\t"\
"psllq $3, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)

#define REAL_WRITERGB15(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
"psrlq $1, %%mm5 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $2, %%mm3 \n\t"\
"psllq $2, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
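/* The 16/15 bpp writers mask each channel to its field width (bF8/bFC keep
 * the top 5 or 6 bits), then splice the B, G and R fields together with byte
 * unpacks, shifts and ors; callers add the optional ordered dither before
 * these macros run. */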
#define WRITEBGR24OLD(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
"pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
"pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
"pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
MOVNTQ(%%mm0, (dst))\
MOVNTQ(%%mm2, 8(dst))\
MOVNTQ(%%mm3, 16(dst))\
"add $24, "#dst" \n\t"\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
"movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
"psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
"psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
"psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
"psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
"punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
"punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
"punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
"punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
"psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
"psllq $40, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
MOVNTQ(%%mm0, (dst))\
\
"psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
"psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
MOVNTQ(%%mm6, 8(dst))\
\
"psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
"psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
MOVNTQ(%%mm5, 16(dst))\
\
"add $24, "#dst" \n\t"\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX2(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
"movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
"pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
"pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
"pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
"pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
"pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
"pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
"psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
"por %%mm1, %%mm6 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, (dst))\
\
"psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
"pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
"pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
"pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
"pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
"pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
"pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
"por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 8(dst))\
\
"pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\
"pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
"pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
"pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
"pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
"pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
"por %%mm1, %%mm3 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 16(dst))\
\
"add $24, "#dst" \n\t"\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"

#if COMPILE_TEMPLATE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif

#define REAL_WRITEYUY2(dst, dstw, index) \
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm4, %%mm4 \n\t"\
"packuswb %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm4, %%mm3 \n\t"\
"movq %%mm1, %%mm7 \n\t"\
"punpcklbw %%mm3, %%mm1 \n\t"\
"punpckhbw %%mm3, %%mm7 \n\t"\
\
MOVNTQ(%%mm1, (dst, index, 2))\
MOVNTQ(%%mm7, 8(dst, index, 2))\
\
"add $8, "#index" \n\t"\
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
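/* WRITEYUY2 packs Y into mm1 and interleaves U/V bytes in mm3, then merges
 * them into the Y0 U Y1 V byte order of YUYV422, again 8 pixels per pass. */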
static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
#if COMPILE_TEMPLATE_MMX
    if(!(c->flags & SWS_BITEXACT)) {
        if (c->flags & SWS_ACCURATE_RND) {
            if (uDest) {
                YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }
            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        } else {
            if (uDest) {
                YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }
            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }
        return;
    }
#endif
#if COMPILE_TEMPLATE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //COMPILE_TEMPLATE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
#endif //!COMPILE_TEMPLATE_ALTIVEC
}
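/* yuv2yuvX is the planar output path: under MMX without SWS_BITEXACT it runs
 * one YSCALEYUV2YV12X(_ACCURATE) pass per plane (U, V, optional alpha, then
 * luma); otherwise it falls through to the AltiVec or plain C version. */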
static inline void RENAME(yuv2nv12X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                     const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, enum PixelFormat dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}

static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
    int i;
#if COMPILE_TEMPLATE_MMX
    if(!(c->flags & SWS_BITEXACT)) {
        long p= 4;
        const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
        uint8_t *dst[4]= {aDest, dest, uDest, vDest};
        x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
        if (c->flags & SWS_ACCURATE_RND) {
            while(p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121_ACCURATE
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        } else {
            while(p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        }
        return;
    }
#endif
    for (i=0; i<dstW; i++) {
        int val= (lumSrc[i]+64)>>7;
        if (val&256) {
            if (val<0) val=0;
            else       val=255;
        }
        dest[i]= val;
    }
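    /* (val&256) is a cheap out-of-[0,255] test for the results of (x+64)>>7:
     * negative values set the bit through sign extension and overflows set
     * it directly. The chroma loop below applies the same trick to u|v to
     * test both channels at once. */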
    if (uDest)
        for (i=0; i<chrDstW; i++) {
            int u=(chrSrc[i       ]+64)>>7;
            int v=(chrSrc[i + VOFW]+64)>>7;
            if ((u|v)&256) {
                if      (u<0)   u=0;
                else if (u>255) u=255;
                if      (v<0)   v=0;
                else if (v>255) v=255;
            }
            uDest[i]= u;
            vDest[i]= v;
        }
    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++) {
            int val= (alpSrc[i]+64)>>7;
            aDest[i]= av_clip_uint8(val);
        }
}
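/* yuv2yuv1 is the 1:1 vertical case: each plane only needs rounding from the
 * 16-bit intermediate format back to 8 bits, so the MMX path drives
 * YSCALEYUV2YV121(_ACCURATE) over a small dst/src pointer table instead of a
 * filter list. */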
  964. /**
  965. * vertical scale YV12 to RGB
  966. */
  967. static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
  968. const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
  969. const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
  970. {
  971. #if COMPILE_TEMPLATE_MMX
  972. x86_reg dummy=0;
  973. if(!(c->flags & SWS_BITEXACT)) {
  974. if (c->flags & SWS_ACCURATE_RND) {
  975. switch(c->dstFormat) {
  976. case PIX_FMT_RGB32:
  977. if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
  978. YSCALEYUV2PACKEDX_ACCURATE
  979. YSCALEYUV2RGBX
  980. "movq %%mm2, "U_TEMP"(%0) \n\t"
  981. "movq %%mm4, "V_TEMP"(%0) \n\t"
  982. "movq %%mm5, "Y_TEMP"(%0) \n\t"
  983. YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
  984. "movq "Y_TEMP"(%0), %%mm5 \n\t"
  985. "psraw $3, %%mm1 \n\t"
  986. "psraw $3, %%mm7 \n\t"
  987. "packuswb %%mm7, %%mm1 \n\t"
  988. WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
  989. YSCALEYUV2PACKEDX_END
  990. } else {
  991. YSCALEYUV2PACKEDX_ACCURATE
  992. YSCALEYUV2RGBX
  993. "pcmpeqd %%mm7, %%mm7 \n\t"
  994. WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  995. YSCALEYUV2PACKEDX_END
  996. }
  997. return;
  998. case PIX_FMT_BGR24:
  999. YSCALEYUV2PACKEDX_ACCURATE
  1000. YSCALEYUV2RGBX
  1001. "pxor %%mm7, %%mm7 \n\t"
  1002. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  1003. "add %4, %%"REG_c" \n\t"
  1004. WRITEBGR24(%%REGc, %5, %%REGa)
  1005. :: "r" (&c->redDither),
  1006. "m" (dummy), "m" (dummy), "m" (dummy),
  1007. "r" (dest), "m" (dstW)
  1008. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1009. );
  1010. return;
  1011. case PIX_FMT_RGB555:
  1012. YSCALEYUV2PACKEDX_ACCURATE
  1013. YSCALEYUV2RGBX
  1014. "pxor %%mm7, %%mm7 \n\t"
  1015. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1016. #ifdef DITHER1XBPP
  1017. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  1018. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  1019. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  1020. #endif
  1021. WRITERGB15(%4, %5, %%REGa)
  1022. YSCALEYUV2PACKEDX_END
  1023. return;
  1024. case PIX_FMT_RGB565:
  1025. YSCALEYUV2PACKEDX_ACCURATE
  1026. YSCALEYUV2RGBX
  1027. "pxor %%mm7, %%mm7 \n\t"
  1028. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1029. #ifdef DITHER1XBPP
  1030. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  1031. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  1032. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  1033. #endif
  1034. WRITERGB16(%4, %5, %%REGa)
  1035. YSCALEYUV2PACKEDX_END
  1036. return;
  1037. case PIX_FMT_YUYV422:
  1038. YSCALEYUV2PACKEDX_ACCURATE
  1039. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1040. "psraw $3, %%mm3 \n\t"
  1041. "psraw $3, %%mm4 \n\t"
  1042. "psraw $3, %%mm1 \n\t"
  1043. "psraw $3, %%mm7 \n\t"
  1044. WRITEYUY2(%4, %5, %%REGa)
  1045. YSCALEYUV2PACKEDX_END
  1046. return;
  1047. }
  1048. } else {
  1049. switch(c->dstFormat) {
  1050. case PIX_FMT_RGB32:
  1051. if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
  1052. YSCALEYUV2PACKEDX
  1053. YSCALEYUV2RGBX
  1054. YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
  1055. "psraw $3, %%mm1 \n\t"
  1056. "psraw $3, %%mm7 \n\t"
  1057. "packuswb %%mm7, %%mm1 \n\t"
  1058. WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
  1059. YSCALEYUV2PACKEDX_END
  1060. } else {
  1061. YSCALEYUV2PACKEDX
  1062. YSCALEYUV2RGBX
  1063. "pcmpeqd %%mm7, %%mm7 \n\t"
  1064. WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1065. YSCALEYUV2PACKEDX_END
  1066. }
  1067. return;
  1068. case PIX_FMT_BGR24:
  1069. YSCALEYUV2PACKEDX
  1070. YSCALEYUV2RGBX
  1071. "pxor %%mm7, %%mm7 \n\t"
  1072. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
  1073. "add %4, %%"REG_c" \n\t"
  1074. WRITEBGR24(%%REGc, %5, %%REGa)
  1075. :: "r" (&c->redDither),
  1076. "m" (dummy), "m" (dummy), "m" (dummy),
  1077. "r" (dest), "m" (dstW)
  1078. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1079. );
  1080. return;
  1081. case PIX_FMT_RGB555:
  1082. YSCALEYUV2PACKEDX
  1083. YSCALEYUV2RGBX
  1084. "pxor %%mm7, %%mm7 \n\t"
  1085. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1086. #ifdef DITHER1XBPP
  1087. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1088. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1089. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1090. #endif
  1091. WRITERGB15(%4, %5, %%REGa)
  1092. YSCALEYUV2PACKEDX_END
  1093. return;
  1094. case PIX_FMT_RGB565:
  1095. YSCALEYUV2PACKEDX
  1096. YSCALEYUV2RGBX
  1097. "pxor %%mm7, %%mm7 \n\t"
  1098. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1099. #ifdef DITHER1XBPP
  1100. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1101. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1102. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1103. #endif
  1104. WRITERGB16(%4, %5, %%REGa)
  1105. YSCALEYUV2PACKEDX_END
  1106. return;
  1107. case PIX_FMT_YUYV422:
  1108. YSCALEYUV2PACKEDX
  1109. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1110. "psraw $3, %%mm3 \n\t"
  1111. "psraw $3, %%mm4 \n\t"
  1112. "psraw $3, %%mm1 \n\t"
  1113. "psraw $3, %%mm7 \n\t"
  1114. WRITEYUY2(%4, %5, %%REGa)
  1115. YSCALEYUV2PACKEDX_END
  1116. return;
  1117. }
  1118. }
  1119. }
  1120. #endif /* COMPILE_TEMPLATE_MMX */
  1121. #if COMPILE_TEMPLATE_ALTIVEC
  1122. /* The following list of supported dstFormat values should
  1123. match what's found in the body of ff_yuv2packedX_altivec() */
  1124. if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
  1125. (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
  1126. c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
  1127. c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
  1128. ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
  1129. chrFilter, chrSrc, chrFilterSize,
  1130. dest, dstW, dstY);
  1131. else
  1132. #endif
  1133. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1134. chrFilter, chrSrc, chrFilterSize,
  1135. alpSrc, dest, dstW, dstY);
  1136. }
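
/* For reference, the vertical pass that the YSCALEYUV2PACKEDX* code above
 * implements is, in C terms (a sketch, not the exact macro): the pixel
 * buffers hold 15-bit (<<7) samples and the vertical coefficients are
 * 12-bit, so
 *     Y = av_clip_uint8((sum over j of lumFilter[j]*lumSrc[j][i]) >> 19)
 * and likewise for U/V; the per-format WRITE* macro then packs the result.
 * yuv2packedXinC above is the scalar counterpart of the same computation. */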

/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                                       const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int  yalpha1=4095- yalpha;
    int uvalpha1=4095-uvalpha;
    int i;

#if COMPILE_TEMPLATE_MMX
    if(!(c->flags & SWS_BITEXACT)) {
        switch(c->dstFormat) {
        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case PIX_FMT_RGB32:
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
#if ARCH_X86_64
                __asm__ volatile(
                    YSCALEYUV2RGB(%%r8, %5)
                    YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
                       "a" (&c->redDither)
                      ,"r" (abuf0), "r" (abuf1)
                    : "%r8"
                );
#else
                *(const uint16_t **)(&c->u_temp)=abuf0;
                *(const uint16_t **)(&c->v_temp)=abuf1;
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "push %0 \n\t"
                    "push %1 \n\t"
                    "mov "U_TEMP"(%5), %0 \n\t"
                    "mov "V_TEMP"(%5), %1 \n\t"
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    "pop %1 \n\t"
                    "pop %0 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
#endif
            } else {
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
            }
            return;
        case PIX_FMT_BGR24:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB555:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB565:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_YUYV422:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2PACKED(%%REGBP, %5)
                WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        default: break;
        }
    }
#endif //COMPILE_TEMPLATE_MMX
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
}
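
/* A minimal C sketch (illustrative only, hence #if 0) of the two-line
 * vertical blend that YSCALEYUV2RGB performs before packing: yalpha and
 * uvalpha are 12-bit blend weights and the buffers hold 15-bit (<<7)
 * samples, so >>19 returns an 8-bit value. */
#if 0
static void blend2_sketch(const uint16_t *buf0, const uint16_t *buf1,
                          uint8_t *dstY, int dstW, int yalpha)
{
    const int yalpha1 = 4095 - yalpha;
    int i;
    for (i = 0; i < dstW; i++)
        dstY[i] = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
}
#endif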

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                                       const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
{
    const int yalpha1=0;
    int i;

    const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
    const int yalpha= 4096; //FIXME ...

    if (flags&SWS_FULL_CHR_H_INT) {
        c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
        return;
    }

#if COMPILE_TEMPLATE_MMX
    if(!(flags & SWS_BITEXACT)) {
        if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
            switch(dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                } else {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        } else {
            switch(dstFormat) {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                } else {
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1b(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
    }
#endif /* COMPILE_TEMPLATE_MMX */
    if (uvalpha < 2048) {
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    } else {
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    }
}
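
/* Sketch (illustrative only) of the chroma policy used above: below the
 * uvalpha threshold one chroma line is taken as-is, which is the
 * "shifts chrominance by 0.5 pixels" shortcut noted in the comment;
 * otherwise the two lines are averaged (the *1b variants). Which of the
 * two lines the "1" variant reads is an assumption here. */
#if 0
static int chroma1_sketch(const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                          int i, int uvalpha)
{
    if (uvalpha < 2048)
        return uvbuf1[i] >> 7;              /* nearest line, 15 -> 8 bits */
    return (uvbuf0[i] + uvbuf1[i]) >> 8;    /* averaged,     16 -> 8 bits */
}
#endif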

//FIXME yuy2* can read up to 7 samples too much

static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm2 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "pand %%mm2, %%mm0 \n\t"
        "pand %%mm2, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
        dst[i]= src[2*i];
#endif
}

static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++) {
        dstU[i]= src1[4*i + 1];
        dstV[i]= src1[4*i + 3];
    }
#endif
    assert(src1 == src2);
}

static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq (%2, %%"REG_a",2), %%mm2 \n\t"
        "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "psrlw $8, %%mm2 \n\t"
        "psrlw $8, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%3, %%"REG_a") \n\t"
        "movq %%mm2, (%4, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++) {
        dstU[i]= src1[2*i + 1];
        dstV[i]= src2[2*i + 1];
    }
#endif
}

/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
        dst[i]= src[2*i+1];
#endif
}

static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",4), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%3, %%"REG_a") \n\t"
        "movd %%mm1, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++) {
        dstU[i]= src1[4*i + 0];
        dstV[i]= src1[4*i + 2];
    }
#endif
    assert(src1 == src2);
}

static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq (%2, %%"REG_a",2), %%mm2 \n\t"
        "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "pand %%mm4, %%mm2 \n\t"
        "pand %%mm4, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%3, %%"REG_a") \n\t"
        "movq %%mm2, (%4, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++) {
        dstU[i]= src1[2*i];
        dstV[i]= src2[2*i];
    }
#endif
}

static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
                                    const uint8_t *src, long width)
{
#if COMPILE_TEMPLATE_MMX
    __asm__ volatile(
        "movq "MANGLE(bm01010101)", %%mm4 \n\t"
        "mov %0, %%"REG_a" \n\t"
        "1: \n\t"
        "movq (%1, %%"REG_a",2), %%mm0 \n\t"
        "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm3 \n\t"
        "pand %%mm4, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "psrlw $8, %%mm2 \n\t"
        "psrlw $8, %%mm3 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm2, (%3, %%"REG_a") \n\t"
        "add $8, %%"REG_a" \n\t"
        " js 1b \n\t"
        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
        : "%"REG_a
    );
#else
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2*i+0];
        dst2[i] = src[2*i+1];
    }
#endif
}

static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
                                    const uint8_t *src1, const uint8_t *src2,
                                    long width, uint32_t *unused)
{
    RENAME(nvXXtoUV)(dstU, dstV, src1, width);
}

static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
                                    const uint8_t *src1, const uint8_t *src2,
                                    long width, uint32_t *unused)
{
    RENAME(nvXXtoUV)(dstV, dstU, src1, width);
}

#if COMPILE_TEMPLATE_MMX
static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
{
    if(srcFormat == PIX_FMT_BGR24) {
        __asm__ volatile(
            "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
            "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
            :
        );
    } else {
        __asm__ volatile(
            "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
            "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
            :
        );
    }

    __asm__ volatile(
        "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
        "mov %2, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "1: \n\t"
        PREFETCH" 64(%0) \n\t"
        "movd (%0), %%mm0 \n\t"
        "movd 2(%0), %%mm1 \n\t"
        "movd 6(%0), %%mm2 \n\t"
        "movd 8(%0), %%mm3 \n\t"
        "add $12, %0 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "pmaddwd %%mm5, %%mm0 \n\t"
        "pmaddwd %%mm6, %%mm1 \n\t"
        "pmaddwd %%mm5, %%mm2 \n\t"
        "pmaddwd %%mm6, %%mm3 \n\t"
        "paddd %%mm1, %%mm0 \n\t"
        "paddd %%mm3, %%mm2 \n\t"
        "paddd %%mm4, %%mm0 \n\t"
        "paddd %%mm4, %%mm2 \n\t"
        "psrad $15, %%mm0 \n\t"
        "psrad $15, %%mm2 \n\t"
        "packssdw %%mm2, %%mm0 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "movd %%mm0, (%1, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+r" (src)
        : "r" (dst+width), "g" ((x86_reg)-width)
        : "%"REG_a
    );
}

static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
{
    __asm__ volatile(
        "movq 24(%4), %%mm6 \n\t"
        "mov %3, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "1: \n\t"
        PREFETCH" 64(%0) \n\t"
        "movd (%0), %%mm0 \n\t"
        "movd 2(%0), %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm3 \n\t"
        "pmaddwd (%4), %%mm0 \n\t"
        "pmaddwd 8(%4), %%mm1 \n\t"
        "pmaddwd 16(%4), %%mm2 \n\t"
        "pmaddwd %%mm6, %%mm3 \n\t"
        "paddd %%mm1, %%mm0 \n\t"
        "paddd %%mm3, %%mm2 \n\t"
        "movd 6(%0), %%mm1 \n\t"
        "movd 8(%0), %%mm3 \n\t"
        "add $12, %0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "movq %%mm3, %%mm5 \n\t"
        "pmaddwd (%4), %%mm1 \n\t"
        "pmaddwd 8(%4), %%mm3 \n\t"
        "pmaddwd 16(%4), %%mm4 \n\t"
        "pmaddwd %%mm6, %%mm5 \n\t"
        "paddd %%mm3, %%mm1 \n\t"
        "paddd %%mm5, %%mm4 \n\t"
        "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
        "paddd %%mm3, %%mm0 \n\t"
        "paddd %%mm3, %%mm2 \n\t"
        "paddd %%mm3, %%mm1 \n\t"
        "paddd %%mm3, %%mm4 \n\t"
        "psrad $15, %%mm0 \n\t"
        "psrad $15, %%mm2 \n\t"
        "psrad $15, %%mm1 \n\t"
        "psrad $15, %%mm4 \n\t"
        "packssdw %%mm1, %%mm0 \n\t"
        "packssdw %%mm4, %%mm2 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm2, %%mm2 \n\t"
        "movd %%mm0, (%1, %%"REG_a") \n\t"
        "movd %%mm2, (%2, %%"REG_a") \n\t"
        "add $4, %%"REG_a" \n\t"
        " js 1b \n\t"
        : "+r" (src)
        : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
        : "%"REG_a
    );
}
#endif

static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
#else
    int i;
    for (i=0; i<width; i++) {
        int b= src[i*3+0];
        int g= src[i*3+1];
        int r= src[i*3+2];

        dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
    }
#endif /* COMPILE_TEMPLATE_MMX */
}
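
/* Why the rounding term above is (33<<(RGB2YUV_SHIFT-1)) (a sketch,
 * assuming the usual limited-range coefficients): 33/2 = 16.5, i.e. the
 * +16 luma offset of limited-range YCbCr plus 0.5 for rounding, pre-scaled
 * by 1<<RGB2YUV_SHIFT so a single shift finishes the job. */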

static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
#else
    int i;
    for (i=0; i<width; i++) {
        int b= src1[3*i + 0];
        int g= src1[3*i + 1];
        int r= src1[3*i + 2];

        dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
        dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
    }
#endif /* COMPILE_TEMPLATE_MMX */
    assert(src1 == src2);
}

static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    int i;
    for (i=0; i<width; i++) {
        int b= src1[6*i + 0] + src1[6*i + 3];
        int g= src1[6*i + 1] + src1[6*i + 4];
        int r= src1[6*i + 2] + src1[6*i + 5];

        dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
        dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
    }
    assert(src1 == src2);
}
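
/* The *_half variants read two adjacent pixels per output sample (a 6-byte
 * stride = two RGB24 pixels) and average them; that is why the offset is
 * doubled to (257<<RGB2YUV_SHIFT) and the final shift is one bit larger
 * than in the full-resolution functions. */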

static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
#else
    int i;
    for (i=0; i<width; i++) {
        int r= src[i*3+0];
        int g= src[i*3+1];
        int b= src[i*3+2];

        dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
    }
#endif
}

static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
    assert(src1==src2);
    RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
#else
    int i;
    assert(src1==src2);
    for (i=0; i<width; i++) {
        int r= src1[3*i + 0];
        int g= src1[3*i + 1];
        int b= src1[3*i + 2];

        dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
        dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
    }
#endif
}

static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
{
    int i;
    assert(src1==src2);
    for (i=0; i<width; i++) {
        int r= src1[6*i + 0] + src1[6*i + 3];
        int g= src1[6*i + 1] + src1[6*i + 4];
        int b= src1[6*i + 2] + src1[6*i + 5];

        dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
        dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
    }
}

// bilinear / bicubic scaling
static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
                                  const int16_t *filter, const int16_t *filterPos, long filterSize)
{
#if COMPILE_TEMPLATE_MMX
    assert(filterSize % 4 == 0 && filterSize>0);
    if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
        x86_reg counter= -2*dstW;
        filter-= counter*2;
        filterPos-= counter/2;
        dst-= counter/2;
        __asm__ volatile(
#if defined(PIC)
            "push %%"REG_b" \n\t"
#endif
            "pxor %%mm7, %%mm7 \n\t"
            "push %%"REG_BP" \n\t" // we use 7 regs here ...
            "mov %%"REG_a", %%"REG_BP" \n\t"
            ASMALIGN(4)
            "1: \n\t"
            "movzwl (%2, %%"REG_BP"), %%eax \n\t"
            "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
            "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
            "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
            "movd (%3, %%"REG_a"), %%mm0 \n\t"
            "movd (%3, %%"REG_b"), %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "pmaddwd %%mm1, %%mm0 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "movq %%mm0, %%mm4 \n\t"
            "punpckldq %%mm3, %%mm0 \n\t"
            "punpckhdq %%mm3, %%mm4 \n\t"
            "paddd %%mm4, %%mm0 \n\t"
            "psrad $7, %%mm0 \n\t"
            "packssdw %%mm0, %%mm0 \n\t"
            "movd %%mm0, (%4, %%"REG_BP") \n\t"
            "add $4, %%"REG_BP" \n\t"
            " jnc 1b \n\t"
            "pop %%"REG_BP" \n\t"
#if defined(PIC)
            "pop %%"REG_b" \n\t"
#endif
            : "+a" (counter)
            : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
#if !defined(PIC)
            : "%"REG_b
#endif
        );
    } else if (filterSize==8) {
        x86_reg counter= -2*dstW;
        filter-= counter*4;
        filterPos-= counter/2;
        dst-= counter/2;
        __asm__ volatile(
#if defined(PIC)
            "push %%"REG_b" \n\t"
#endif
            "pxor %%mm7, %%mm7 \n\t"
            "push %%"REG_BP" \n\t" // we use 7 regs here ...
            "mov %%"REG_a", %%"REG_BP" \n\t"
            ASMALIGN(4)
            "1: \n\t"
            "movzwl (%2, %%"REG_BP"), %%eax \n\t"
            "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
            "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
            "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
            "movd (%3, %%"REG_a"), %%mm0 \n\t"
            "movd (%3, %%"REG_b"), %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "pmaddwd %%mm1, %%mm0 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
            "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
            "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
            "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "pmaddwd %%mm1, %%mm4 \n\t"
            "pmaddwd %%mm2, %%mm5 \n\t"
            "paddd %%mm4, %%mm0 \n\t"
            "paddd %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm4 \n\t"
            "punpckldq %%mm3, %%mm0 \n\t"
            "punpckhdq %%mm3, %%mm4 \n\t"
            "paddd %%mm4, %%mm0 \n\t"
            "psrad $7, %%mm0 \n\t"
            "packssdw %%mm0, %%mm0 \n\t"
            "movd %%mm0, (%4, %%"REG_BP") \n\t"
            "add $4, %%"REG_BP" \n\t"
            " jnc 1b \n\t"
            "pop %%"REG_BP" \n\t"
#if defined(PIC)
            "pop %%"REG_b" \n\t"
#endif
            : "+a" (counter)
            : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
#if !defined(PIC)
            : "%"REG_b
#endif
        );
    } else {
        const uint8_t *offset = src+filterSize;
        x86_reg counter= -2*dstW;
        //filter-= counter*filterSize/2;
        filterPos-= counter/2;
        dst-= counter/2;
        __asm__ volatile(
            "pxor %%mm7, %%mm7 \n\t"
            ASMALIGN(4)
            "1: \n\t"
            "mov %2, %%"REG_c" \n\t"
            "movzwl (%%"REG_c", %0), %%eax \n\t"
            "movzwl 2(%%"REG_c", %0), %%edx \n\t"
            "mov %5, %%"REG_c" \n\t"
            "pxor %%mm4, %%mm4 \n\t"
            "pxor %%mm5, %%mm5 \n\t"
            "2: \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1, %6), %%mm3 \n\t"
            "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
            "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "pmaddwd %%mm1, %%mm0 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "paddd %%mm3, %%mm5 \n\t"
            "paddd %%mm0, %%mm4 \n\t"
            "add $8, %1 \n\t"
            "add $4, %%"REG_c" \n\t"
            "cmp %4, %%"REG_c" \n\t"
            " jb 2b \n\t"
            "add %6, %1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpckldq %%mm5, %%mm4 \n\t"
            "punpckhdq %%mm5, %%mm0 \n\t"
            "paddd %%mm0, %%mm4 \n\t"
            "psrad $7, %%mm4 \n\t"
            "packssdw %%mm4, %%mm4 \n\t"
            "mov %3, %%"REG_a" \n\t"
            "movd %%mm4, (%%"REG_a", %0) \n\t"
            "add $4, %0 \n\t"
            " jnc 1b \n\t"
            : "+r" (counter), "+r" (filter)
            : "m" (filterPos), "m" (dst), "m"(offset),
              "m" (src), "r" ((x86_reg)filterSize*2)
            : "%"REG_a, "%"REG_c, "%"REG_d
        );
    }
#else
#if COMPILE_TEMPLATE_ALTIVEC
    hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
#else
    int i;
    for (i=0; i<dstW; i++) {
        int j;
        int srcPos= filterPos[i];
        int val=0;
        //printf("filterPos: %d\n", filterPos[i]);
        for (j=0; j<filterSize; j++) {
            //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
            val += ((int)src[srcPos + j])*filter[filterSize*i + j];
        }
        //filter += hFilterSize;
        dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
        //dst[i] = val>>7;
    }
#endif /* COMPILE_TEMPLATE_ALTIVEC */
#endif /* COMPILE_TEMPLATE_MMX */
}
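
/* Note on the MMX paths above: counter starts at -2*dstW and the filter,
 * filterPos and dst pointers are pre-biased by the same amount, so
 * "add $4 ... jnc 1b" keeps looping until the addition carries past zero.
 * One register thus doubles as both the array index and the loop
 * termination test. */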

//FIXME all pal and rgb srcFormats could do this conversion as well
//FIXME all scalers more complex than bilinear could do half of this transform
static void RENAME(chrRangeToJpeg)(uint16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst[i     ] = (FFMIN(dst[i     ],30775)*4663 - 9289992)>>12; //-264
        dst[i+VOFW] = (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
    }
}

static void RENAME(chrRangeFromJpeg)(uint16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst[i     ] = (dst[i     ]*1799 + 4081085)>>11; //1469
        dst[i+VOFW] = (dst[i+VOFW]*1799 + 4081085)>>11; //1469
    }
}

static void RENAME(lumRangeToJpeg)(uint16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
}

static void RENAME(lumRangeFromJpeg)(uint16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i]*14071 + 33561947)>>14;
}

#define FAST_BILINEAR_X86 \
    "subl %%edi, %%esi \n\t" /* src[xx+1] - src[xx] */ \
    "imull %%ecx, %%esi \n\t" /* (src[xx+1] - src[xx])*xalpha */ \
    "shll $16, %%edi \n\t" \
    "addl %%edi, %%esi \n\t" /* src[xx+1]*xalpha + src[xx]*(1-xalpha) */ \
    "mov %1, %%"REG_D"\n\t" \
    "shrl $9, %%esi \n\t"

static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
                                        long dstWidth, const uint8_t *src, int srcW,
                                        int xInc)
{
#if ARCH_X86
#if COMPILE_TEMPLATE_MMX2
    int32_t *filterPos = c->hLumFilterPos;
    int16_t *filter    = c->hLumFilter;
    int     canMMX2BeUsed = c->canMMX2BeUsed;
    void    *mmx2FilterCode= c->lumMmx2FilterCode;
    int i;
#if defined(PIC)
    DECLARE_ALIGNED(8, uint64_t, ebxsave);
#endif
    if (canMMX2BeUsed) {
        __asm__ volatile(
#if defined(PIC)
            "mov %%"REG_b", %5 \n\t"
#endif
            "pxor %%mm7, %%mm7 \n\t"
            "mov %0, %%"REG_c" \n\t"
            "mov %1, %%"REG_D" \n\t"
            "mov %2, %%"REG_d" \n\t"
            "mov %3, %%"REG_b" \n\t"
            "xor %%"REG_a", %%"REG_a" \n\t" // i
            PREFETCH" (%%"REG_c") \n\t"
            PREFETCH" 32(%%"REG_c") \n\t"
            PREFETCH" 64(%%"REG_c") \n\t"
#if ARCH_X86_64
#define CALL_MMX2_FILTER_CODE \
            "movl (%%"REG_b"), %%esi \n\t"\
            "call *%4 \n\t"\
            "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
            "add %%"REG_S", %%"REG_c" \n\t"\
            "add %%"REG_a", %%"REG_D" \n\t"\
            "xor %%"REG_a", %%"REG_a" \n\t"
#else
#define CALL_MMX2_FILTER_CODE \
            "movl (%%"REG_b"), %%esi \n\t"\
            "call *%4 \n\t"\
            "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
            "add %%"REG_a", %%"REG_D" \n\t"\
            "xor %%"REG_a", %%"REG_a" \n\t"
#endif /* ARCH_X86_64 */
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
#if defined(PIC)
            "mov %5, %%"REG_b" \n\t"
#endif
            :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
               "m" (mmx2FilterCode)
#if defined(PIC)
              ,"m" (ebxsave)
#endif
            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
             ,"%"REG_b
#endif
        );
        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
    } else {
#endif /* COMPILE_TEMPLATE_MMX2 */
    x86_reg xInc_shr16 = xInc >> 16;
    uint16_t xInc_mask = xInc & 0xffff;
    //NO MMX just normal asm ...
    __asm__ volatile(
        "xor %%"REG_a", %%"REG_a" \n\t" // i
        "xor %%"REG_d", %%"REG_d" \n\t" // xx
        "xorl %%ecx, %%ecx \n\t" // xalpha
        ASMALIGN(4)
        "1: \n\t"
        "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
        "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
        FAST_BILINEAR_X86
        "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
        "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
        "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
        "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
        "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
        FAST_BILINEAR_X86
        "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
        "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
        "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
        "add $2, %%"REG_a" \n\t"
        "cmp %2, %%"REG_a" \n\t"
        " jb 1b \n\t"
        :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
        : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
    );
#if COMPILE_TEMPLATE_MMX2
    } //if MMX2 can't be used
#endif
#else
    int i;
    unsigned int xpos=0;
    for (i=0;i<dstWidth;i++) {
        register unsigned int xx=xpos>>16;
        register unsigned int xalpha=(xpos&0xFFFF)>>9;
        dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
        xpos+=xInc;
    }
#endif /* ARCH_X86 */
}
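
/* The plain-asm fallback above keeps the 16.16 source position split
 * across two registers: "addw xInc_mask, %%cx" advances the fraction and
 * "adc xInc_shr16, REG_d" folds the carry into the integer part -- the
 * same DDA the C fallback writes as xpos += xInc. */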

// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc,
                                   const int16_t *hLumFilter,
                                   const int16_t *hLumFilterPos, int hLumFilterSize,
                                   uint8_t *formatConvBuffer,
                                   uint32_t *pal, int isAlpha)
{
    void (*toYV12)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12;
    void (*convertRange)(uint16_t *, int) = isAlpha ? NULL : c->lumConvertRange;

    src += isAlpha ? c->alpSrcOffset : c->lumSrcOffset;

    if (toYV12) {
        toYV12(formatConvBuffer, src, srcW, pal);
        src= formatConvBuffer;
    }

    if (!c->hyscale_fast) {
        c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
    } else { // fast bilinear upscale / crap downscale
        c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
    }

    if (convertRange)
        convertRange(dst, dstWidth);
}

static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
                                        long dstWidth, const uint8_t *src1,
                                        const uint8_t *src2, int srcW, int xInc)
{
#if ARCH_X86
#if COMPILE_TEMPLATE_MMX2
    int32_t *filterPos = c->hChrFilterPos;
    int16_t *filter    = c->hChrFilter;
    int     canMMX2BeUsed = c->canMMX2BeUsed;
    void    *mmx2FilterCode= c->chrMmx2FilterCode;
    int i;
#if defined(PIC)
    DECLARE_ALIGNED(8, uint64_t, ebxsave);
#endif
    if (canMMX2BeUsed) {
        __asm__ volatile(
#if defined(PIC)
            "mov %%"REG_b", %6 \n\t"
#endif
            "pxor %%mm7, %%mm7 \n\t"
            "mov %0, %%"REG_c" \n\t"
            "mov %1, %%"REG_D" \n\t"
            "mov %2, %%"REG_d" \n\t"
            "mov %3, %%"REG_b" \n\t"
            "xor %%"REG_a", %%"REG_a" \n\t" // i
            PREFETCH" (%%"REG_c") \n\t"
            PREFETCH" 32(%%"REG_c") \n\t"
            PREFETCH" 64(%%"REG_c") \n\t"

            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            "xor %%"REG_a", %%"REG_a" \n\t" // i
            "mov %5, %%"REG_c" \n\t" // src
            "mov %1, %%"REG_D" \n\t" // buf1
            "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
            PREFETCH" (%%"REG_c") \n\t"
            PREFETCH" 32(%%"REG_c") \n\t"
            PREFETCH" 64(%%"REG_c") \n\t"

            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE
            CALL_MMX2_FILTER_CODE

#if defined(PIC)
            "mov %6, %%"REG_b" \n\t"
#endif
            :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
               "m" (mmx2FilterCode), "m" (src2)
#if defined(PIC)
              ,"m" (ebxsave)
#endif
            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
             ,"%"REG_b
#endif
        );
        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
            //printf("%d %d %d\n", dstWidth, i, srcW);
            dst[i] = src1[srcW-1]*128;
            dst[i+VOFW] = src2[srcW-1]*128;
        }
    } else {
#endif /* COMPILE_TEMPLATE_MMX2 */
        x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
        uint16_t xInc_mask = xInc & 0xffff;
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t" // i
            "xor %%"REG_d", %%"REG_d" \n\t" // xx
            "xorl %%ecx, %%ecx \n\t" // xalpha
            ASMALIGN(4)
            "1: \n\t"
            "mov %0, %%"REG_S" \n\t"
            "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
            "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
            FAST_BILINEAR_X86
            "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"

            "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
            "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
            FAST_BILINEAR_X86
            "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"

            "addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
            "adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
            "add $1, %%"REG_a" \n\t"
            "cmp %2, %%"REG_a" \n\t"
            " jb 1b \n\t"

/* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
   which is needed to support GCC 4.0. */
#if ARCH_X86_64 && AV_GCC_VERSION_AT_LEAST(3,4)
            :: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#else
            :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#endif
               "r" (src2)
            : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
        );
#if COMPILE_TEMPLATE_MMX2
    } //if MMX2 can't be used
#endif
#else
    int i;
    unsigned int xpos=0;
    for (i=0;i<dstWidth;i++) {
        register unsigned int xx=xpos>>16;
        register unsigned int xalpha=(xpos&0xFFFF)>>9;
        dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
        dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
        /* slower
        dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
        dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
        */
        xpos+=xInc;
    }
#endif /* ARCH_X86 */
}

inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2,
                                   int srcW, int xInc, const int16_t *hChrFilter,
                                   const int16_t *hChrFilterPos, int hChrFilterSize,
                                   uint8_t *formatConvBuffer,
                                   uint32_t *pal)
{
    src1 += c->chrSrcOffset;
    src2 += c->chrSrcOffset;

    if (c->chrToYV12) {
        c->chrToYV12(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
        src1= formatConvBuffer;
        src2= formatConvBuffer+VOFW;
    }

    if (!c->hcscale_fast) {
        c->hScale(dst     , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
        c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
    } else { // fast bilinear upscale / crap downscale
        c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc);
    }

    if (c->chrConvertRange)
        c->chrConvertRange(dst, dstWidth);
}

#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...) if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__)

static int RENAME(swScale)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
                           int srcSliceH, uint8_t* dst[], int dstStride[])
{
    /* load a few things into local vars to make the code more readable and faster */
    const int srcW= c->srcW;
    const int dstW= c->dstW;
    const int dstH= c->dstH;
    const int chrDstW= c->chrDstW;
    const int chrSrcW= c->chrSrcW;
    const int lumXInc= c->lumXInc;
    const int chrXInc= c->chrXInc;
    const enum PixelFormat dstFormat= c->dstFormat;
    const int flags= c->flags;
    int16_t *vLumFilterPos= c->vLumFilterPos;
    int16_t *vChrFilterPos= c->vChrFilterPos;
    int16_t *hLumFilterPos= c->hLumFilterPos;
    int16_t *hChrFilterPos= c->hChrFilterPos;
    int16_t *vLumFilter= c->vLumFilter;
    int16_t *vChrFilter= c->vChrFilter;
    int16_t *hLumFilter= c->hLumFilter;
    int16_t *hChrFilter= c->hChrFilter;
    int32_t *lumMmxFilter= c->lumMmxFilter;
    int32_t *chrMmxFilter= c->chrMmxFilter;
    int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
    const int vLumFilterSize= c->vLumFilterSize;
    const int vChrFilterSize= c->vChrFilterSize;
    const int hLumFilterSize= c->hLumFilterSize;
    const int hChrFilterSize= c->hChrFilterSize;
    int16_t **lumPixBuf= c->lumPixBuf;
    int16_t **chrPixBuf= c->chrPixBuf;
    int16_t **alpPixBuf= c->alpPixBuf;
    const int vLumBufSize= c->vLumBufSize;
    const int vChrBufSize= c->vChrBufSize;
    uint8_t *formatConvBuffer= c->formatConvBuffer;
    const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
    const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
    int lastDstY;
    uint32_t *pal=c->pal_yuv;

    /* vars which will change and which we need to store back in the context */
    int dstY= c->dstY;
    int lumBufIndex= c->lumBufIndex;
    int chrBufIndex= c->chrBufIndex;
    int lastInLumBuf= c->lastInLumBuf;
    int lastInChrBuf= c->lastInChrBuf;

    if (isPacked(c->srcFormat)) {
        src[0]=
        src[1]=
        src[2]=
        src[3]= src[0];
        srcStride[0]=
        srcStride[1]=
        srcStride[2]=
        srcStride[3]= srcStride[0];
    }
    srcStride[1]<<= c->vChrDrop;
    srcStride[2]<<= c->vChrDrop;

    DEBUG_BUFFERS("swScale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src[0], srcStride[0], src[1], srcStride[1], src[2], srcStride[2], src[3], srcStride[3],
                  dst[0], dstStride[0], dst[1], dstStride[1], dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
    DEBUG_BUFFERS("vLumFilterSize: %d vLumBufSize: %d vChrFilterSize: %d vChrBufSize: %d\n",
                  vLumFilterSize, vLumBufSize, vChrFilterSize, vChrBufSize);

    if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0) {
        static int warnedAlready=0; //FIXME move this into the context perhaps
        if (flags & SWS_PRINT_INFO && !warnedAlready) {
            av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
                   "         ->cannot do aligned memory accesses anymore\n");
            warnedAlready=1;
        }
    }

    /* Note the user might start scaling the picture in the middle so this
       will not get executed. This is not really intended but works
       currently, so people might do it. */
    if (srcSliceY ==0) {
        lumBufIndex=-1;
        chrBufIndex=-1;
        dstY=0;
        lastInLumBuf= -1;
        lastInChrBuf= -1;
    }

    lastDstY= dstY;

    for (;dstY < dstH; dstY++) {
        unsigned char *dest =dst[0]+dstStride[0]*dstY;
        const int chrDstY= dstY>>c->chrDstVSubSample;
        unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
        unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
        unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;

        const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
        const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)];
        const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
        int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
        int lastLumSrcY2=firstLumSrcY2+ vLumFilterSize -1; // Last line needed as input
        int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
        int enough_lines;

        //handle holes (FAST_BILINEAR & weird filters)
        if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
        if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
        assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
        assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);

        DEBUG_BUFFERS("dstY: %d\n", dstY);
        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);

        // Do we have enough lines in this slice to output the dstY line
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample);
        if (!enough_lines) {
            lastLumSrcY = srcSliceY + srcSliceH - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
        }

        //Do horizontal scaling
        while(lastInLumBuf < lastLumSrcY) {
            const uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
            const uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
            lumBufIndex++;
            assert(lumBufIndex < 2*vLumBufSize);
            assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
            assert(lastInLumBuf + 1 - srcSliceY >= 0);
            RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
                            hLumFilter, hLumFilterPos, hLumFilterSize,
                            formatConvBuffer,
                            pal, 0);
            if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
                RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
                                hLumFilter, hLumFilterPos, hLumFilterSize,
                                formatConvBuffer,
                                pal, 1);
            lastInLumBuf++;
            DEBUG_BUFFERS("\t\tlumBufIndex %d: lastInLumBuf: %d\n",
                          lumBufIndex, lastInLumBuf);
        }
        while(lastInChrBuf < lastChrSrcY) {
            const uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
            const uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
            chrBufIndex++;
            assert(chrBufIndex < 2*vChrBufSize);
            assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
            assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
            //FIXME replace parameters through context struct (some at least)
            if (c->needs_hcscale)
                RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
                                hChrFilter, hChrFilterPos, hChrFilterSize,
                                formatConvBuffer,
                                pal);
            lastInChrBuf++;
            DEBUG_BUFFERS("\t\tchrBufIndex %d: lastInChrBuf: %d\n",
                          chrBufIndex, lastInChrBuf);
        }
        //wrap buf index around to stay inside the ring buffer
        if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
        if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
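        /* lumPixBuf/chrPixBuf are ring buffers of vLumBufSize/vChrBufSize
           line pointers; the pointer arrays are (by this code's apparent
           convention) allocated at twice that size with the second half
           aliasing the first, so a filter-sized window of consecutive
           entries (lumSrcPtr below) never has to wrap mid-filter. */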
  2548. if (!enough_lines)
  2549. break; //we can't output a dstY line so let's try with the next slice
  2550. #if COMPILE_TEMPLATE_MMX
  2551. c->blueDither= ff_dither8[dstY&1];
  2552. if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
  2553. c->greenDither= ff_dither8[dstY&1];
  2554. else
  2555. c->greenDither= ff_dither4[dstY&1];
  2556. c->redDither= ff_dither8[(dstY+1)&1];
  2557. #endif
  2558. if (dstY < dstH-2) {
  2559. const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2560. const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2561. const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
#if COMPILE_TEMPLATE_MMX
            int i;
            if (flags & SWS_ACCURATE_RND) {
                int s= APCK_SIZE / 8;
                for (i=0; i<vLumFilterSize; i+=2) {
                    *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
                    *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
                    lumMmxFilter[s*i+APCK_COEF/4  ]=
                    lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
                                                   + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                    if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                        *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
                        *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
                        alpMmxFilter[s*i+APCK_COEF/4  ]=
                        alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
                    }
                }
                for (i=0; i<vChrFilterSize; i+=2) {
                    *(const void**)&chrMmxFilter[s*i              ]= chrSrcPtr[i  ];
                    *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrSrcPtr[i+(vChrFilterSize>1)];
                    chrMmxFilter[s*i+APCK_COEF/4  ]=
                    chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
                                                   + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
                }
            } else {
                for (i=0; i<vLumFilterSize; i++) {
                    lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
                    lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
                    lumMmxFilter[4*i+2]=
                    lumMmxFilter[4*i+3]=
                        ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
                    if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                        alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
                        alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
                        alpMmxFilter[4*i+2]=
                        alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
                    }
                }
                for (i=0; i<vChrFilterSize; i++) {
                    chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
                    chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
                    chrMmxFilter[4*i+2]=
                    chrMmxFilter[4*i+3]=
                        ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
                }
            }
#endif
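            /* With vertically subsampled chroma, the chroma planes get a new
               line only once every (1<<chrDstVSubSample) output lines;
               passing uDest/vDest as NULL tells the output function to skip
               the chroma for this line. */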
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if (dstY&chrSkipMask) uDest= NULL; //FIXME split this into separate luma / chroma functions
                c->yuv2nv12X(c,
                             vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                             vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                             dest, uDest, dstW, chrDstW, dstFormat);
            } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12-like
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split this into separate luma / chroma functions
                if (is16BPS(dstFormat)) {
                    yuv2yuvX16inC(
                        vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                        vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
                        dstFormat);
                } else if (vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12
                    const int16_t *lumBuf = lumSrcPtr[0];
                    const int16_t *chrBuf = chrSrcPtr[0];
                    const int16_t *alpBuf = (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL;
                    c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
                } else { //general YV12
                    c->yuv2yuvX(c,
                                vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                                vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
                }
            } else {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
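                /* Packed (RGB-like) output: pick the cheapest vertical scaler
                   that fits the filter lengths - 1-tap luma / 2-tap chroma,
                   2-tap bilinear, or the general N-tap version. */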
                if (vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB
                    int chrAlpha= vChrFilter[2*dstY+1];
                    if(flags & SWS_FULL_CHR_H_INT) {
                        yuv2rgbXinC_full(c, //FIXME write a packed1_full function
                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                         alpSrcPtr, dest, dstW, dstY);
                    } else {
                        c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
                                       alpPixBuf ? *alpSrcPtr : NULL,
                                       dest, dstW, chrAlpha, dstFormat, flags, dstY);
                    }
                } else if (vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB
                    int lumAlpha= vLumFilter[2*dstY+1];
                    int chrAlpha= vChrFilter[2*dstY+1];
                    lumMmxFilter[2]=
                    lumMmxFilter[3]= vLumFilter[2*dstY   ]*0x10001;
                    chrMmxFilter[2]=
                    chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
                    if(flags & SWS_FULL_CHR_H_INT) {
                        yuv2rgbXinC_full(c, //FIXME write a packed2_full function
                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                         alpSrcPtr, dest, dstW, dstY);
                    } else {
                        c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
                                       alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
                                       dest, dstW, lumAlpha, chrAlpha, dstY);
                    }
                } else { //general RGB
                    if(flags & SWS_FULL_CHR_H_INT) {
                        yuv2rgbXinC_full(c,
                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                         alpSrcPtr, dest, dstW, dstY);
                    } else {
                        c->yuv2packedX(c,
                                       vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                       vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                       alpSrcPtr, dest, dstW, dstY);
                    }
                }
            }
        } else { // for the last few lines the MMX code would access past the arrays' tails, so use the plain C functions
            const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
            const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
            const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if (dstY&chrSkipMask) uDest= NULL; //FIXME split this into separate luma / chroma functions
                yuv2nv12XinC(
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, dstW, chrDstW, dstFormat);
            } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12-like
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split this into separate luma / chroma functions
                if (is16BPS(dstFormat)) {
                    yuv2yuvX16inC(
                        vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                        vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
                        dstFormat);
                } else {
                    yuv2yuvXinC(
                        vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                        vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
                }
            } else {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
                if(flags & SWS_FULL_CHR_H_INT) {
                    yuv2rgbXinC_full(c,
                                     vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                     vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                     alpSrcPtr, dest, dstW, dstY);
                } else {
                    yuv2packedXinC(c,
                                   vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                                   vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                                   alpSrcPtr, dest, dstW, dstY);
                }
            }
        }
    }
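
    /* The destination has an alpha plane but the source supplied none:
       fill the lines written by this call with 255 (fully opaque). */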
    if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
        fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);

#if COMPILE_TEMPLATE_MMX
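    /* The MMX2 output code uses non-temporal stores, which need an sfence
       before other code is allowed to read the destination. */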
    if (flags & SWS_CPU_CAPS_MMX2 ) __asm__ volatile("sfence":::"memory");
    /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
    if (flags & SWS_CPU_CAPS_3DNOW) __asm__ volatile("femms" :::"memory");
    else                            __asm__ volatile("emms"  :::"memory");
#endif
    /* store changed local vars back in the context */
    c->dstY= dstY;
    c->lumBufIndex= lumBufIndex;
    c->chrBufIndex= chrBufIndex;
    c->lastInLumBuf= lastInLumBuf;
    c->lastInChrBuf= lastInChrBuf;

    return dstY - lastDstY;
}
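
/* Select the function pointers for this template instantiation and the input
   converters that unpack the source format into the planar intermediate the
   scalers work on. */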
static void RENAME(sws_init_swScale)(SwsContext *c)
{
    enum PixelFormat srcFormat = c->srcFormat;

    c->yuv2nv12X   = RENAME(yuv2nv12X  );
    c->yuv2yuv1    = RENAME(yuv2yuv1   );
    c->yuv2yuvX    = RENAME(yuv2yuvX   );
    c->yuv2packed1 = RENAME(yuv2packed1);
    c->yuv2packed2 = RENAME(yuv2packed2);
    c->yuv2packedX = RENAME(yuv2packedX);

    c->hScale      = RENAME(hScale     );

#if COMPILE_TEMPLATE_MMX
    // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
    if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
#else
    if (c->flags & SWS_FAST_BILINEAR)
#endif
    {
        c->hyscale_fast = RENAME(hyscale_fast);
        c->hcscale_fast = RENAME(hcscale_fast);
    }
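
    /* chrToYV12 unpacks the source's chroma into the planar 8-bit
       intermediate; NULL means the input chroma can be used as-is. */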
    c->chrToYV12 = NULL;
    switch(srcFormat) {
    case PIX_FMT_YUYV422  : c->chrToYV12 = RENAME(yuy2ToUV); break;
    case PIX_FMT_UYVY422  : c->chrToYV12 = RENAME(uyvyToUV); break;
    case PIX_FMT_NV12     : c->chrToYV12 = RENAME(nv12ToUV); break;
    case PIX_FMT_NV21     : c->chrToYV12 = RENAME(nv21ToUV); break;
    case PIX_FMT_RGB8     :
    case PIX_FMT_BGR8     :
    case PIX_FMT_PAL8     :
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV; break;
    case PIX_FMT_YUV420P16BE:
    case PIX_FMT_YUV422P16BE:
    case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
    case PIX_FMT_YUV420P16LE:
    case PIX_FMT_YUV422P16LE:
    case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
    }
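
    /* For RGB inputs whose destination chroma is horizontally subsampled,
       the _half variants average two source pixels per chroma sample while
       converting, so no separate downsampling pass is needed. */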
    if (c->chrSrcHSubSample) {
        switch(srcFormat) {
        case PIX_FMT_RGB48BE:
        case PIX_FMT_RGB48LE: c->chrToYV12 = rgb48ToUV_half; break;
        case PIX_FMT_RGB32  :
        case PIX_FMT_RGB32_1: c->chrToYV12 = bgr32ToUV_half; break;
        case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV_half); break;
        case PIX_FMT_BGR565 : c->chrToYV12 = bgr16ToUV_half; break;
        case PIX_FMT_BGR555 : c->chrToYV12 = bgr15ToUV_half; break;
        case PIX_FMT_BGR32  :
        case PIX_FMT_BGR32_1: c->chrToYV12 = rgb32ToUV_half; break;
        case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV_half); break;
        case PIX_FMT_RGB565 : c->chrToYV12 = rgb16ToUV_half; break;
        case PIX_FMT_RGB555 : c->chrToYV12 = rgb15ToUV_half; break;
        }
    } else {
        switch(srcFormat) {
        case PIX_FMT_RGB48BE:
        case PIX_FMT_RGB48LE: c->chrToYV12 = rgb48ToUV; break;
        case PIX_FMT_RGB32  :
        case PIX_FMT_RGB32_1: c->chrToYV12 = bgr32ToUV; break;
        case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV); break;
        case PIX_FMT_BGR565 : c->chrToYV12 = bgr16ToUV; break;
        case PIX_FMT_BGR555 : c->chrToYV12 = bgr15ToUV; break;
        case PIX_FMT_BGR32  :
        case PIX_FMT_BGR32_1: c->chrToYV12 = rgb32ToUV; break;
        case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV); break;
        case PIX_FMT_RGB565 : c->chrToYV12 = rgb16ToUV; break;
        case PIX_FMT_RGB555 : c->chrToYV12 = rgb15ToUV; break;
        }
    }
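
    /* Luma extraction.  Note the converter reuse: yuy2ToY keeps the bytes at
       even offsets, which is the Y of YUYV but also the MSB of big-endian
       16-bit samples; uyvyToY keeps the odd bytes, matching little-endian
       16-bit input. */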
    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
    case PIX_FMT_YUYV422  :
    case PIX_FMT_YUV420P16BE:
    case PIX_FMT_YUV422P16BE:
    case PIX_FMT_YUV444P16BE:
    case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
    case PIX_FMT_UYVY422  :
    case PIX_FMT_YUV420P16LE:
    case PIX_FMT_YUV422P16LE:
    case PIX_FMT_YUV444P16LE:
    case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
    case PIX_FMT_BGR24    : c->lumToYV12 = RENAME(bgr24ToY); break;
    case PIX_FMT_BGR565   : c->lumToYV12 = bgr16ToY; break;
    case PIX_FMT_BGR555   : c->lumToYV12 = bgr15ToY; break;
    case PIX_FMT_RGB24    : c->lumToYV12 = RENAME(rgb24ToY); break;
    case PIX_FMT_RGB565   : c->lumToYV12 = rgb16ToY; break;
    case PIX_FMT_RGB555   : c->lumToYV12 = rgb15ToY; break;
    case PIX_FMT_RGB8     :
    case PIX_FMT_BGR8     :
    case PIX_FMT_PAL8     :
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_RGB4_BYTE: c->lumToYV12 = palToY; break;
    case PIX_FMT_MONOBLACK: c->lumToYV12 = monoblack2Y; break;
    case PIX_FMT_MONOWHITE: c->lumToYV12 = monowhite2Y; break;
    case PIX_FMT_RGB32    :
    case PIX_FMT_RGB32_1  : c->lumToYV12 = bgr32ToY; break;
    case PIX_FMT_BGR32    :
    case PIX_FMT_BGR32_1  : c->lumToYV12 = rgb32ToY; break;
    case PIX_FMT_RGB48BE  :
    case PIX_FMT_RGB48LE  : c->lumToYV12 = rgb48ToY; break;
    }
    if (c->alpPixBuf) {
        switch (srcFormat) {
        case PIX_FMT_RGB32  :
        case PIX_FMT_RGB32_1:
        case PIX_FMT_BGR32  :
        case PIX_FMT_BGR32_1: c->alpToYV12 = abgrToA; break;
        }
    }
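
    /* Byte offsets into packed source pixels: RGB32/BGR32 keep alpha at
       offset 3, the _1 (alpha-first) layouts shift luma/chroma sampling by
       ALT32_CORR, and RGB48LE is read from offset 1 so the most significant
       byte of each 16-bit component is sampled. */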
    switch (srcFormat) {
    case PIX_FMT_RGB32  :
    case PIX_FMT_BGR32  :
        c->alpSrcOffset = 3;
        break;
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
        c->lumSrcOffset = ALT32_CORR;
        c->chrSrcOffset = ALT32_CORR;
        break;
    case PIX_FMT_RGB48LE:
        c->lumSrcOffset = 1;
        c->chrSrcOffset = 1;
        c->alpSrcOffset = 1;
        break;
    }
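
    /* Limited (MPEG) <-> full (JPEG) range conversion is applied to the
       scaled intermediate, so it is only hooked up when the ranges differ
       and the output stays YUV; for RGB output the range is handled by the
       yuv2rgb conversion setup instead. */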
    if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) {
        if (c->srcRange) {
            c->lumConvertRange = RENAME(lumRangeFromJpeg);
            c->chrConvertRange = RENAME(chrRangeFromJpeg);
        } else {
            c->lumConvertRange = RENAME(lumRangeToJpeg);
            c->chrConvertRange = RENAME(chrRangeToJpeg);
        }
    }
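
    /* Gray and monochrome sources carry no chroma, and gray output needs
       none, so horizontal chroma scaling can be skipped entirely for them. */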
    if (!(isGray(srcFormat) || isGray(c->dstFormat) ||
          srcFormat == PIX_FMT_MONOBLACK || srcFormat == PIX_FMT_MONOWHITE))
        c->needs_hcscale = 1;
}