swscale_template.c
  1. /*
  2. * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * The C code (not assembly, MMX, ...) of this file can be used
  21. * under the LGPL license.
  22. */
  23. #undef REAL_MOVNTQ
  24. #undef MOVNTQ
  25. #undef PAVGB
  26. #undef PREFETCH
  27. #undef PREFETCHW
  28. #undef EMMS
  29. #undef SFENCE
  30. #if HAVE_AMD3DNOW
  31. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  32. #define EMMS "femms"
  33. #else
  34. #define EMMS "emms"
  35. #endif
  36. #if HAVE_AMD3DNOW
  37. #define PREFETCH "prefetch"
  38. #define PREFETCHW "prefetchw"
  39. #elif HAVE_MMX2
  40. #define PREFETCH "prefetchnta"
  41. #define PREFETCHW "prefetcht0"
  42. #else
  43. #define PREFETCH " # nop"
  44. #define PREFETCHW " # nop"
  45. #endif
  46. #if HAVE_MMX2
  47. #define SFENCE "sfence"
  48. #else
  49. #define SFENCE " # nop"
  50. #endif
  51. #if HAVE_MMX2
  52. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  53. #elif HAVE_AMD3DNOW
  54. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  55. #endif
  56. #if HAVE_MMX2
  57. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  58. #else
  59. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  60. #endif
  61. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  62. #if HAVE_ALTIVEC
  63. #include "swscale_altivec_template.c"
  64. #endif
  65. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  66. __asm__ volatile(\
  67. "xor %%"REG_a", %%"REG_a" \n\t"\
  68. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  69. "movq %%mm3, %%mm4 \n\t"\
  70. "lea " offset "(%0), %%"REG_d" \n\t"\
  71. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  72. ASMALIGN(4) /* FIXME Unroll? */\
  73. "1: \n\t"\
  74. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  75. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  76. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
  77. "add $16, %%"REG_d" \n\t"\
  78. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  79. "test %%"REG_S", %%"REG_S" \n\t"\
  80. "pmulhw %%mm0, %%mm2 \n\t"\
  81. "pmulhw %%mm0, %%mm5 \n\t"\
  82. "paddw %%mm2, %%mm3 \n\t"\
  83. "paddw %%mm5, %%mm4 \n\t"\
  84. " jnz 1b \n\t"\
  85. "psraw $3, %%mm3 \n\t"\
  86. "psraw $3, %%mm4 \n\t"\
  87. "packuswb %%mm4, %%mm3 \n\t"\
  88. MOVNTQ(%%mm3, (%1, %%REGa))\
  89. "add $8, %%"REG_a" \n\t"\
  90. "cmp %2, %%"REG_a" \n\t"\
  91. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  92. "movq %%mm3, %%mm4 \n\t"\
  93. "lea " offset "(%0), %%"REG_d" \n\t"\
  94. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  95. "jb 1b \n\t"\
  96. :: "r" (&c->redDither),\
  97. "r" (dest), "g" (width)\
  98. : "%"REG_a, "%"REG_d, "%"REG_S\
  99. );
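/* For reference, a hedged C sketch of what the YSCALEYUV2YV12X loop above computes: a rounded,
 * clipped multiply-accumulate over the vertical filter taps, mirroring the generic yuv2yuvXinC
 * fallback. The names (filter, src, dest, dstW) are illustrative; the MMX code approximates the
 * final >>19 by pmulhw (>>16) followed by psraw $3. */
#if 0
static void vfilter_sketch(const int16_t *filter, int filterSize,
                           int16_t **src, uint8_t *dest, int dstW)
{
    for (int i = 0; i < dstW; i++) {
        int val = 1 << 18;                     /* rounding term, like VROUNDER_OFFSET */
        for (int j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];      /* accumulate over the taps */
        val >>= 19;
        dest[i] = val < 0 ? 0 : (val > 255 ? 255 : val); /* clip, like packuswb */
    }
}
#endif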
  100. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  101. __asm__ volatile(\
  102. "lea " offset "(%0), %%"REG_d" \n\t"\
  103. "xor %%"REG_a", %%"REG_a" \n\t"\
  104. "pxor %%mm4, %%mm4 \n\t"\
  105. "pxor %%mm5, %%mm5 \n\t"\
  106. "pxor %%mm6, %%mm6 \n\t"\
  107. "pxor %%mm7, %%mm7 \n\t"\
  108. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  109. ASMALIGN(4) \
  110. "1: \n\t"\
  111. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
  112. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  113. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  114. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
  115. "movq %%mm0, %%mm3 \n\t"\
  116. "punpcklwd %%mm1, %%mm0 \n\t"\
  117. "punpckhwd %%mm1, %%mm3 \n\t"\
  118. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  119. "pmaddwd %%mm1, %%mm0 \n\t"\
  120. "pmaddwd %%mm1, %%mm3 \n\t"\
  121. "paddd %%mm0, %%mm4 \n\t"\
  122. "paddd %%mm3, %%mm5 \n\t"\
  123. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
  124. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  125. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. "movq %%mm2, %%mm0 \n\t"\
  128. "punpcklwd %%mm3, %%mm2 \n\t"\
  129. "punpckhwd %%mm3, %%mm0 \n\t"\
  130. "pmaddwd %%mm1, %%mm2 \n\t"\
  131. "pmaddwd %%mm1, %%mm0 \n\t"\
  132. "paddd %%mm2, %%mm6 \n\t"\
  133. "paddd %%mm0, %%mm7 \n\t"\
  134. " jnz 1b \n\t"\
  135. "psrad $16, %%mm4 \n\t"\
  136. "psrad $16, %%mm5 \n\t"\
  137. "psrad $16, %%mm6 \n\t"\
  138. "psrad $16, %%mm7 \n\t"\
  139. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  140. "packssdw %%mm5, %%mm4 \n\t"\
  141. "packssdw %%mm7, %%mm6 \n\t"\
  142. "paddw %%mm0, %%mm4 \n\t"\
  143. "paddw %%mm0, %%mm6 \n\t"\
  144. "psraw $3, %%mm4 \n\t"\
  145. "psraw $3, %%mm6 \n\t"\
  146. "packuswb %%mm6, %%mm4 \n\t"\
  147. MOVNTQ(%%mm4, (%1, %%REGa))\
  148. "add $8, %%"REG_a" \n\t"\
  149. "cmp %2, %%"REG_a" \n\t"\
  150. "lea " offset "(%0), %%"REG_d" \n\t"\
  151. "pxor %%mm4, %%mm4 \n\t"\
  152. "pxor %%mm5, %%mm5 \n\t"\
  153. "pxor %%mm6, %%mm6 \n\t"\
  154. "pxor %%mm7, %%mm7 \n\t"\
  155. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  156. "jb 1b \n\t"\
  157. :: "r" (&c->redDither),\
  158. "r" (dest), "g" (width)\
  159. : "%"REG_a, "%"REG_d, "%"REG_S\
  160. );
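/* The _ACCURATE variant above widens to 32-bit lanes (punpck*wd + pmaddwd into mm4..mm7) and
 * applies the rounder and shift only once at the end, instead of discarding low bits in every
 * pmulhw step; this is the path selected by SWS_ACCURATE_RND. */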
  161. #define YSCALEYUV2YV121 \
  162. "mov %2, %%"REG_a" \n\t"\
  163. ASMALIGN(4) /* FIXME Unroll? */\
  164. "1: \n\t"\
  165. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  166. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  167. "psraw $7, %%mm0 \n\t"\
  168. "psraw $7, %%mm1 \n\t"\
  169. "packuswb %%mm1, %%mm0 \n\t"\
  170. MOVNTQ(%%mm0, (%1, %%REGa))\
  171. "add $8, %%"REG_a" \n\t"\
  172. "jnc 1b \n\t"
  173. #define YSCALEYUV2YV121_ACCURATE \
  174. "mov %2, %%"REG_a" \n\t"\
  175. "pcmpeqw %%mm7, %%mm7 \n\t"\
  176. "psrlw $15, %%mm7 \n\t"\
  177. "psllw $6, %%mm7 \n\t"\
  178. ASMALIGN(4) /* FIXME Unroll? */\
  179. "1: \n\t"\
  180. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  181. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  182. "paddsw %%mm7, %%mm0 \n\t"\
  183. "paddsw %%mm7, %%mm1 \n\t"\
  184. "psraw $7, %%mm0 \n\t"\
  185. "psraw $7, %%mm1 \n\t"\
  186. "packuswb %%mm1, %%mm0 \n\t"\
  187. MOVNTQ(%%mm0, (%1, %%REGa))\
  188. "add $8, %%"REG_a" \n\t"\
  189. "jnc 1b \n\t"
  190. /*
  191. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  192. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  193. "r" (dest), "m" (dstW),
  194. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  195. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  196. */
  197. #define YSCALEYUV2PACKEDX_UV \
  198. __asm__ volatile(\
  199. "xor %%"REG_a", %%"REG_a" \n\t"\
  200. ASMALIGN(4)\
  201. "nop \n\t"\
  202. "1: \n\t"\
  203. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  204. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  205. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  206. "movq %%mm3, %%mm4 \n\t"\
  207. ASMALIGN(4)\
  208. "2: \n\t"\
  209. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  210. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  211. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  212. "add $16, %%"REG_d" \n\t"\
  213. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  214. "pmulhw %%mm0, %%mm2 \n\t"\
  215. "pmulhw %%mm0, %%mm5 \n\t"\
  216. "paddw %%mm2, %%mm3 \n\t"\
  217. "paddw %%mm5, %%mm4 \n\t"\
  218. "test %%"REG_S", %%"REG_S" \n\t"\
  219. " jnz 2b \n\t"\
  220. #define YSCALEYUV2PACKEDX_YA(offset) \
  221. "lea "offset"(%0), %%"REG_d" \n\t"\
  222. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  223. "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
  224. "movq %%mm1, %%mm7 \n\t"\
  225. ASMALIGN(4)\
  226. "2: \n\t"\
  227. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  228. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  229. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  230. "add $16, %%"REG_d" \n\t"\
  231. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  232. "pmulhw %%mm0, %%mm2 \n\t"\
  233. "pmulhw %%mm0, %%mm5 \n\t"\
  234. "paddw %%mm2, %%mm1 \n\t"\
  235. "paddw %%mm5, %%mm7 \n\t"\
  236. "test %%"REG_S", %%"REG_S" \n\t"\
  237. " jnz 2b \n\t"\
  238. #define YSCALEYUV2PACKEDX \
  239. YSCALEYUV2PACKEDX_UV \
  240. YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET) \
  241. #define YSCALEYUV2PACKEDX_END \
  242. :: "r" (&c->redDither), \
  243. "m" (dummy), "m" (dummy), "m" (dummy),\
  244. "r" (dest), "m" (dstW) \
  245. : "%"REG_a, "%"REG_d, "%"REG_S \
  246. );
  247. #define YSCALEYUV2PACKEDX_ACCURATE_UV \
  248. __asm__ volatile(\
  249. "xor %%"REG_a", %%"REG_a" \n\t"\
  250. ASMALIGN(4)\
  251. "nop \n\t"\
  252. "1: \n\t"\
  253. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  254. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  255. "pxor %%mm4, %%mm4 \n\t"\
  256. "pxor %%mm5, %%mm5 \n\t"\
  257. "pxor %%mm6, %%mm6 \n\t"\
  258. "pxor %%mm7, %%mm7 \n\t"\
  259. ASMALIGN(4)\
  260. "2: \n\t"\
  261. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  262. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  263. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  264. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  265. "movq %%mm0, %%mm3 \n\t"\
  266. "punpcklwd %%mm1, %%mm0 \n\t"\
  267. "punpckhwd %%mm1, %%mm3 \n\t"\
  268. "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
  269. "pmaddwd %%mm1, %%mm0 \n\t"\
  270. "pmaddwd %%mm1, %%mm3 \n\t"\
  271. "paddd %%mm0, %%mm4 \n\t"\
  272. "paddd %%mm3, %%mm5 \n\t"\
  273. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  274. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  275. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  276. "test %%"REG_S", %%"REG_S" \n\t"\
  277. "movq %%mm2, %%mm0 \n\t"\
  278. "punpcklwd %%mm3, %%mm2 \n\t"\
  279. "punpckhwd %%mm3, %%mm0 \n\t"\
  280. "pmaddwd %%mm1, %%mm2 \n\t"\
  281. "pmaddwd %%mm1, %%mm0 \n\t"\
  282. "paddd %%mm2, %%mm6 \n\t"\
  283. "paddd %%mm0, %%mm7 \n\t"\
  284. " jnz 2b \n\t"\
  285. "psrad $16, %%mm4 \n\t"\
  286. "psrad $16, %%mm5 \n\t"\
  287. "psrad $16, %%mm6 \n\t"\
  288. "psrad $16, %%mm7 \n\t"\
  289. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  290. "packssdw %%mm5, %%mm4 \n\t"\
  291. "packssdw %%mm7, %%mm6 \n\t"\
  292. "paddw %%mm0, %%mm4 \n\t"\
  293. "paddw %%mm0, %%mm6 \n\t"\
  294. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  295. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  296. #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
  297. "lea "offset"(%0), %%"REG_d" \n\t"\
  298. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  299. "pxor %%mm1, %%mm1 \n\t"\
  300. "pxor %%mm5, %%mm5 \n\t"\
  301. "pxor %%mm7, %%mm7 \n\t"\
  302. "pxor %%mm6, %%mm6 \n\t"\
  303. ASMALIGN(4)\
  304. "2: \n\t"\
  305. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  306. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  307. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  308. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  309. "movq %%mm0, %%mm3 \n\t"\
  310. "punpcklwd %%mm4, %%mm0 \n\t"\
  311. "punpckhwd %%mm4, %%mm3 \n\t"\
  312. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  313. "pmaddwd %%mm4, %%mm0 \n\t"\
  314. "pmaddwd %%mm4, %%mm3 \n\t"\
  315. "paddd %%mm0, %%mm1 \n\t"\
  316. "paddd %%mm3, %%mm5 \n\t"\
  317. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  318. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  319. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  320. "test %%"REG_S", %%"REG_S" \n\t"\
  321. "movq %%mm2, %%mm0 \n\t"\
  322. "punpcklwd %%mm3, %%mm2 \n\t"\
  323. "punpckhwd %%mm3, %%mm0 \n\t"\
  324. "pmaddwd %%mm4, %%mm2 \n\t"\
  325. "pmaddwd %%mm4, %%mm0 \n\t"\
  326. "paddd %%mm2, %%mm7 \n\t"\
  327. "paddd %%mm0, %%mm6 \n\t"\
  328. " jnz 2b \n\t"\
  329. "psrad $16, %%mm1 \n\t"\
  330. "psrad $16, %%mm5 \n\t"\
  331. "psrad $16, %%mm7 \n\t"\
  332. "psrad $16, %%mm6 \n\t"\
  333. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  334. "packssdw %%mm5, %%mm1 \n\t"\
  335. "packssdw %%mm6, %%mm7 \n\t"\
  336. "paddw %%mm0, %%mm1 \n\t"\
  337. "paddw %%mm0, %%mm7 \n\t"\
  338. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  339. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  340. #define YSCALEYUV2PACKEDX_ACCURATE \
  341. YSCALEYUV2PACKEDX_ACCURATE_UV \
  342. YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
  343. #define YSCALEYUV2RGBX \
  344. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  345. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  346. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  347. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  348. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  349. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  350. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  351. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  352. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  353. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  354. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  355. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  356. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  357. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  358. "paddw %%mm3, %%mm4 \n\t"\
  359. "movq %%mm2, %%mm0 \n\t"\
  360. "movq %%mm5, %%mm6 \n\t"\
  361. "movq %%mm4, %%mm3 \n\t"\
  362. "punpcklwd %%mm2, %%mm2 \n\t"\
  363. "punpcklwd %%mm5, %%mm5 \n\t"\
  364. "punpcklwd %%mm4, %%mm4 \n\t"\
  365. "paddw %%mm1, %%mm2 \n\t"\
  366. "paddw %%mm1, %%mm5 \n\t"\
  367. "paddw %%mm1, %%mm4 \n\t"\
  368. "punpckhwd %%mm0, %%mm0 \n\t"\
  369. "punpckhwd %%mm6, %%mm6 \n\t"\
  370. "punpckhwd %%mm3, %%mm3 \n\t"\
  371. "paddw %%mm7, %%mm0 \n\t"\
  372. "paddw %%mm7, %%mm6 \n\t"\
  373. "paddw %%mm7, %%mm3 \n\t"\
  374. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  375. "packuswb %%mm0, %%mm2 \n\t"\
  376. "packuswb %%mm6, %%mm5 \n\t"\
  377. "packuswb %%mm3, %%mm4 \n\t"\
  378. #define REAL_YSCALEYUV2PACKED(index, c) \
  379. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  380. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
  381. "psraw $3, %%mm0 \n\t"\
  382. "psraw $3, %%mm1 \n\t"\
  383. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  384. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  385. "xor "#index", "#index" \n\t"\
  386. ASMALIGN(4)\
  387. "1: \n\t"\
  388. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  389. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  390. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  391. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  392. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  393. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  394. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  395. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  396. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  397. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  398. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  399. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  400. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  401. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  402. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  403. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  404. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  405. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  406. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  407. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  408. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  409. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  410. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  411. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  412. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  413. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  414. #define REAL_YSCALEYUV2RGB_UV(index, c) \
  415. "xor "#index", "#index" \n\t"\
  416. ASMALIGN(4)\
  417. "1: \n\t"\
  418. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  419. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  420. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  421. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  422. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  423. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  424. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  425. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  426. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  427. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  428. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  429. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  430. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  431. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  432. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  433. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  434. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  435. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  436. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  437. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  438. #define REAL_YSCALEYUV2RGB_YA(index, c) \
  439. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  440. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  441. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  442. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  443. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  444. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  445. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  446. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  447. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  448. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  449. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  450. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  451. #define REAL_YSCALEYUV2RGB_COEFF(c) \
  452. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  453. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  454. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  455. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  456. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  457. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  458. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  459. "paddw %%mm3, %%mm4 \n\t"\
  460. "movq %%mm2, %%mm0 \n\t"\
  461. "movq %%mm5, %%mm6 \n\t"\
  462. "movq %%mm4, %%mm3 \n\t"\
  463. "punpcklwd %%mm2, %%mm2 \n\t"\
  464. "punpcklwd %%mm5, %%mm5 \n\t"\
  465. "punpcklwd %%mm4, %%mm4 \n\t"\
  466. "paddw %%mm1, %%mm2 \n\t"\
  467. "paddw %%mm1, %%mm5 \n\t"\
  468. "paddw %%mm1, %%mm4 \n\t"\
  469. "punpckhwd %%mm0, %%mm0 \n\t"\
  470. "punpckhwd %%mm6, %%mm6 \n\t"\
  471. "punpckhwd %%mm3, %%mm3 \n\t"\
  472. "paddw %%mm7, %%mm0 \n\t"\
  473. "paddw %%mm7, %%mm6 \n\t"\
  474. "paddw %%mm7, %%mm3 \n\t"\
  475. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  476. "packuswb %%mm0, %%mm2 \n\t"\
  477. "packuswb %%mm6, %%mm5 \n\t"\
  478. "packuswb %%mm3, %%mm4 \n\t"\
  479. #define YSCALEYUV2RGB_YA(index, c) REAL_YSCALEYUV2RGB_YA(index, c)
  480. #define YSCALEYUV2RGB(index, c) \
  481. REAL_YSCALEYUV2RGB_UV(index, c) \
  482. REAL_YSCALEYUV2RGB_YA(index, c) \
  483. REAL_YSCALEYUV2RGB_COEFF(c)
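/* Conceptually, the UV/YA/COEFF pieces above evaluate the usual fixed-point YUV->RGB transform.
 * A sketch (the *_OFFSET / *_COEFF constants live in SwsContext and already carry the pmulhw
 * >>16 scaling; products are taken as high words and the final packuswb clips to 0..255):
 *
 *   Y' = (Y - Y_OFFSET) * Y_COEFF
 *   B  = Y' + (U - 128) * UB_COEFF
 *   G  = Y' + (U - 128) * UG_COEFF + (V - 128) * VG_COEFF
 *   R  = Y' + (V - 128) * VR_COEFF
 */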
  484. #define REAL_YSCALEYUV2PACKED1(index, c) \
  485. "xor "#index", "#index" \n\t"\
  486. ASMALIGN(4)\
  487. "1: \n\t"\
  488. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  489. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  490. "psraw $7, %%mm3 \n\t" \
  491. "psraw $7, %%mm4 \n\t" \
  492. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  493. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  494. "psraw $7, %%mm1 \n\t" \
  495. "psraw $7, %%mm7 \n\t" \
  496. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  497. #define REAL_YSCALEYUV2RGB1(index, c) \
  498. "xor "#index", "#index" \n\t"\
  499. ASMALIGN(4)\
  500. "1: \n\t"\
  501. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  502. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  503. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  504. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  505. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  506. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  507. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  508. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  509. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  510. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  511. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  512. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  513. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  514. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  515. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  516. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  517. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  518. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  519. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  520. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  521. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  522. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  523. "paddw %%mm3, %%mm4 \n\t"\
  524. "movq %%mm2, %%mm0 \n\t"\
  525. "movq %%mm5, %%mm6 \n\t"\
  526. "movq %%mm4, %%mm3 \n\t"\
  527. "punpcklwd %%mm2, %%mm2 \n\t"\
  528. "punpcklwd %%mm5, %%mm5 \n\t"\
  529. "punpcklwd %%mm4, %%mm4 \n\t"\
  530. "paddw %%mm1, %%mm2 \n\t"\
  531. "paddw %%mm1, %%mm5 \n\t"\
  532. "paddw %%mm1, %%mm4 \n\t"\
  533. "punpckhwd %%mm0, %%mm0 \n\t"\
  534. "punpckhwd %%mm6, %%mm6 \n\t"\
  535. "punpckhwd %%mm3, %%mm3 \n\t"\
  536. "paddw %%mm7, %%mm0 \n\t"\
  537. "paddw %%mm7, %%mm6 \n\t"\
  538. "paddw %%mm7, %%mm3 \n\t"\
  539. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  540. "packuswb %%mm0, %%mm2 \n\t"\
  541. "packuswb %%mm6, %%mm5 \n\t"\
  542. "packuswb %%mm3, %%mm4 \n\t"\
  543. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  544. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  545. "xor "#index", "#index" \n\t"\
  546. ASMALIGN(4)\
  547. "1: \n\t"\
  548. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  549. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  550. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  551. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  552. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  553. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  554. "psrlw $8, %%mm3 \n\t" \
  555. "psrlw $8, %%mm4 \n\t" \
  556. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  557. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  558. "psraw $7, %%mm1 \n\t" \
  559. "psraw $7, %%mm7 \n\t"
  560. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  561. // do vertical chrominance interpolation
  562. #define REAL_YSCALEYUV2RGB1b(index, c) \
  563. "xor "#index", "#index" \n\t"\
  564. ASMALIGN(4)\
  565. "1: \n\t"\
  566. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  567. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  568. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  569. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  570. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  571. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  572. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  573. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  574. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  575. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  576. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  577. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  578. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  579. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  580. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  581. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  582. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  583. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  584. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  585. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  586. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  587. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  588. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  589. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  590. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  591. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  592. "paddw %%mm3, %%mm4 \n\t"\
  593. "movq %%mm2, %%mm0 \n\t"\
  594. "movq %%mm5, %%mm6 \n\t"\
  595. "movq %%mm4, %%mm3 \n\t"\
  596. "punpcklwd %%mm2, %%mm2 \n\t"\
  597. "punpcklwd %%mm5, %%mm5 \n\t"\
  598. "punpcklwd %%mm4, %%mm4 \n\t"\
  599. "paddw %%mm1, %%mm2 \n\t"\
  600. "paddw %%mm1, %%mm5 \n\t"\
  601. "paddw %%mm1, %%mm4 \n\t"\
  602. "punpckhwd %%mm0, %%mm0 \n\t"\
  603. "punpckhwd %%mm6, %%mm6 \n\t"\
  604. "punpckhwd %%mm3, %%mm3 \n\t"\
  605. "paddw %%mm7, %%mm0 \n\t"\
  606. "paddw %%mm7, %%mm6 \n\t"\
  607. "paddw %%mm7, %%mm3 \n\t"\
  608. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  609. "packuswb %%mm0, %%mm2 \n\t"\
  610. "packuswb %%mm6, %%mm5 \n\t"\
  611. "packuswb %%mm3, %%mm4 \n\t"\
  612. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
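/* Rough C equivalent of the chroma handling in the "1b" path above (illustrative only; VOFW is
 * the second-chroma-plane offset used elsewhere in this file): the two input chroma lines are
 * simply averaged, since this mode is only used when uvalpha is roughly 0.5. */
#if 0
U = (uvbuf0[i]        + uvbuf1[i])        >> 1;  /* the macro folds this >>1 into its psrlw $5 */
V = (uvbuf0[i + VOFW] + uvbuf1[i + VOFW]) >> 1;
#endif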
  613. #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
  614. "movq "#b", "#q2" \n\t" /* B */\
  615. "movq "#r", "#t" \n\t" /* R */\
  616. "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
  617. "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
  618. "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
  619. "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
  620. "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
  621. "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
  622. "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
  623. "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
  624. "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
  625. "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
  626. \
  627. MOVNTQ( q0, (dst, index, 4))\
  628. MOVNTQ( b, 8(dst, index, 4))\
  629. MOVNTQ( q2, 16(dst, index, 4))\
  630. MOVNTQ( q3, 24(dst, index, 4))\
  631. \
  632. "add $8, "#index" \n\t"\
  633. "cmp "#dstw", "#index" \n\t"\
  634. " jb 1b \n\t"
  635. #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
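/* Per pixel, WRITEBGR32 above ends up storing the bytes in B, G, R, A order (little-endian
 * 0xAARRGGBB words). A hedged C sketch of the store pattern, with B/G/R/A standing for the
 * already-clipped component bytes: */
#if 0
for (int i = 0; i < dstW; i++) {
    dst[4*i + 0] = B[i];
    dst[4*i + 1] = G[i];
    dst[4*i + 2] = R[i];
    dst[4*i + 3] = A[i];   /* callers usually set the alpha register to all-ones via pcmpeqd */
}
#endif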
  636. #define REAL_WRITERGB16(dst, dstw, index) \
  637. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  638. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  639. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  640. "psrlq $3, %%mm2 \n\t"\
  641. \
  642. "movq %%mm2, %%mm1 \n\t"\
  643. "movq %%mm4, %%mm3 \n\t"\
  644. \
  645. "punpcklbw %%mm7, %%mm3 \n\t"\
  646. "punpcklbw %%mm5, %%mm2 \n\t"\
  647. "punpckhbw %%mm7, %%mm4 \n\t"\
  648. "punpckhbw %%mm5, %%mm1 \n\t"\
  649. \
  650. "psllq $3, %%mm3 \n\t"\
  651. "psllq $3, %%mm4 \n\t"\
  652. \
  653. "por %%mm3, %%mm2 \n\t"\
  654. "por %%mm4, %%mm1 \n\t"\
  655. \
  656. MOVNTQ(%%mm2, (dst, index, 2))\
  657. MOVNTQ(%%mm1, 8(dst, index, 2))\
  658. \
  659. "add $8, "#index" \n\t"\
  660. "cmp "#dstw", "#index" \n\t"\
  661. " jb 1b \n\t"
  662. #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
  663. #define REAL_WRITERGB15(dst, dstw, index) \
  664. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  665. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  666. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  667. "psrlq $3, %%mm2 \n\t"\
  668. "psrlq $1, %%mm5 \n\t"\
  669. \
  670. "movq %%mm2, %%mm1 \n\t"\
  671. "movq %%mm4, %%mm3 \n\t"\
  672. \
  673. "punpcklbw %%mm7, %%mm3 \n\t"\
  674. "punpcklbw %%mm5, %%mm2 \n\t"\
  675. "punpckhbw %%mm7, %%mm4 \n\t"\
  676. "punpckhbw %%mm5, %%mm1 \n\t"\
  677. \
  678. "psllq $2, %%mm3 \n\t"\
  679. "psllq $2, %%mm4 \n\t"\
  680. \
  681. "por %%mm3, %%mm2 \n\t"\
  682. "por %%mm4, %%mm1 \n\t"\
  683. \
  684. MOVNTQ(%%mm2, (dst, index, 2))\
  685. MOVNTQ(%%mm1, 8(dst, index, 2))\
  686. \
  687. "add $8, "#index" \n\t"\
  688. "cmp "#dstw", "#index" \n\t"\
  689. " jb 1b \n\t"
  690. #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
  691. #define WRITEBGR24OLD(dst, dstw, index) \
  692. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  693. "movq %%mm2, %%mm1 \n\t" /* B */\
  694. "movq %%mm5, %%mm6 \n\t" /* R */\
  695. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  696. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  697. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  698. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  699. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  700. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  701. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  702. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  703. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  704. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  705. \
  706. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  707. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  708. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
  709. "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
  710. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  711. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  712. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  713. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  714. \
  715. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  716. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  717. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  718. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  719. "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
  720. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  721. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  722. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
  723. "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
  724. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  725. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  726. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  727. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  728. \
  729. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  730. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  731. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  732. "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
  733. "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
  734. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  735. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  736. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  737. \
  738. MOVNTQ(%%mm0, (dst))\
  739. MOVNTQ(%%mm2, 8(dst))\
  740. MOVNTQ(%%mm3, 16(dst))\
  741. "add $24, "#dst" \n\t"\
  742. \
  743. "add $8, "#index" \n\t"\
  744. "cmp "#dstw", "#index" \n\t"\
  745. " jb 1b \n\t"
  746. #define WRITEBGR24MMX(dst, dstw, index) \
  747. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  748. "movq %%mm2, %%mm1 \n\t" /* B */\
  749. "movq %%mm5, %%mm6 \n\t" /* R */\
  750. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  751. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  752. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  753. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  754. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  755. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  756. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  757. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  758. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  759. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  760. \
  761. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  762. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  763. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  764. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  765. \
  766. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  767. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  768. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  769. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  770. \
  771. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  772. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  773. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  774. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  775. \
  776. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  777. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  778. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  779. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  780. MOVNTQ(%%mm0, (dst))\
  781. \
  782. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  783. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  784. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  785. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  786. MOVNTQ(%%mm6, 8(dst))\
  787. \
  788. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  789. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  790. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  791. MOVNTQ(%%mm5, 16(dst))\
  792. \
  793. "add $24, "#dst" \n\t"\
  794. \
  795. "add $8, "#index" \n\t"\
  796. "cmp "#dstw", "#index" \n\t"\
  797. " jb 1b \n\t"
  798. #define WRITEBGR24MMX2(dst, dstw, index) \
  799. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  800. "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
  801. "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
  802. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  803. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  804. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  805. \
  806. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  807. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  808. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  809. \
  810. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  811. "por %%mm1, %%mm6 \n\t"\
  812. "por %%mm3, %%mm6 \n\t"\
  813. MOVNTQ(%%mm6, (dst))\
  814. \
  815. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  816. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  817. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  818. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  819. \
  820. "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  821. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  822. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  823. \
  824. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  825. "por %%mm3, %%mm6 \n\t"\
  826. MOVNTQ(%%mm6, 8(dst))\
  827. \
  828. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  829. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  830. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  831. \
  832. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  833. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  834. "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  835. \
  836. "por %%mm1, %%mm3 \n\t"\
  837. "por %%mm3, %%mm6 \n\t"\
  838. MOVNTQ(%%mm6, 16(dst))\
  839. \
  840. "add $24, "#dst" \n\t"\
  841. \
  842. "add $8, "#index" \n\t"\
  843. "cmp "#dstw", "#index" \n\t"\
  844. " jb 1b \n\t"
  845. #if HAVE_MMX2
  846. #undef WRITEBGR24
  847. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  848. #else
  849. #undef WRITEBGR24
  850. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  851. #endif
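/* Note: the MMX2 variant builds the 24bpp groups with pshufw shuffles and the ff_M24A/B/C masks
 * rather than the long shift/or chain of the plain MMX version; both produce the same B,G,R
 * byte stream. */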
  852. #define REAL_WRITEYUY2(dst, dstw, index) \
  853. "packuswb %%mm3, %%mm3 \n\t"\
  854. "packuswb %%mm4, %%mm4 \n\t"\
  855. "packuswb %%mm7, %%mm1 \n\t"\
  856. "punpcklbw %%mm4, %%mm3 \n\t"\
  857. "movq %%mm1, %%mm7 \n\t"\
  858. "punpcklbw %%mm3, %%mm1 \n\t"\
  859. "punpckhbw %%mm3, %%mm7 \n\t"\
  860. \
  861. MOVNTQ(%%mm1, (dst, index, 2))\
  862. MOVNTQ(%%mm7, 8(dst, index, 2))\
  863. \
  864. "add $8, "#index" \n\t"\
  865. "cmp "#dstw", "#index" \n\t"\
  866. " jb 1b \n\t"
  867. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
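/* WRITEYUY2 interleaves the already-scaled planes into packed YUYV. Per pair of luma samples the
 * store pattern is, as an illustrative C sketch (Y/U/V being the clipped component bytes): */
#if 0
dst[4*i + 0] = Y[2*i];
dst[4*i + 1] = U[i];
dst[4*i + 2] = Y[2*i + 1];
dst[4*i + 3] = V[i];
#endif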
  868. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  869. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  870. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  871. {
  872. #if HAVE_MMX
  873. if(!(c->flags & SWS_BITEXACT)){
  874. if (c->flags & SWS_ACCURATE_RND){
  875. if (uDest){
  876. YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  877. YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  878. }
  879. YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  880. }else{
  881. if (uDest){
  882. YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  883. YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  884. }
  885. YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  886. }
  887. return;
  888. }
  889. #endif
  890. #if HAVE_ALTIVEC
  891. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  892. chrFilter, chrSrc, chrFilterSize,
  893. dest, uDest, vDest, dstW, chrDstW);
  894. #else //HAVE_ALTIVEC
  895. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  896. chrFilter, chrSrc, chrFilterSize,
  897. dest, uDest, vDest, dstW, chrDstW);
  898. #endif //!HAVE_ALTIVEC
  899. }
  900. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  901. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  902. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  903. {
  904. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  905. chrFilter, chrSrc, chrFilterSize,
  906. dest, uDest, dstW, chrDstW, dstFormat);
  907. }
  908. static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
  909. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  910. {
  911. int i;
  912. #if HAVE_MMX
  913. if(!(c->flags & SWS_BITEXACT)){
  914. long p= uDest ? 3 : 1;
  915. uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
  916. uint8_t *dst[3]= {dest, uDest, vDest};
  917. long counter[3] = {dstW, chrDstW, chrDstW};
  918. if (c->flags & SWS_ACCURATE_RND){
  919. while(p--){
  920. __asm__ volatile(
  921. YSCALEYUV2YV121_ACCURATE
  922. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  923. "g" (-counter[p])
  924. : "%"REG_a
  925. );
  926. }
  927. }else{
  928. while(p--){
  929. __asm__ volatile(
  930. YSCALEYUV2YV121
  931. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  932. "g" (-counter[p])
  933. : "%"REG_a
  934. );
  935. }
  936. }
  937. return;
  938. }
  939. #endif
  940. for (i=0; i<dstW; i++)
  941. {
  942. int val= (lumSrc[i]+64)>>7;
  943. if (val&256){
  944. if (val<0) val=0;
  945. else val=255;
  946. }
  947. dest[i]= val;
  948. }
  949. if (uDest)
  950. for (i=0; i<chrDstW; i++)
  951. {
  952. int u=(chrSrc[i ]+64)>>7;
  953. int v=(chrSrc[i + VOFW]+64)>>7;
  954. if ((u|v)&256){
  955. if (u<0) u=0;
  956. else if (u>255) u=255;
  957. if (v<0) v=0;
  958. else if (v>255) v=255;
  959. }
  960. uDest[i]= u;
  961. vDest[i]= v;
  962. }
  963. }
  964. /**
  965. * vertical scale YV12 to RGB
  966. */
  967. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  968. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  969. uint8_t *dest, long dstW, long dstY)
  970. {
  971. #if HAVE_MMX
  972. long dummy=0;
  973. if(!(c->flags & SWS_BITEXACT)){
  974. if (c->flags & SWS_ACCURATE_RND){
  975. switch(c->dstFormat){
  976. case PIX_FMT_RGB32:
  977. YSCALEYUV2PACKEDX_ACCURATE
  978. YSCALEYUV2RGBX
  979. "pcmpeqd %%mm7, %%mm7 \n\t"
  980. WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  981. YSCALEYUV2PACKEDX_END
  982. return;
  983. case PIX_FMT_BGR24:
  984. YSCALEYUV2PACKEDX_ACCURATE
  985. YSCALEYUV2RGBX
  986. "pxor %%mm7, %%mm7 \n\t"
  987. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  988. "add %4, %%"REG_c" \n\t"
  989. WRITEBGR24(%%REGc, %5, %%REGa)
  990. :: "r" (&c->redDither),
  991. "m" (dummy), "m" (dummy), "m" (dummy),
  992. "r" (dest), "m" (dstW)
  993. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  994. );
  995. return;
  996. case PIX_FMT_RGB555:
  997. YSCALEYUV2PACKEDX_ACCURATE
  998. YSCALEYUV2RGBX
  999. "pxor %%mm7, %%mm7 \n\t"
  1000. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1001. #ifdef DITHER1XBPP
  1002. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  1003. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  1004. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  1005. #endif
  1006. WRITERGB15(%4, %5, %%REGa)
  1007. YSCALEYUV2PACKEDX_END
  1008. return;
  1009. case PIX_FMT_RGB565:
  1010. YSCALEYUV2PACKEDX_ACCURATE
  1011. YSCALEYUV2RGBX
  1012. "pxor %%mm7, %%mm7 \n\t"
  1013. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1014. #ifdef DITHER1XBPP
  1015. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  1016. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  1017. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  1018. #endif
  1019. WRITERGB16(%4, %5, %%REGa)
  1020. YSCALEYUV2PACKEDX_END
  1021. return;
  1022. case PIX_FMT_YUYV422:
  1023. YSCALEYUV2PACKEDX_ACCURATE
  1024. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1025. "psraw $3, %%mm3 \n\t"
  1026. "psraw $3, %%mm4 \n\t"
  1027. "psraw $3, %%mm1 \n\t"
  1028. "psraw $3, %%mm7 \n\t"
  1029. WRITEYUY2(%4, %5, %%REGa)
  1030. YSCALEYUV2PACKEDX_END
  1031. return;
  1032. }
  1033. }else{
  1034. switch(c->dstFormat)
  1035. {
  1036. case PIX_FMT_RGB32:
  1037. YSCALEYUV2PACKEDX
  1038. YSCALEYUV2RGBX
  1039. "pcmpeqd %%mm7, %%mm7 \n\t"
  1040. WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1041. YSCALEYUV2PACKEDX_END
  1042. return;
  1043. case PIX_FMT_BGR24:
  1044. YSCALEYUV2PACKEDX
  1045. YSCALEYUV2RGBX
  1046. "pxor %%mm7, %%mm7 \n\t"
  1047. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
  1048. "add %4, %%"REG_c" \n\t"
  1049. WRITEBGR24(%%REGc, %5, %%REGa)
  1050. :: "r" (&c->redDither),
  1051. "m" (dummy), "m" (dummy), "m" (dummy),
  1052. "r" (dest), "m" (dstW)
  1053. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1054. );
  1055. return;
  1056. case PIX_FMT_RGB555:
  1057. YSCALEYUV2PACKEDX
  1058. YSCALEYUV2RGBX
  1059. "pxor %%mm7, %%mm7 \n\t"
  1060. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1061. #ifdef DITHER1XBPP
  1062. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1063. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1064. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1065. #endif
  1066. WRITERGB15(%4, %5, %%REGa)
  1067. YSCALEYUV2PACKEDX_END
  1068. return;
  1069. case PIX_FMT_RGB565:
  1070. YSCALEYUV2PACKEDX
  1071. YSCALEYUV2RGBX
  1072. "pxor %%mm7, %%mm7 \n\t"
  1073. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1074. #ifdef DITHER1XBPP
  1075. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1076. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1077. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1078. #endif
  1079. WRITERGB16(%4, %5, %%REGa)
  1080. YSCALEYUV2PACKEDX_END
  1081. return;
  1082. case PIX_FMT_YUYV422:
  1083. YSCALEYUV2PACKEDX
  1084. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1085. "psraw $3, %%mm3 \n\t"
  1086. "psraw $3, %%mm4 \n\t"
  1087. "psraw $3, %%mm1 \n\t"
  1088. "psraw $3, %%mm7 \n\t"
  1089. WRITEYUY2(%4, %5, %%REGa)
  1090. YSCALEYUV2PACKEDX_END
  1091. return;
  1092. }
  1093. }
  1094. }
  1095. #endif /* HAVE_MMX */
  1096. #if HAVE_ALTIVEC
  1097. /* The following list of supported dstFormat values should
  1098. match what's found in the body of altivec_yuv2packedX() */
  1099. if (!(c->flags & SWS_BITEXACT) &&
  1100. (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
  1101. c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
  1102. c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
  1103. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1104. chrFilter, chrSrc, chrFilterSize,
  1105. dest, dstW, dstY);
  1106. else
  1107. #endif
  1108. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1109. chrFilter, chrSrc, chrFilterSize,
  1110. dest, dstW, dstY);
  1111. }
  1112. /**
  1113. * vertical bilinear scale YV12 to RGB
  1114. */
  1115. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1116. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1117. {
  1118. int yalpha1=4095- yalpha;
  1119. int uvalpha1=4095-uvalpha;
  1120. int i;
  1121. #if HAVE_MMX
  1122. if(!(c->flags & SWS_BITEXACT)){
  1123. switch(c->dstFormat)
  1124. {
  1125. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1126. case PIX_FMT_RGB32:
  1127. __asm__ volatile(
  1128. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1129. "mov %4, %%"REG_b" \n\t"
  1130. "push %%"REG_BP" \n\t"
  1131. YSCALEYUV2RGB(%%REGBP, %5)
  1132. "pcmpeqd %%mm7, %%mm7 \n\t"
  1133. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1134. "pop %%"REG_BP" \n\t"
  1135. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1136. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1137. "a" (&c->redDither)
  1138. );
  1139. return;
  1140. case PIX_FMT_BGR24:
  1141. __asm__ volatile(
  1142. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1143. "mov %4, %%"REG_b" \n\t"
  1144. "push %%"REG_BP" \n\t"
  1145. YSCALEYUV2RGB(%%REGBP, %5)
  1146. "pxor %%mm7, %%mm7 \n\t"
  1147. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1148. "pop %%"REG_BP" \n\t"
  1149. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1150. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1151. "a" (&c->redDither)
  1152. );
  1153. return;
  1154. case PIX_FMT_RGB555:
  1155. __asm__ volatile(
  1156. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1157. "mov %4, %%"REG_b" \n\t"
  1158. "push %%"REG_BP" \n\t"
  1159. YSCALEYUV2RGB(%%REGBP, %5)
  1160. "pxor %%mm7, %%mm7 \n\t"
  1161. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1162. #ifdef DITHER1XBPP
  1163. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1164. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1165. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1166. #endif
  1167. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1168. "pop %%"REG_BP" \n\t"
  1169. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1170. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1171. "a" (&c->redDither)
  1172. );
  1173. return;
  1174. case PIX_FMT_RGB565:
  1175. __asm__ volatile(
  1176. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1177. "mov %4, %%"REG_b" \n\t"
  1178. "push %%"REG_BP" \n\t"
  1179. YSCALEYUV2RGB(%%REGBP, %5)
  1180. "pxor %%mm7, %%mm7 \n\t"
  1181. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1182. #ifdef DITHER1XBPP
  1183. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1184. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1185. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1186. #endif
  1187. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1188. "pop %%"REG_BP" \n\t"
  1189. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1190. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1191. "a" (&c->redDither)
  1192. );
  1193. return;
  1194. case PIX_FMT_YUYV422:
  1195. __asm__ volatile(
  1196. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1197. "mov %4, %%"REG_b" \n\t"
  1198. "push %%"REG_BP" \n\t"
  1199. YSCALEYUV2PACKED(%%REGBP, %5)
  1200. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1201. "pop %%"REG_BP" \n\t"
  1202. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1203. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1204. "a" (&c->redDither)
  1205. );
  1206. return;
  1207. default: break;
  1208. }
  1209. }
  1210. #endif //HAVE_MMX
  1211. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
  1212. }
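/* The C fallback macros used above blend the two source lines with roughly
 * 12-bit fixed-point weights: luma is a weighted average of buf0[] and buf1[]
 * using yalpha and yalpha1 = 4095 - yalpha, chroma likewise with uvalpha.
 * Sketch (not the exact macro text):
 *     Y = (buf0[i]*yalpha1 + buf1[i]*yalpha) >> 19;
 * The MMX cases implement the same blend per destination format. */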
  1213. /**
  1214. * YV12 to RGB without scaling or interpolating
  1215. */
  1216. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1217. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1218. {
  1219. const int yalpha1=0;
  1220. int i;
  1221. uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1222. const int yalpha= 4096; //FIXME ...
  1223. if (flags&SWS_FULL_CHR_H_INT)
  1224. {
  1225. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1226. return;
  1227. }
  1228. #if HAVE_MMX
  1229. if(!(flags & SWS_BITEXACT)){
  1230. if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1231. {
  1232. switch(dstFormat)
  1233. {
  1234. case PIX_FMT_RGB32:
  1235. __asm__ volatile(
  1236. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1237. "mov %4, %%"REG_b" \n\t"
  1238. "push %%"REG_BP" \n\t"
  1239. YSCALEYUV2RGB1(%%REGBP, %5)
  1240. "pcmpeqd %%mm7, %%mm7 \n\t"
  1241. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1242. "pop %%"REG_BP" \n\t"
  1243. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1244. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1245. "a" (&c->redDither)
  1246. );
  1247. return;
  1248. case PIX_FMT_BGR24:
  1249. __asm__ volatile(
  1250. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1251. "mov %4, %%"REG_b" \n\t"
  1252. "push %%"REG_BP" \n\t"
  1253. YSCALEYUV2RGB1(%%REGBP, %5)
  1254. "pxor %%mm7, %%mm7 \n\t"
  1255. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1256. "pop %%"REG_BP" \n\t"
  1257. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1258. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1259. "a" (&c->redDither)
  1260. );
  1261. return;
  1262. case PIX_FMT_RGB555:
  1263. __asm__ volatile(
  1264. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1265. "mov %4, %%"REG_b" \n\t"
  1266. "push %%"REG_BP" \n\t"
  1267. YSCALEYUV2RGB1(%%REGBP, %5)
  1268. "pxor %%mm7, %%mm7 \n\t"
  1269. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1270. #ifdef DITHER1XBPP
  1271. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1272. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1273. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1274. #endif
  1275. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1276. "pop %%"REG_BP" \n\t"
  1277. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1278. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1279. "a" (&c->redDither)
  1280. );
  1281. return;
  1282. case PIX_FMT_RGB565:
  1283. __asm__ volatile(
  1284. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1285. "mov %4, %%"REG_b" \n\t"
  1286. "push %%"REG_BP" \n\t"
  1287. YSCALEYUV2RGB1(%%REGBP, %5)
  1288. "pxor %%mm7, %%mm7 \n\t"
  1289. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1290. #ifdef DITHER1XBPP
  1291. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1292. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1293. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1294. #endif
  1295. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1296. "pop %%"REG_BP" \n\t"
  1297. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1298. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1299. "a" (&c->redDither)
  1300. );
  1301. return;
  1302. case PIX_FMT_YUYV422:
  1303. __asm__ volatile(
  1304. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1305. "mov %4, %%"REG_b" \n\t"
  1306. "push %%"REG_BP" \n\t"
  1307. YSCALEYUV2PACKED1(%%REGBP, %5)
  1308. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1309. "pop %%"REG_BP" \n\t"
  1310. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1311. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1312. "a" (&c->redDither)
  1313. );
  1314. return;
  1315. }
  1316. }
  1317. else
  1318. {
  1319. switch(dstFormat)
  1320. {
  1321. case PIX_FMT_RGB32:
  1322. __asm__ volatile(
  1323. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1324. "mov %4, %%"REG_b" \n\t"
  1325. "push %%"REG_BP" \n\t"
  1326. YSCALEYUV2RGB1b(%%REGBP, %5)
  1327. "pcmpeqd %%mm7, %%mm7 \n\t"
  1328. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1329. "pop %%"REG_BP" \n\t"
  1330. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1331. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1332. "a" (&c->redDither)
  1333. );
  1334. return;
  1335. case PIX_FMT_BGR24:
  1336. __asm__ volatile(
  1337. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1338. "mov %4, %%"REG_b" \n\t"
  1339. "push %%"REG_BP" \n\t"
  1340. YSCALEYUV2RGB1b(%%REGBP, %5)
  1341. "pxor %%mm7, %%mm7 \n\t"
  1342. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1343. "pop %%"REG_BP" \n\t"
  1344. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1345. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1346. "a" (&c->redDither)
  1347. );
  1348. return;
  1349. case PIX_FMT_RGB555:
  1350. __asm__ volatile(
  1351. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1352. "mov %4, %%"REG_b" \n\t"
  1353. "push %%"REG_BP" \n\t"
  1354. YSCALEYUV2RGB1b(%%REGBP, %5)
  1355. "pxor %%mm7, %%mm7 \n\t"
  1356. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1357. #ifdef DITHER1XBPP
  1358. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1359. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1360. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1361. #endif
  1362. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1363. "pop %%"REG_BP" \n\t"
  1364. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1365. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1366. "a" (&c->redDither)
  1367. );
  1368. return;
  1369. case PIX_FMT_RGB565:
  1370. __asm__ volatile(
  1371. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1372. "mov %4, %%"REG_b" \n\t"
  1373. "push %%"REG_BP" \n\t"
  1374. YSCALEYUV2RGB1b(%%REGBP, %5)
  1375. "pxor %%mm7, %%mm7 \n\t"
  1376. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1377. #ifdef DITHER1XBPP
  1378. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1379. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1380. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1381. #endif
  1382. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1383. "pop %%"REG_BP" \n\t"
  1384. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1385. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1386. "a" (&c->redDither)
  1387. );
  1388. return;
  1389. case PIX_FMT_YUYV422:
  1390. __asm__ volatile(
  1391. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1392. "mov %4, %%"REG_b" \n\t"
  1393. "push %%"REG_BP" \n\t"
  1394. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1395. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1396. "pop %%"REG_BP" \n\t"
  1397. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1398. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1399. "a" (&c->redDither)
  1400. );
  1401. return;
  1402. }
  1403. }
  1404. }
  1405. #endif /* HAVE_MMX */
  1406. if (uvalpha < 2048)
  1407. {
  1408. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1409. }else{
  1410. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1411. }
  1412. }
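/* Path selection above: uvalpha < 2048 picks the *1 variants, which read
 * chroma from a single buffer (hence the half-pixel chroma shift noted in the
 * comment), while uvalpha >= 2048 picks the *1b variants, which appear to
 * average uvbuf0 and uvbuf1. Luma is taken from buf0 alone in both cases
 * (yalpha is fixed at 4096 here). */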
  1413. //FIXME yuy2* can read up to 7 samples too much
  1414. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1415. {
  1416. #if HAVE_MMX
  1417. __asm__ volatile(
  1418. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1419. "mov %0, %%"REG_a" \n\t"
  1420. "1: \n\t"
  1421. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1422. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1423. "pand %%mm2, %%mm0 \n\t"
  1424. "pand %%mm2, %%mm1 \n\t"
  1425. "packuswb %%mm1, %%mm0 \n\t"
  1426. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1427. "add $8, %%"REG_a" \n\t"
  1428. " js 1b \n\t"
  1429. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1430. : "%"REG_a
  1431. );
  1432. #else
  1433. int i;
  1434. for (i=0; i<width; i++)
  1435. dst[i]= src[2*i];
  1436. #endif
  1437. }
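/* yuy2ToY above extracts luma from packed YUYV: the C path is simply
 * dst[i] = src[2*i]; the MMX loop does the same eight pixels at a time by
 * masking the even (luma) bytes with bm01010101 and packing them back down. */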
  1438. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1439. {
  1440. #if HAVE_MMX
  1441. __asm__ volatile(
  1442. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1443. "mov %0, %%"REG_a" \n\t"
  1444. "1: \n\t"
  1445. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1446. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1447. "psrlw $8, %%mm0 \n\t"
  1448. "psrlw $8, %%mm1 \n\t"
  1449. "packuswb %%mm1, %%mm0 \n\t"
  1450. "movq %%mm0, %%mm1 \n\t"
  1451. "psrlw $8, %%mm0 \n\t"
  1452. "pand %%mm4, %%mm1 \n\t"
  1453. "packuswb %%mm0, %%mm0 \n\t"
  1454. "packuswb %%mm1, %%mm1 \n\t"
  1455. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1456. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1457. "add $4, %%"REG_a" \n\t"
  1458. " js 1b \n\t"
  1459. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1460. : "%"REG_a
  1461. );
  1462. #else
  1463. int i;
  1464. for (i=0; i<width; i++)
  1465. {
  1466. dstU[i]= src1[4*i + 1];
  1467. dstV[i]= src1[4*i + 3];
  1468. }
  1469. #endif
  1470. assert(src1 == src2);
  1471. }
1472. /* This is almost identical to the previous, and exists only because
1473. * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
1472. /* This is almost identical to the previous, and exists only because
  1474. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1475. {
  1476. #if HAVE_MMX
  1477. __asm__ volatile(
  1478. "mov %0, %%"REG_a" \n\t"
  1479. "1: \n\t"
  1480. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1481. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1482. "psrlw $8, %%mm0 \n\t"
  1483. "psrlw $8, %%mm1 \n\t"
  1484. "packuswb %%mm1, %%mm0 \n\t"
  1485. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1486. "add $8, %%"REG_a" \n\t"
  1487. " js 1b \n\t"
  1488. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1489. : "%"REG_a
  1490. );
  1491. #else
  1492. int i;
  1493. for (i=0; i<width; i++)
  1494. dst[i]= src[2*i+1];
  1495. #endif
  1496. }
  1497. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1498. {
  1499. #if HAVE_MMX
  1500. __asm__ volatile(
  1501. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1502. "mov %0, %%"REG_a" \n\t"
  1503. "1: \n\t"
  1504. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1505. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1506. "pand %%mm4, %%mm0 \n\t"
  1507. "pand %%mm4, %%mm1 \n\t"
  1508. "packuswb %%mm1, %%mm0 \n\t"
  1509. "movq %%mm0, %%mm1 \n\t"
  1510. "psrlw $8, %%mm0 \n\t"
  1511. "pand %%mm4, %%mm1 \n\t"
  1512. "packuswb %%mm0, %%mm0 \n\t"
  1513. "packuswb %%mm1, %%mm1 \n\t"
  1514. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1515. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1516. "add $4, %%"REG_a" \n\t"
  1517. " js 1b \n\t"
  1518. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1519. : "%"REG_a
  1520. );
  1521. #else
  1522. int i;
  1523. for (i=0; i<width; i++)
  1524. {
  1525. dstU[i]= src1[4*i + 0];
  1526. dstV[i]= src1[4*i + 2];
  1527. }
  1528. #endif
  1529. assert(src1 == src2);
  1530. }
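/* YUYV stores bytes as Y0 U Y1 V while UYVY stores U Y0 V Y1, which is why the
 * two format pairs need separate readers: the byte offsets in the C fallbacks
 * (2*i vs 2*i+1, and 4*i+1/4*i+3 vs 4*i+0/4*i+2) differ by one, and reusing
 * the YUYV code on src+1 would make every MMX load unaligned. */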
  1531. #define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
  1532. static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\
  1533. {\
  1534. int i;\
  1535. for (i=0; i<width; i++)\
  1536. {\
  1537. int b= (((type*)src)[i]>>shb)&maskb;\
  1538. int g= (((type*)src)[i]>>shg)&maskg;\
  1539. int r= (((type*)src)[i]>>shr)&maskr;\
  1540. \
  1541. dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
  1542. }\
  1543. }
  1544. BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1545. BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1546. BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8)
  1547. BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+7)
  1548. BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
  1549. BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
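/* Sketch of how the BGR2Y instantiations above work: each component is pulled
 * out with a per-format shift/mask, and the coefficient arguments are
 * pre-shifted (e.g. RY<<11, GY<<5, BY for the 5/6/5 formats) so that, roughly,
 * every component's contribution ends up at the same 2^8 scale relative to
 * RGB2YUV_SHIFT, matching S = RGB2YUV_SHIFT+8; the 15-bit formats use +7,
 * since their packed components carry one bit less of implicit scaling. */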
  1550. #define BGR2UV(type, name, shr, shg, shb, maska, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
  1551. static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1552. {\
  1553. int i;\
  1554. for (i=0; i<width; i++)\
  1555. {\
  1556. int b= (((type*)src)[i]&maskb)>>shb;\
  1557. int g= (((type*)src)[i]&maskg)>>shg;\
  1558. int r= (((type*)src)[i]&maskr)>>shr;\
  1559. \
  1560. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
  1561. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
  1562. }\
  1563. }\
  1564. static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1565. {\
  1566. int i;\
  1567. for (i=0; i<width; i++)\
  1568. {\
  1569. int pix0= ((type*)src)[2*i+0];\
  1570. int pix1= ((type*)src)[2*i+1];\
  1571. int g= (pix0&(maskg|maska))+(pix1&(maskg|maska));\
  1572. int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
  1573. int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
  1574. g&= maskg|(2*maskg);\
  1575. \
  1576. g>>=shg;\
  1577. \
  1578. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
  1579. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
  1580. }\
  1581. }
  1582. BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF000000, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1583. BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0xFF000000, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1584. BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
  1585. BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
  1586. BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
  1587. BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
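/* The *_half readers above average two horizontally adjacent pixels for
 * chroma subsampling. The mask arithmetic appears to work as follows: the
 * green (+alpha) fields of both pixels are summed separately so their carry
 * cannot corrupt the neighbouring field, that sum is subtracted from
 * pix0+pix1, and the remaining red and blue sums are extracted with the
 * widened masks (mask | 2*mask); the final >>((S)+1) divides by two pixels. */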
  1588. #if HAVE_MMX
  1589. static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat)
  1590. {
  1591. if(srcFormat == PIX_FMT_BGR24){
  1592. __asm__ volatile(
  1593. "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
  1594. "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
  1595. :
  1596. );
  1597. }else{
  1598. __asm__ volatile(
  1599. "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
  1600. "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
  1601. :
  1602. );
  1603. }
  1604. __asm__ volatile(
  1605. "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
  1606. "mov %2, %%"REG_a" \n\t"
  1607. "pxor %%mm7, %%mm7 \n\t"
  1608. "1: \n\t"
  1609. PREFETCH" 64(%0) \n\t"
  1610. "movd (%0), %%mm0 \n\t"
  1611. "movd 2(%0), %%mm1 \n\t"
  1612. "movd 6(%0), %%mm2 \n\t"
  1613. "movd 8(%0), %%mm3 \n\t"
  1614. "add $12, %0 \n\t"
  1615. "punpcklbw %%mm7, %%mm0 \n\t"
  1616. "punpcklbw %%mm7, %%mm1 \n\t"
  1617. "punpcklbw %%mm7, %%mm2 \n\t"
  1618. "punpcklbw %%mm7, %%mm3 \n\t"
  1619. "pmaddwd %%mm5, %%mm0 \n\t"
  1620. "pmaddwd %%mm6, %%mm1 \n\t"
  1621. "pmaddwd %%mm5, %%mm2 \n\t"
  1622. "pmaddwd %%mm6, %%mm3 \n\t"
  1623. "paddd %%mm1, %%mm0 \n\t"
  1624. "paddd %%mm3, %%mm2 \n\t"
  1625. "paddd %%mm4, %%mm0 \n\t"
  1626. "paddd %%mm4, %%mm2 \n\t"
  1627. "psrad $15, %%mm0 \n\t"
  1628. "psrad $15, %%mm2 \n\t"
  1629. "packssdw %%mm2, %%mm0 \n\t"
  1630. "packuswb %%mm0, %%mm0 \n\t"
  1631. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1632. "add $4, %%"REG_a" \n\t"
  1633. " js 1b \n\t"
  1634. : "+r" (src)
  1635. : "r" (dst+width), "g" (-width)
  1636. : "%"REG_a
  1637. );
  1638. }
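/* Sketch of the MMX luma loop above: each iteration consumes 12 bytes (four
 * packed 24-bit pixels) via overlapping 4-byte loads at offsets 0/2/6/8,
 * widens them to words, multiplies with the pre-split coefficient pairs held
 * in %%mm5/%%mm6 (ff_bgr24toY1Coeff/ff_bgr24toY2Coeff, or the rgb24 variants),
 * adds ff_bgr24toYOffset for rounding, shifts right by 15 and stores four
 * luma bytes. */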
  1639. static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
  1640. {
  1641. __asm__ volatile(
  1642. "movq 24(%4), %%mm6 \n\t"
  1643. "mov %3, %%"REG_a" \n\t"
  1644. "pxor %%mm7, %%mm7 \n\t"
  1645. "1: \n\t"
  1646. PREFETCH" 64(%0) \n\t"
  1647. "movd (%0), %%mm0 \n\t"
  1648. "movd 2(%0), %%mm1 \n\t"
  1649. "punpcklbw %%mm7, %%mm0 \n\t"
  1650. "punpcklbw %%mm7, %%mm1 \n\t"
  1651. "movq %%mm0, %%mm2 \n\t"
  1652. "movq %%mm1, %%mm3 \n\t"
  1653. "pmaddwd (%4), %%mm0 \n\t"
  1654. "pmaddwd 8(%4), %%mm1 \n\t"
  1655. "pmaddwd 16(%4), %%mm2 \n\t"
  1656. "pmaddwd %%mm6, %%mm3 \n\t"
  1657. "paddd %%mm1, %%mm0 \n\t"
  1658. "paddd %%mm3, %%mm2 \n\t"
  1659. "movd 6(%0), %%mm1 \n\t"
  1660. "movd 8(%0), %%mm3 \n\t"
  1661. "add $12, %0 \n\t"
  1662. "punpcklbw %%mm7, %%mm1 \n\t"
  1663. "punpcklbw %%mm7, %%mm3 \n\t"
  1664. "movq %%mm1, %%mm4 \n\t"
  1665. "movq %%mm3, %%mm5 \n\t"
  1666. "pmaddwd (%4), %%mm1 \n\t"
  1667. "pmaddwd 8(%4), %%mm3 \n\t"
  1668. "pmaddwd 16(%4), %%mm4 \n\t"
  1669. "pmaddwd %%mm6, %%mm5 \n\t"
  1670. "paddd %%mm3, %%mm1 \n\t"
  1671. "paddd %%mm5, %%mm4 \n\t"
  1672. "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
  1673. "paddd %%mm3, %%mm0 \n\t"
  1674. "paddd %%mm3, %%mm2 \n\t"
  1675. "paddd %%mm3, %%mm1 \n\t"
  1676. "paddd %%mm3, %%mm4 \n\t"
  1677. "psrad $15, %%mm0 \n\t"
  1678. "psrad $15, %%mm2 \n\t"
  1679. "psrad $15, %%mm1 \n\t"
  1680. "psrad $15, %%mm4 \n\t"
  1681. "packssdw %%mm1, %%mm0 \n\t"
  1682. "packssdw %%mm4, %%mm2 \n\t"
  1683. "packuswb %%mm0, %%mm0 \n\t"
  1684. "packuswb %%mm2, %%mm2 \n\t"
  1685. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1686. "movd %%mm2, (%2, %%"REG_a") \n\t"
  1687. "add $4, %%"REG_a" \n\t"
  1688. " js 1b \n\t"
  1689. : "+r" (src)
  1690. : "r" (dstU+width), "r" (dstV+width), "g" (-width), "r"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24])
  1691. : "%"REG_a
  1692. );
  1693. }
  1694. #endif
  1695. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1696. {
  1697. #if HAVE_MMX
  1698. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
  1699. #else
  1700. int i;
  1701. for (i=0; i<width; i++)
  1702. {
  1703. int b= src[i*3+0];
  1704. int g= src[i*3+1];
  1705. int r= src[i*3+2];
  1706. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1707. }
  1708. #endif /* HAVE_MMX */
  1709. }
  1710. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1711. {
  1712. #if HAVE_MMX
  1713. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
  1714. #else
  1715. int i;
  1716. for (i=0; i<width; i++)
  1717. {
  1718. int b= src1[3*i + 0];
  1719. int g= src1[3*i + 1];
  1720. int r= src1[3*i + 2];
  1721. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1722. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1723. }
  1724. #endif /* HAVE_MMX */
  1725. assert(src1 == src2);
  1726. }
  1727. static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1728. {
  1729. int i;
  1730. for (i=0; i<width; i++)
  1731. {
  1732. int b= src1[6*i + 0] + src1[6*i + 3];
  1733. int g= src1[6*i + 1] + src1[6*i + 4];
  1734. int r= src1[6*i + 2] + src1[6*i + 5];
  1735. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1736. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1737. }
  1738. assert(src1 == src2);
  1739. }
  1740. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1741. {
  1742. #if HAVE_MMX
  1743. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
  1744. #else
  1745. int i;
  1746. for (i=0; i<width; i++)
  1747. {
  1748. int r= src[i*3+0];
  1749. int g= src[i*3+1];
  1750. int b= src[i*3+2];
  1751. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1752. }
  1753. #endif
  1754. }
  1755. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1756. {
  1757. #if HAVE_MMX
  1758. assert(src1==src2);
  1759. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
  1760. #else
  1761. int i;
  1762. assert(src1==src2);
  1763. for (i=0; i<width; i++)
  1764. {
  1765. int r= src1[3*i + 0];
  1766. int g= src1[3*i + 1];
  1767. int b= src1[3*i + 2];
  1768. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1769. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1770. }
  1771. #endif
  1772. }
  1773. static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1774. {
  1775. int i;
  1776. assert(src1==src2);
  1777. for (i=0; i<width; i++)
  1778. {
  1779. int r= src1[6*i + 0] + src1[6*i + 3];
  1780. int g= src1[6*i + 1] + src1[6*i + 4];
  1781. int b= src1[6*i + 2] + src1[6*i + 5];
  1782. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1783. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1784. }
  1785. }
  1786. static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal)
  1787. {
  1788. int i;
  1789. for (i=0; i<width; i++)
  1790. {
  1791. int d= src[i];
  1792. dst[i]= pal[d] & 0xFF;
  1793. }
  1794. }
  1795. static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal)
  1796. {
  1797. int i;
  1798. assert(src1 == src2);
  1799. for (i=0; i<width; i++)
  1800. {
  1801. int p= pal[src1[i]];
  1802. dstU[i]= p>>8;
  1803. dstV[i]= p>>16;
  1804. }
  1805. }
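/* The palette lookups above assume c->pal_yuv entries pack the converted
 * pixel as Y in bits 0-7, U in bits 8-15 and V in bits 16-23 (any higher bits
 * are discarded by the uint8_t stores). */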
  1806. static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1807. {
  1808. int i, j;
  1809. for (i=0; i<width/8; i++){
  1810. int d= ~src[i];
  1811. for(j=0; j<8; j++)
  1812. dst[8*i+j]= ((d>>(7-j))&1)*255;
  1813. }
  1814. }
  1815. static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1816. {
  1817. int i, j;
  1818. for (i=0; i<width/8; i++){
  1819. int d= src[i];
  1820. for(j=0; j<8; j++)
  1821. dst[8*i+j]= ((d>>(7-j))&1)*255;
  1822. }
  1823. }
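/* The monochrome readers above expand each input bit into a full 0/255 luma
 * byte, MSB first. PIX_FMT_MONOWHITE stores white as 0, hence the ~src[i]
 * inversion; PIX_FMT_MONOBLACK stores black as 0 and is expanded directly.
 * Note that the loops only cover width/8 complete input bytes. */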
  1824. // bilinear / bicubic scaling
  1825. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1826. int16_t *filter, int16_t *filterPos, long filterSize)
  1827. {
  1828. #if HAVE_MMX
  1829. assert(filterSize % 4 == 0 && filterSize>0);
  1830. if (filterSize==4) // Always true for upscaling, sometimes for down, too.
  1831. {
  1832. long counter= -2*dstW;
  1833. filter-= counter*2;
  1834. filterPos-= counter/2;
  1835. dst-= counter/2;
  1836. __asm__ volatile(
  1837. #if defined(PIC)
  1838. "push %%"REG_b" \n\t"
  1839. #endif
  1840. "pxor %%mm7, %%mm7 \n\t"
  1841. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1842. "mov %%"REG_a", %%"REG_BP" \n\t"
  1843. ASMALIGN(4)
  1844. "1: \n\t"
  1845. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1846. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1847. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  1848. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  1849. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1850. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1851. "punpcklbw %%mm7, %%mm0 \n\t"
  1852. "punpcklbw %%mm7, %%mm2 \n\t"
  1853. "pmaddwd %%mm1, %%mm0 \n\t"
  1854. "pmaddwd %%mm2, %%mm3 \n\t"
  1855. "movq %%mm0, %%mm4 \n\t"
  1856. "punpckldq %%mm3, %%mm0 \n\t"
  1857. "punpckhdq %%mm3, %%mm4 \n\t"
  1858. "paddd %%mm4, %%mm0 \n\t"
  1859. "psrad $7, %%mm0 \n\t"
  1860. "packssdw %%mm0, %%mm0 \n\t"
  1861. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1862. "add $4, %%"REG_BP" \n\t"
  1863. " jnc 1b \n\t"
  1864. "pop %%"REG_BP" \n\t"
  1865. #if defined(PIC)
  1866. "pop %%"REG_b" \n\t"
  1867. #endif
  1868. : "+a" (counter)
  1869. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1870. #if !defined(PIC)
  1871. : "%"REG_b
  1872. #endif
  1873. );
  1874. }
  1875. else if (filterSize==8)
  1876. {
  1877. long counter= -2*dstW;
  1878. filter-= counter*4;
  1879. filterPos-= counter/2;
  1880. dst-= counter/2;
  1881. __asm__ volatile(
  1882. #if defined(PIC)
  1883. "push %%"REG_b" \n\t"
  1884. #endif
  1885. "pxor %%mm7, %%mm7 \n\t"
  1886. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1887. "mov %%"REG_a", %%"REG_BP" \n\t"
  1888. ASMALIGN(4)
  1889. "1: \n\t"
  1890. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1891. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1892. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  1893. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  1894. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1895. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1896. "punpcklbw %%mm7, %%mm0 \n\t"
  1897. "punpcklbw %%mm7, %%mm2 \n\t"
  1898. "pmaddwd %%mm1, %%mm0 \n\t"
  1899. "pmaddwd %%mm2, %%mm3 \n\t"
  1900. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  1901. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  1902. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  1903. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  1904. "punpcklbw %%mm7, %%mm4 \n\t"
  1905. "punpcklbw %%mm7, %%mm2 \n\t"
  1906. "pmaddwd %%mm1, %%mm4 \n\t"
  1907. "pmaddwd %%mm2, %%mm5 \n\t"
  1908. "paddd %%mm4, %%mm0 \n\t"
  1909. "paddd %%mm5, %%mm3 \n\t"
  1910. "movq %%mm0, %%mm4 \n\t"
  1911. "punpckldq %%mm3, %%mm0 \n\t"
  1912. "punpckhdq %%mm3, %%mm4 \n\t"
  1913. "paddd %%mm4, %%mm0 \n\t"
  1914. "psrad $7, %%mm0 \n\t"
  1915. "packssdw %%mm0, %%mm0 \n\t"
  1916. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1917. "add $4, %%"REG_BP" \n\t"
  1918. " jnc 1b \n\t"
  1919. "pop %%"REG_BP" \n\t"
  1920. #if defined(PIC)
  1921. "pop %%"REG_b" \n\t"
  1922. #endif
  1923. : "+a" (counter)
  1924. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1925. #if !defined(PIC)
  1926. : "%"REG_b
  1927. #endif
  1928. );
  1929. }
  1930. else
  1931. {
  1932. uint8_t *offset = src+filterSize;
  1933. long counter= -2*dstW;
  1934. //filter-= counter*filterSize/2;
  1935. filterPos-= counter/2;
  1936. dst-= counter/2;
  1937. __asm__ volatile(
  1938. "pxor %%mm7, %%mm7 \n\t"
  1939. ASMALIGN(4)
  1940. "1: \n\t"
  1941. "mov %2, %%"REG_c" \n\t"
  1942. "movzwl (%%"REG_c", %0), %%eax \n\t"
  1943. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  1944. "mov %5, %%"REG_c" \n\t"
  1945. "pxor %%mm4, %%mm4 \n\t"
  1946. "pxor %%mm5, %%mm5 \n\t"
  1947. "2: \n\t"
  1948. "movq (%1), %%mm1 \n\t"
  1949. "movq (%1, %6), %%mm3 \n\t"
  1950. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  1951. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  1952. "punpcklbw %%mm7, %%mm0 \n\t"
  1953. "punpcklbw %%mm7, %%mm2 \n\t"
  1954. "pmaddwd %%mm1, %%mm0 \n\t"
  1955. "pmaddwd %%mm2, %%mm3 \n\t"
  1956. "paddd %%mm3, %%mm5 \n\t"
  1957. "paddd %%mm0, %%mm4 \n\t"
  1958. "add $8, %1 \n\t"
  1959. "add $4, %%"REG_c" \n\t"
  1960. "cmp %4, %%"REG_c" \n\t"
  1961. " jb 2b \n\t"
  1962. "add %6, %1 \n\t"
  1963. "movq %%mm4, %%mm0 \n\t"
  1964. "punpckldq %%mm5, %%mm4 \n\t"
  1965. "punpckhdq %%mm5, %%mm0 \n\t"
  1966. "paddd %%mm0, %%mm4 \n\t"
  1967. "psrad $7, %%mm4 \n\t"
  1968. "packssdw %%mm4, %%mm4 \n\t"
  1969. "mov %3, %%"REG_a" \n\t"
  1970. "movd %%mm4, (%%"REG_a", %0) \n\t"
  1971. "add $4, %0 \n\t"
  1972. " jnc 1b \n\t"
  1973. : "+r" (counter), "+r" (filter)
  1974. : "m" (filterPos), "m" (dst), "m"(offset),
  1975. "m" (src), "r" (filterSize*2)
  1976. : "%"REG_a, "%"REG_c, "%"REG_d
  1977. );
  1978. }
  1979. #else
  1980. #if HAVE_ALTIVEC
  1981. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  1982. #else
  1983. int i;
  1984. for (i=0; i<dstW; i++)
  1985. {
  1986. int j;
  1987. int srcPos= filterPos[i];
  1988. int val=0;
  1989. //printf("filterPos: %d\n", filterPos[i]);
  1990. for (j=0; j<filterSize; j++)
  1991. {
  1992. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  1993. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  1994. }
  1995. //filter += hFilterSize;
  1996. dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
  1997. //dst[i] = val>>7;
  1998. }
  1999. #endif /* HAVE_ALTIVEC */
  2000. #endif /* HAVE_MMX */
  2001. }
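/* What hScale() computes, per the C reference path above: a fixed-point FIR
 * over filterSize source samples for every output pixel,
 *     val    = sum_j src[filterPos[i] + j] * filter[filterSize*i + j];
 *     dst[i] = FFMIN(val >> 7, (1<<15)-1);
 * The MMX paths are specializations for filterSize 4 and 8 (two output pixels
 * per loop iteration) plus a generic inner loop for larger filters. */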
  2002. // *** horizontal scale Y line to temp buffer
  2003. static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2004. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2005. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2006. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2007. int32_t *mmx2FilterPos, uint32_t *pal)
  2008. {
  2009. if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
  2010. {
  2011. RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal);
  2012. src= formatConvBuffer;
  2013. }
  2014. else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
  2015. {
  2016. RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal);
  2017. src= formatConvBuffer;
  2018. }
  2019. else if (srcFormat==PIX_FMT_RGB32)
  2020. {
  2021. RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal);
  2022. src= formatConvBuffer;
  2023. }
  2024. else if (srcFormat==PIX_FMT_RGB32_1)
  2025. {
  2026. RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  2027. src= formatConvBuffer;
  2028. }
  2029. else if (srcFormat==PIX_FMT_BGR24)
  2030. {
  2031. RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal);
  2032. src= formatConvBuffer;
  2033. }
  2034. else if (srcFormat==PIX_FMT_BGR565)
  2035. {
  2036. RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal);
  2037. src= formatConvBuffer;
  2038. }
  2039. else if (srcFormat==PIX_FMT_BGR555)
  2040. {
  2041. RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal);
  2042. src= formatConvBuffer;
  2043. }
  2044. else if (srcFormat==PIX_FMT_BGR32)
  2045. {
  2046. RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal);
  2047. src= formatConvBuffer;
  2048. }
  2049. else if (srcFormat==PIX_FMT_BGR32_1)
  2050. {
  2051. RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  2052. src= formatConvBuffer;
  2053. }
  2054. else if (srcFormat==PIX_FMT_RGB24)
  2055. {
  2056. RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal);
  2057. src= formatConvBuffer;
  2058. }
  2059. else if (srcFormat==PIX_FMT_RGB565)
  2060. {
  2061. RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal);
  2062. src= formatConvBuffer;
  2063. }
  2064. else if (srcFormat==PIX_FMT_RGB555)
  2065. {
  2066. RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal);
  2067. src= formatConvBuffer;
  2068. }
  2069. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2070. {
  2071. RENAME(palToY)(formatConvBuffer, src, srcW, pal);
  2072. src= formatConvBuffer;
  2073. }
  2074. else if (srcFormat==PIX_FMT_MONOBLACK)
  2075. {
  2076. RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal);
  2077. src= formatConvBuffer;
  2078. }
  2079. else if (srcFormat==PIX_FMT_MONOWHITE)
  2080. {
  2081. RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal);
  2082. src= formatConvBuffer;
  2083. }
  2084. #if HAVE_MMX
  2085. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2086. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2087. #else
  2088. if (!(flags&SWS_FAST_BILINEAR))
  2089. #endif
  2090. {
  2091. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2092. }
  2093. else // fast bilinear upscale / crap downscale
  2094. {
  2095. #if ARCH_X86 && CONFIG_GPL
  2096. #if HAVE_MMX2
  2097. int i;
  2098. #if defined(PIC)
  2099. uint64_t ebxsave __attribute__((aligned(8)));
  2100. #endif
  2101. if (canMMX2BeUsed)
  2102. {
  2103. __asm__ volatile(
  2104. #if defined(PIC)
  2105. "mov %%"REG_b", %5 \n\t"
  2106. #endif
  2107. "pxor %%mm7, %%mm7 \n\t"
  2108. "mov %0, %%"REG_c" \n\t"
  2109. "mov %1, %%"REG_D" \n\t"
  2110. "mov %2, %%"REG_d" \n\t"
  2111. "mov %3, %%"REG_b" \n\t"
  2112. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2113. PREFETCH" (%%"REG_c") \n\t"
  2114. PREFETCH" 32(%%"REG_c") \n\t"
  2115. PREFETCH" 64(%%"REG_c") \n\t"
  2116. #if ARCH_X86_64
  2117. #define FUNNY_Y_CODE \
  2118. "movl (%%"REG_b"), %%esi \n\t"\
  2119. "call *%4 \n\t"\
  2120. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2121. "add %%"REG_S", %%"REG_c" \n\t"\
  2122. "add %%"REG_a", %%"REG_D" \n\t"\
  2123. "xor %%"REG_a", %%"REG_a" \n\t"\
  2124. #else
  2125. #define FUNNY_Y_CODE \
  2126. "movl (%%"REG_b"), %%esi \n\t"\
  2127. "call *%4 \n\t"\
  2128. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2129. "add %%"REG_a", %%"REG_D" \n\t"\
  2130. "xor %%"REG_a", %%"REG_a" \n\t"\
  2131. #endif /* ARCH_X86_64 */
  2132. FUNNY_Y_CODE
  2133. FUNNY_Y_CODE
  2134. FUNNY_Y_CODE
  2135. FUNNY_Y_CODE
  2136. FUNNY_Y_CODE
  2137. FUNNY_Y_CODE
  2138. FUNNY_Y_CODE
  2139. FUNNY_Y_CODE
  2140. #if defined(PIC)
  2141. "mov %5, %%"REG_b" \n\t"
  2142. #endif
  2143. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2144. "m" (funnyYCode)
  2145. #if defined(PIC)
  2146. ,"m" (ebxsave)
  2147. #endif
  2148. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2149. #if !defined(PIC)
  2150. ,"%"REG_b
  2151. #endif
  2152. );
  2153. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2154. }
  2155. else
  2156. {
  2157. #endif /* HAVE_MMX2 */
  2158. long xInc_shr16 = xInc >> 16;
  2159. uint16_t xInc_mask = xInc & 0xffff;
  2160. //NO MMX just normal asm ...
  2161. __asm__ volatile(
  2162. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2163. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2164. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2165. ASMALIGN(4)
  2166. "1: \n\t"
  2167. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2168. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2169. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2170. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2171. "shll $16, %%edi \n\t"
  2172. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2173. "mov %1, %%"REG_D" \n\t"
  2174. "shrl $9, %%esi \n\t"
  2175. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2176. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2177. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2178. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2179. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2180. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2181. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2182. "shll $16, %%edi \n\t"
  2183. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2184. "mov %1, %%"REG_D" \n\t"
  2185. "shrl $9, %%esi \n\t"
  2186. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2187. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2188. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2189. "add $2, %%"REG_a" \n\t"
  2190. "cmp %2, %%"REG_a" \n\t"
  2191. " jb 1b \n\t"
  2192. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2193. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2194. );
  2195. #if HAVE_MMX2
  2196. } //if MMX2 can't be used
  2197. #endif
  2198. #else
  2199. int i;
  2200. unsigned int xpos=0;
  2201. for (i=0;i<dstWidth;i++)
  2202. {
  2203. register unsigned int xx=xpos>>16;
  2204. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2205. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2206. xpos+=xInc;
  2207. }
  2208. #endif /* ARCH_X86 */
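/* The fast bilinear path above walks the source with a 16.16 fixed-point
 * position: xx = xpos>>16 selects the left sample and xalpha =
 * (xpos&0xFFFF)>>9 is a 7-bit blend weight, giving (see the C fallback)
 *     dst[i] = (src[xx]<<7) + (src[xx+1] - src[xx]) * xalpha;
 * The asm variants unroll the same computation and keep the 16.16 step split
 * into xInc_shr16 / xInc_mask, advancing with add/adc. */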
  2209. }
  2210. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2211. int i;
2212. //FIXME all pal and rgb srcFormats could do this conversion as well
  2213. //FIXME all scalers more complex than bilinear could do half of this transform
  2214. if(c->srcRange){
  2215. for (i=0; i<dstWidth; i++)
  2216. dst[i]= (dst[i]*14071 + 33561947)>>14;
  2217. }else{
  2218. for (i=0; i<dstWidth; i++)
  2219. dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
  2220. }
  2221. }
  2222. }
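/* A note on the luma range conversion just above: the constants appear to be
 * 14-bit fixed-point versions of the usual full<->limited range mapping on
 * the 7-bit-upscaled samples, i.e. roughly 14071/16384 ~= 219/255 with an
 * offset of about 16<<7 for full->limited, and 19077/16384 ~= 255/219 for the
 * reverse direction, with FFMIN() guarding against overflow. */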
  2223. inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2224. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2225. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2226. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2227. int32_t *mmx2FilterPos, uint32_t *pal)
  2228. {
  2229. if (srcFormat==PIX_FMT_YUYV422)
  2230. {
  2231. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2232. src1= formatConvBuffer;
  2233. src2= formatConvBuffer+VOFW;
  2234. }
  2235. else if (srcFormat==PIX_FMT_UYVY422)
  2236. {
  2237. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2238. src1= formatConvBuffer;
  2239. src2= formatConvBuffer+VOFW;
  2240. }
  2241. else if (srcFormat==PIX_FMT_RGB32)
  2242. {
  2243. if(c->chrSrcHSubSample)
  2244. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2245. else
  2246. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2247. src1= formatConvBuffer;
  2248. src2= formatConvBuffer+VOFW;
  2249. }
  2250. else if (srcFormat==PIX_FMT_RGB32_1)
  2251. {
  2252. if(c->chrSrcHSubSample)
  2253. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2254. else
  2255. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2256. src1= formatConvBuffer;
  2257. src2= formatConvBuffer+VOFW;
  2258. }
  2259. else if (srcFormat==PIX_FMT_BGR24)
  2260. {
  2261. if(c->chrSrcHSubSample)
  2262. RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2263. else
  2264. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2265. src1= formatConvBuffer;
  2266. src2= formatConvBuffer+VOFW;
  2267. }
  2268. else if (srcFormat==PIX_FMT_BGR565)
  2269. {
  2270. if(c->chrSrcHSubSample)
  2271. RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2272. else
  2273. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2274. src1= formatConvBuffer;
  2275. src2= formatConvBuffer+VOFW;
  2276. }
  2277. else if (srcFormat==PIX_FMT_BGR555)
  2278. {
  2279. if(c->chrSrcHSubSample)
  2280. RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2281. else
  2282. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2283. src1= formatConvBuffer;
  2284. src2= formatConvBuffer+VOFW;
  2285. }
  2286. else if (srcFormat==PIX_FMT_BGR32)
  2287. {
  2288. if(c->chrSrcHSubSample)
  2289. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2290. else
  2291. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2292. src1= formatConvBuffer;
  2293. src2= formatConvBuffer+VOFW;
  2294. }
  2295. else if (srcFormat==PIX_FMT_BGR32_1)
  2296. {
  2297. if(c->chrSrcHSubSample)
  2298. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2299. else
  2300. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2301. src1= formatConvBuffer;
  2302. src2= formatConvBuffer+VOFW;
  2303. }
  2304. else if (srcFormat==PIX_FMT_RGB24)
  2305. {
  2306. if(c->chrSrcHSubSample)
  2307. RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2308. else
  2309. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2310. src1= formatConvBuffer;
  2311. src2= formatConvBuffer+VOFW;
  2312. }
  2313. else if (srcFormat==PIX_FMT_RGB565)
  2314. {
  2315. if(c->chrSrcHSubSample)
  2316. RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2317. else
  2318. RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2319. src1= formatConvBuffer;
  2320. src2= formatConvBuffer+VOFW;
  2321. }
  2322. else if (srcFormat==PIX_FMT_RGB555)
  2323. {
  2324. if(c->chrSrcHSubSample)
  2325. RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2326. else
  2327. RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2328. src1= formatConvBuffer;
  2329. src2= formatConvBuffer+VOFW;
  2330. }
  2331. else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
  2332. {
  2333. return;
  2334. }
  2335. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2336. {
  2337. RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2338. src1= formatConvBuffer;
  2339. src2= formatConvBuffer+VOFW;
  2340. }
  2341. #if HAVE_MMX
  2342. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2343. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2344. #else
  2345. if (!(flags&SWS_FAST_BILINEAR))
  2346. #endif
  2347. {
  2348. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2349. RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2350. }
  2351. else // fast bilinear upscale / crap downscale
  2352. {
  2353. #if ARCH_X86 && CONFIG_GPL
  2354. #if HAVE_MMX2
  2355. int i;
  2356. #if defined(PIC)
  2357. uint64_t ebxsave __attribute__((aligned(8)));
  2358. #endif
  2359. if (canMMX2BeUsed)
  2360. {
  2361. __asm__ volatile(
  2362. #if defined(PIC)
  2363. "mov %%"REG_b", %6 \n\t"
  2364. #endif
  2365. "pxor %%mm7, %%mm7 \n\t"
  2366. "mov %0, %%"REG_c" \n\t"
  2367. "mov %1, %%"REG_D" \n\t"
  2368. "mov %2, %%"REG_d" \n\t"
  2369. "mov %3, %%"REG_b" \n\t"
  2370. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2371. PREFETCH" (%%"REG_c") \n\t"
  2372. PREFETCH" 32(%%"REG_c") \n\t"
  2373. PREFETCH" 64(%%"REG_c") \n\t"
  2374. #if ARCH_X86_64
  2375. #define FUNNY_UV_CODE \
  2376. "movl (%%"REG_b"), %%esi \n\t"\
  2377. "call *%4 \n\t"\
  2378. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2379. "add %%"REG_S", %%"REG_c" \n\t"\
  2380. "add %%"REG_a", %%"REG_D" \n\t"\
  2381. "xor %%"REG_a", %%"REG_a" \n\t"\
  2382. #else
  2383. #define FUNNY_UV_CODE \
  2384. "movl (%%"REG_b"), %%esi \n\t"\
  2385. "call *%4 \n\t"\
  2386. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2387. "add %%"REG_a", %%"REG_D" \n\t"\
  2388. "xor %%"REG_a", %%"REG_a" \n\t"\
  2389. #endif /* ARCH_X86_64 */
  2390. FUNNY_UV_CODE
  2391. FUNNY_UV_CODE
  2392. FUNNY_UV_CODE
  2393. FUNNY_UV_CODE
  2394. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2395. "mov %5, %%"REG_c" \n\t" // src
  2396. "mov %1, %%"REG_D" \n\t" // buf1
  2397. "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
  2398. PREFETCH" (%%"REG_c") \n\t"
  2399. PREFETCH" 32(%%"REG_c") \n\t"
  2400. PREFETCH" 64(%%"REG_c") \n\t"
  2401. FUNNY_UV_CODE
  2402. FUNNY_UV_CODE
  2403. FUNNY_UV_CODE
  2404. FUNNY_UV_CODE
  2405. #if defined(PIC)
  2406. "mov %6, %%"REG_b" \n\t"
  2407. #endif
  2408. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2409. "m" (funnyUVCode), "m" (src2)
  2410. #if defined(PIC)
  2411. ,"m" (ebxsave)
  2412. #endif
  2413. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2414. #if !defined(PIC)
  2415. ,"%"REG_b
  2416. #endif
  2417. );
  2418. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2419. {
  2420. //printf("%d %d %d\n", dstWidth, i, srcW);
  2421. dst[i] = src1[srcW-1]*128;
  2422. dst[i+VOFW] = src2[srcW-1]*128;
  2423. }
  2424. }
  2425. else
  2426. {
  2427. #endif /* HAVE_MMX2 */
  2428. long xInc_shr16 = (long) (xInc >> 16);
  2429. uint16_t xInc_mask = xInc & 0xffff;
  2430. __asm__ volatile(
  2431. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2432. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2433. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2434. ASMALIGN(4)
  2435. "1: \n\t"
  2436. "mov %0, %%"REG_S" \n\t"
  2437. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2438. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2439. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2440. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2441. "shll $16, %%edi \n\t"
  2442. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2443. "mov %1, %%"REG_D" \n\t"
  2444. "shrl $9, %%esi \n\t"
  2445. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2446. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2447. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2448. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2449. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2450. "shll $16, %%edi \n\t"
  2451. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2452. "mov %1, %%"REG_D" \n\t"
  2453. "shrl $9, %%esi \n\t"
  2454. "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
  2455. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2456. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2457. "add $1, %%"REG_a" \n\t"
  2458. "cmp %2, %%"REG_a" \n\t"
  2459. " jb 1b \n\t"
2460. /* GCC 3.3 makes MPlayer crash on IA-32 machines when using the "g" operand here,
2461. which is needed to support GCC 4.0. */
  2462. #if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2463. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2464. #else
  2465. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2466. #endif
  2467. "r" (src2)
  2468. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2469. );
  2470. #if HAVE_MMX2
  2471. } //if MMX2 can't be used
  2472. #endif
  2473. #else
  2474. int i;
  2475. unsigned int xpos=0;
  2476. for (i=0;i<dstWidth;i++)
  2477. {
  2478. register unsigned int xx=xpos>>16;
  2479. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2480. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2481. dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2482. /* slower
  2483. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2484. dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2485. */
  2486. xpos+=xInc;
  2487. }
  2488. #endif /* ARCH_X86 */
  2489. }
  2490. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2491. int i;
2492. //FIXME all pal and rgb srcFormats could do this conversion as well
  2493. //FIXME all scalers more complex than bilinear could do half of this transform
  2494. if(c->srcRange){
  2495. for (i=0; i<dstWidth; i++){
  2496. dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
  2497. dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
  2498. }
  2499. }else{
  2500. for (i=0; i<dstWidth; i++){
  2501. dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
  2502. dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
  2503. }
  2504. }
  2505. }
  2506. }
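/* The chroma range conversion above mirrors the luma one: 1799/2048 ~= 224/255
 * (full->limited) and 4663/4096 ~= 255/224 (limited->full), with an offset
 * that re-centres on the 128<<7 chroma midpoint and FFMIN() as an overflow
 * guard; this is an interpretation of the constants, not taken from the
 * original comments. */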
  2507. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2508. int srcSliceH, uint8_t* dst[], int dstStride[]){
2509. /* load a few things into local vars to make the code more readable and faster */
  2510. const int srcW= c->srcW;
  2511. const int dstW= c->dstW;
  2512. const int dstH= c->dstH;
  2513. const int chrDstW= c->chrDstW;
  2514. const int chrSrcW= c->chrSrcW;
  2515. const int lumXInc= c->lumXInc;
  2516. const int chrXInc= c->chrXInc;
  2517. const int dstFormat= c->dstFormat;
  2518. const int srcFormat= c->srcFormat;
  2519. const int flags= c->flags;
  2520. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2521. int16_t *vLumFilterPos= c->vLumFilterPos;
  2522. int16_t *vChrFilterPos= c->vChrFilterPos;
  2523. int16_t *hLumFilterPos= c->hLumFilterPos;
  2524. int16_t *hChrFilterPos= c->hChrFilterPos;
  2525. int16_t *vLumFilter= c->vLumFilter;
  2526. int16_t *vChrFilter= c->vChrFilter;
  2527. int16_t *hLumFilter= c->hLumFilter;
  2528. int16_t *hChrFilter= c->hChrFilter;
  2529. int32_t *lumMmxFilter= c->lumMmxFilter;
  2530. int32_t *chrMmxFilter= c->chrMmxFilter;
  2531. const int vLumFilterSize= c->vLumFilterSize;
  2532. const int vChrFilterSize= c->vChrFilterSize;
  2533. const int hLumFilterSize= c->hLumFilterSize;
  2534. const int hChrFilterSize= c->hChrFilterSize;
  2535. int16_t **lumPixBuf= c->lumPixBuf;
  2536. int16_t **chrPixBuf= c->chrPixBuf;
  2537. const int vLumBufSize= c->vLumBufSize;
  2538. const int vChrBufSize= c->vChrBufSize;
  2539. uint8_t *funnyYCode= c->funnyYCode;
  2540. uint8_t *funnyUVCode= c->funnyUVCode;
  2541. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2542. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2543. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2544. int lastDstY;
  2545. uint32_t *pal=c->pal_yuv;
  2546. /* vars which will change and which we need to store back in the context */
  2547. int dstY= c->dstY;
  2548. int lumBufIndex= c->lumBufIndex;
  2549. int chrBufIndex= c->chrBufIndex;
  2550. int lastInLumBuf= c->lastInLumBuf;
  2551. int lastInChrBuf= c->lastInChrBuf;
  2552. if (isPacked(c->srcFormat)){
  2553. src[0]=
  2554. src[1]=
  2555. src[2]= src[0];
  2556. srcStride[0]=
  2557. srcStride[1]=
  2558. srcStride[2]= srcStride[0];
  2559. }
  2560. srcStride[1]<<= c->vChrDrop;
  2561. srcStride[2]<<= c->vChrDrop;
  2562. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2563. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2564. #if 0 //self test FIXME move to a vfilter or something
  2565. {
  2566. static volatile int i=0;
  2567. i++;
  2568. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2569. selfTest(src, srcStride, c->srcW, c->srcH);
  2570. i--;
  2571. }
  2572. #endif
  2573. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2574. //dstStride[0],dstStride[1],dstStride[2]);
  2575. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2576. {
  2577. static int warnedAlready=0; //FIXME move this into the context perhaps
  2578. if (flags & SWS_PRINT_INFO && !warnedAlready)
  2579. {
  2580. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2581. " ->cannot do aligned memory accesses anymore\n");
  2582. warnedAlready=1;
  2583. }
  2584. }
2585. /* Note: the user might start scaling the picture in the middle, so this
2586. will not get executed. This is not really intended, but it currently
2587. works, so people might do it. */
  2588. if (srcSliceY ==0){
  2589. lumBufIndex=0;
  2590. chrBufIndex=0;
  2591. dstY=0;
  2592. lastInLumBuf= -1;
  2593. lastInChrBuf= -1;
  2594. }
  2595. lastDstY= dstY;
  2596. for (;dstY < dstH; dstY++){
  2597. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2598. const int chrDstY= dstY>>c->chrDstVSubSample;
  2599. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2600. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2601. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2602. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2603. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2604. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2605. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2606. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2607. //handle holes (FAST_BILINEAR & weird filters)
  2608. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2609. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2610. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2611. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2612. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2613. // Do we have enough lines in this slice to output the dstY line?
        if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
        {
            //Do horizontal scaling
            while(lastInLumBuf < lastLumSrcY)
            {
                uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
                lumBufIndex++;
                //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
                assert(lumBufIndex < 2*vLumBufSize);
                assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
                assert(lastInLumBuf + 1 - srcSliceY >= 0);
                //printf("%d %d\n", lumBufIndex, vLumBufSize);
                RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
                                flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
                                funnyYCode, c->srcFormat, formatConvBuffer,
                                c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
                lastInLumBuf++;
            }
            while(lastInChrBuf < lastChrSrcY)
            {
                uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
                uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
                chrBufIndex++;
                assert(chrBufIndex < 2*vChrBufSize);
                assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
                assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
                //FIXME replace parameters through context struct (some at least)
                if (!(isGray(srcFormat) || isGray(dstFormat)))
                    RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
                                    flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
                                    funnyUVCode, c->srcFormat, formatConvBuffer,
                                    c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
                lastInChrBuf++;
            }
            //wrap buf index around to stay inside the ring buffer
            if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
            if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
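            /* lumBufIndex/chrBufIndex cycle through 0..vLumBufSize-1 and
             * 0..vChrBufSize-1. The pointer arrays lumPixBuf/chrPixBuf are
             * assumed (per their allocation in swscale.c) to hold twice that
             * many entries, with the second half aliasing the first, so the
             * vertical pass below can read filterSize consecutive entries
             * without wrapping again. */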
        }
        else // not enough lines left in this slice -> load the rest in the buffer
        {
            /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
               firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
               lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
               vChrBufSize, vLumBufSize);*/

            //Do horizontal scaling
            while(lastInLumBuf+1 < srcSliceY + srcSliceH)
            {
                uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
                lumBufIndex++;
                assert(lumBufIndex < 2*vLumBufSize);
                assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
                assert(lastInLumBuf + 1 - srcSliceY >= 0);
                RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
                                flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
                                funnyYCode, c->srcFormat, formatConvBuffer,
                                c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
                lastInLumBuf++;
            }
            while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
            {
                uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
                uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
                chrBufIndex++;
                assert(chrBufIndex < 2*vChrBufSize);
                assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
                assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
                if (!(isGray(srcFormat) || isGray(dstFormat)))
                    RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
                                    flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
                                    funnyUVCode, c->srcFormat, formatConvBuffer,
                                    c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
                lastInChrBuf++;
            }
            //wrap buf index around to stay inside the ring buffer
            if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
            if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
            break; //we can't output a dstY line so let's try with the next slice
        }
#if HAVE_MMX
        c->blueDither= ff_dither8[dstY&1];
        if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
            c->greenDither= ff_dither8[dstY&1];
        else
            c->greenDither= ff_dither4[dstY&1];
        c->redDither= ff_dither8[(dstY+1)&1];
#endif
        if (dstY < dstH-2)
        {
            int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
            int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
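            /* lumPixBuf[lumBufIndex] holds source line lastInLumBuf, so adding
             * (firstLumSrcY - lastInLumBuf) points at the entry for the
             * filter's first input line; +vLumBufSize keeps the index
             * non-negative, relying on the (assumed) aliased second half of
             * the pointer array. Same reasoning for chroma. */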
#if HAVE_MMX
            int i;
            if (flags & SWS_ACCURATE_RND){
                int s= APCK_SIZE / 8;
                for (i=0; i<vLumFilterSize; i+=2){
                    *(void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
                    *(void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
                    lumMmxFilter[s*i+APCK_COEF/4  ]=
                    lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
                        + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                }
                for (i=0; i<vChrFilterSize; i+=2){
                    *(void**)&chrMmxFilter[s*i              ]= chrSrcPtr[i  ];
                    *(void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrSrcPtr[i+(vChrFilterSize>1)];
                    chrMmxFilter[s*i+APCK_COEF/4  ]=
                    chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
                        + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
                }
            }else{
                for (i=0; i<vLumFilterSize; i++)
                {
                    lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
                    lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
                    lumMmxFilter[4*i+2]=
                    lumMmxFilter[4*i+3]=
                        ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
                }
                for (i=0; i<vChrFilterSize; i++)
                {
                    chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
                    chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
                    chrMmxFilter[4*i+2]=
                    chrMmxFilter[4*i+3]=
                        ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
                }
            }
#endif
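            /* Layout, as stored above: in the plain path each tap occupies four
             * 32-bit slots - the source-line pointer split across slots 0/1 and
             * the 16-bit coefficient duplicated into both halves of slots 2/3
             * (coeff*0x10001) so it can be used directly as a packed-word
             * operand. The SWS_ACCURATE_RND path instead packs two taps per
             * APCK block: two line pointers plus a coefficient pair. */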
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
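                /* Example: with chrDstVSubSample==1 (4:2:0 output) the mask is
                 * 1, so chroma is written only on even output lines; odd lines
                 * pass uDest==NULL and only luma is stored. */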
                RENAME(yuv2nv12X)(c,
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, dstW, chrDstW, dstFormat);
            }
            else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
            {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
                if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
                {
                    int16_t *lumBuf = lumPixBuf[0];
                    int16_t *chrBuf= chrPixBuf[0];
                    RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
                }
                else //General YV12
                {
                    RENAME(yuv2yuvX)(c,
                        vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                        vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        dest, uDest, vDest, dstW, chrDstW);
                }
            }
            else
            {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
                if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
                {
                    int chrAlpha= vChrFilter[2*dstY+1];
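                    /* With a 2-tap vertical chroma filter the second
                     * coefficient serves as the blend factor between the two
                     * buffered chroma lines; the single luma line is used
                     * as-is, since vLumFilterSize==1 in this branch. */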
                    if(flags & SWS_FULL_CHR_H_INT){
                        yuv2rgbXinC_full(c, //FIXME write a packed1_full function
                            vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                            vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                            dest, dstW, dstY);
                    }else{
                        RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
                            dest, dstW, chrAlpha, dstFormat, flags, dstY);
                    }
                }
                else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
                {
                    int lumAlpha= vLumFilter[2*dstY+1];
                    int chrAlpha= vChrFilter[2*dstY+1];
                    lumMmxFilter[2]=
                    lumMmxFilter[3]= vLumFilter[2*dstY   ]*0x10001;
                    chrMmxFilter[2]=
                    chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
                    if(flags & SWS_FULL_CHR_H_INT){
                        yuv2rgbXinC_full(c, //FIXME write a packed2_full function
                            vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                            vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                            dest, dstW, dstY);
                    }else{
                        RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
                            dest, dstW, lumAlpha, chrAlpha, dstY);
                    }
                }
                else //general RGB
                {
                    if(flags & SWS_FULL_CHR_H_INT){
                        yuv2rgbXinC_full(c,
                            vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                            vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                            dest, dstW, dstY);
                    }else{
                        RENAME(yuv2packedX)(c,
                            vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                            vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                            dest, dstW, dstY);
                    }
                }
            }
        }
        else // hmm looks like we can't use MMX here without overwriting this array's tail
        {
            int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
            int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
                yuv2nv12XinC(
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, dstW, chrDstW, dstFormat);
            }
            else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
            {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
                yuv2yuvXinC(
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, vDest, dstW, chrDstW);
            }
            else
            {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
                if(flags & SWS_FULL_CHR_H_INT){
                    yuv2rgbXinC_full(c,
                        vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                        vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        dest, dstW, dstY);
                }else{
                    yuv2packedXinC(c,
                        vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                        vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        dest, dstW, dstY);
                }
            }
        }
    }

#if HAVE_MMX
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
#endif
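    /* SFENCE makes the weakly-ordered non-temporal (MOVNTQ) stores used by the
     * MMX2 output paths globally visible before we return; EMMS leaves MMX
     * state so that subsequent x87 floating-point code works. */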
    /* store changed local vars back in the context */
    c->dstY= dstY;
    c->lumBufIndex= lumBufIndex;
    c->chrBufIndex= chrBufIndex;
    c->lastInLumBuf= lastInLumBuf;
    c->lastInChrBuf= lastInChrBuf;
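    /* The return value is the number of output lines completed for this slice;
     * lines that could not be produced yet are finished when the caller feeds
     * the next slice, using the ring-buffer state saved just above. */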
    return dstY - lastDstY;
}