swscale_template.c

  1. /*
  2. * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <stdint.h>
  21. #include "libavutil/x86/asm.h"
  22. #include "libswscale/swscale_internal.h"
  23. #undef REAL_MOVNTQ
  24. #undef MOVNTQ
  25. #undef MOVNTQ2
  26. #undef PREFETCH
  27. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  28. #define MOVNTQ2 "movntq "
  29. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  30. #define YSCALEYUV2PACKEDX_UV \
  31. __asm__ volatile(\
  32. "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\
  33. ".p2align 4 \n\t"\
  34. "nop \n\t"\
  35. "1: \n\t"\
  36. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"FF_REG_d" \n\t"\
  37. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  38. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  39. "movq %%mm3, %%mm4 \n\t"\
  40. ".p2align 4 \n\t"\
  41. "2: \n\t"\
  42. "movq 8(%%"FF_REG_d"), %%mm0 \n\t" /* filterCoeff */\
  43. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm2 \n\t" /* UsrcData */\
  44. "add %6, %%"FF_REG_S" \n\t" \
  45. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm5 \n\t" /* VsrcData */\
  46. "add $16, %%"FF_REG_d" \n\t"\
  47. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  48. "pmulhw %%mm0, %%mm2 \n\t"\
  49. "pmulhw %%mm0, %%mm5 \n\t"\
  50. "paddw %%mm2, %%mm3 \n\t"\
  51. "paddw %%mm5, %%mm4 \n\t"\
  52. "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
  53. " jnz 2b \n\t"\
  54. #define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
  55. "lea "offset"(%0), %%"FF_REG_d" \n\t"\
  56. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  57. "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
  58. "movq "#dst1", "#dst2" \n\t"\
  59. ".p2align 4 \n\t"\
  60. "2: \n\t"\
  61. "movq 8(%%"FF_REG_d"), "#coeff" \n\t" /* filterCoeff */\
  62. "movq (%%"FF_REG_S", %%"FF_REG_a", 2), "#src1" \n\t" /* Y1srcData */\
  63. "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), "#src2" \n\t" /* Y2srcData */\
  64. "add $16, %%"FF_REG_d" \n\t"\
  65. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  66. "pmulhw "#coeff", "#src1" \n\t"\
  67. "pmulhw "#coeff", "#src2" \n\t"\
  68. "paddw "#src1", "#dst1" \n\t"\
  69. "paddw "#src2", "#dst2" \n\t"\
  70. "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
  71. " jnz 2b \n\t"\
  72. #define YSCALEYUV2PACKEDX \
  73. YSCALEYUV2PACKEDX_UV \
  74. YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
  75. #define YSCALEYUV2PACKEDX_END \
  76. :: "r" (&c->redDither), \
  77. "m" (dummy), "m" (dummy), "m" (dummy),\
  78. "r" (dest), "m" (dstW_reg), "m"(uv_off) \
  79. NAMED_CONSTRAINTS_ADD(bF8,bFC) \
  80. : "%"FF_REG_a, "%"FF_REG_d, "%"FF_REG_S \
  81. );
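/*
 * Illustrative sketch (not from the original file): the computation the
 * YSCALEYUV2PACKEDX_UV / _YA loops above perform, written as scalar C.  Each
 * output sample is a weighted sum over chrFilterSize (resp. lumFilterSize)
 * input lines at the same horizontal position; pmulhw keeps only the high 16
 * bits of each 16x16 multiply, modelled below as ">> 16", and paddw accumulates
 * in 16-bit lanes (wrap-around ignored here).  `rounder` stands in for the
 * VROUNDER_OFFSET constant preloaded into mm3/mm4; function and variable names
 * are illustrative only.
 */
#if 0
static void vfilter_chroma_sketch(const int16_t *chrFilter, const int16_t **chrUSrc,
                                  const int16_t **chrVSrc, int chrFilterSize,
                                  int16_t *uDst, int16_t *vDst, int dstW, int16_t rounder)
{
    for (int x = 0; x < dstW; x++) {
        int u = rounder, v = rounder;
        for (int i = 0; i < chrFilterSize; i++) {
            u += (chrUSrc[i][x] * chrFilter[i]) >> 16;   /* pmulhw + paddw */
            v += (chrVSrc[i][x] * chrFilter[i]) >> 16;
        }
        uDst[x] = (int16_t)u;
        vDst[x] = (int16_t)v;
    }
}
#endif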
  82. #define YSCALEYUV2PACKEDX_ACCURATE_UV \
  83. __asm__ volatile(\
  84. "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\
  85. ".p2align 4 \n\t"\
  86. "nop \n\t"\
  87. "1: \n\t"\
  88. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"FF_REG_d" \n\t"\
  89. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  90. "pxor %%mm4, %%mm4 \n\t"\
  91. "pxor %%mm5, %%mm5 \n\t"\
  92. "pxor %%mm6, %%mm6 \n\t"\
  93. "pxor %%mm7, %%mm7 \n\t"\
  94. ".p2align 4 \n\t"\
  95. "2: \n\t"\
  96. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm0 \n\t" /* UsrcData */\
  97. "add %6, %%"FF_REG_S" \n\t" \
  98. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm2 \n\t" /* VsrcData */\
  99. "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  100. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm1 \n\t" /* UsrcData */\
  101. "movq %%mm0, %%mm3 \n\t"\
  102. "punpcklwd %%mm1, %%mm0 \n\t"\
  103. "punpckhwd %%mm1, %%mm3 \n\t"\
  104. "movq "STR(APCK_COEF)"(%%"FF_REG_d"),%%mm1 \n\t" /* filterCoeff */\
  105. "pmaddwd %%mm1, %%mm0 \n\t"\
  106. "pmaddwd %%mm1, %%mm3 \n\t"\
  107. "paddd %%mm0, %%mm4 \n\t"\
  108. "paddd %%mm3, %%mm5 \n\t"\
  109. "add %6, %%"FF_REG_S" \n\t" \
  110. "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm3 \n\t" /* VsrcData */\
  111. "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  112. "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\
  113. "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
  114. "movq %%mm2, %%mm0 \n\t"\
  115. "punpcklwd %%mm3, %%mm2 \n\t"\
  116. "punpckhwd %%mm3, %%mm0 \n\t"\
  117. "pmaddwd %%mm1, %%mm2 \n\t"\
  118. "pmaddwd %%mm1, %%mm0 \n\t"\
  119. "paddd %%mm2, %%mm6 \n\t"\
  120. "paddd %%mm0, %%mm7 \n\t"\
  121. " jnz 2b \n\t"\
  122. "psrad $16, %%mm4 \n\t"\
  123. "psrad $16, %%mm5 \n\t"\
  124. "psrad $16, %%mm6 \n\t"\
  125. "psrad $16, %%mm7 \n\t"\
  126. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  127. "packssdw %%mm5, %%mm4 \n\t"\
  128. "packssdw %%mm7, %%mm6 \n\t"\
  129. "paddw %%mm0, %%mm4 \n\t"\
  130. "paddw %%mm0, %%mm6 \n\t"\
  131. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  132. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  133. #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
  134. "lea "offset"(%0), %%"FF_REG_d" \n\t"\
  135. "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  136. "pxor %%mm1, %%mm1 \n\t"\
  137. "pxor %%mm5, %%mm5 \n\t"\
  138. "pxor %%mm7, %%mm7 \n\t"\
  139. "pxor %%mm6, %%mm6 \n\t"\
  140. ".p2align 4 \n\t"\
  141. "2: \n\t"\
  142. "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  143. "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  144. "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  145. "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  146. "movq %%mm0, %%mm3 \n\t"\
  147. "punpcklwd %%mm4, %%mm0 \n\t"\
  148. "punpckhwd %%mm4, %%mm3 \n\t"\
  149. "movq "STR(APCK_COEF)"(%%"FF_REG_d"), %%mm4 \n\t" /* filterCoeff */\
  150. "pmaddwd %%mm4, %%mm0 \n\t"\
  151. "pmaddwd %%mm4, %%mm3 \n\t"\
  152. "paddd %%mm0, %%mm1 \n\t"\
  153. "paddd %%mm3, %%mm5 \n\t"\
  154. "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  155. "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
  156. "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\
  157. "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
  158. "movq %%mm2, %%mm0 \n\t"\
  159. "punpcklwd %%mm3, %%mm2 \n\t"\
  160. "punpckhwd %%mm3, %%mm0 \n\t"\
  161. "pmaddwd %%mm4, %%mm2 \n\t"\
  162. "pmaddwd %%mm4, %%mm0 \n\t"\
  163. "paddd %%mm2, %%mm7 \n\t"\
  164. "paddd %%mm0, %%mm6 \n\t"\
  165. " jnz 2b \n\t"\
  166. "psrad $16, %%mm1 \n\t"\
  167. "psrad $16, %%mm5 \n\t"\
  168. "psrad $16, %%mm7 \n\t"\
  169. "psrad $16, %%mm6 \n\t"\
  170. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  171. "packssdw %%mm5, %%mm1 \n\t"\
  172. "packssdw %%mm6, %%mm7 \n\t"\
  173. "paddw %%mm0, %%mm1 \n\t"\
  174. "paddw %%mm0, %%mm7 \n\t"\
  175. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  176. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  177. #define YSCALEYUV2PACKEDX_ACCURATE \
  178. YSCALEYUV2PACKEDX_ACCURATE_UV \
  179. YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
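/*
 * Illustrative sketch (not from the original file): the _ACCURATE variant differs
 * from the plain loop above by accumulating in 32 bits.  pmaddwd multiplies two
 * adjacent filter taps and adds their 32-bit products, paddd sums across the
 * filter, and only at the end does psrad $16 / packssdw bring the result back to
 * 16 bits before the rounder is added -- precision is lost once per output
 * sample rather than once per tap.  Names below are illustrative.
 */
#if 0
static int16_t vfilter_accurate_sketch(const int16_t **src, const int16_t *filter,
                                       int filterSize, int x, int16_t rounder)
{
    int32_t acc = 0;                           /* pmaddwd/paddd: 32-bit accumulation */
    for (int i = 0; i < filterSize; i++)
        acc += src[i][x] * filter[i];
    return (int16_t)((acc >> 16) + rounder);   /* psrad $16, packssdw, paddw */
}
#endif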
  180. #define YSCALEYUV2RGBX \
  181. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  182. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  183. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  184. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  185. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  186. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  187. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  188. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  189. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  190. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  191. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  192. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  193. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  194. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  195. "paddw %%mm3, %%mm4 \n\t"\
  196. "movq %%mm2, %%mm0 \n\t"\
  197. "movq %%mm5, %%mm6 \n\t"\
  198. "movq %%mm4, %%mm3 \n\t"\
  199. "punpcklwd %%mm2, %%mm2 \n\t"\
  200. "punpcklwd %%mm5, %%mm5 \n\t"\
  201. "punpcklwd %%mm4, %%mm4 \n\t"\
  202. "paddw %%mm1, %%mm2 \n\t"\
  203. "paddw %%mm1, %%mm5 \n\t"\
  204. "paddw %%mm1, %%mm4 \n\t"\
  205. "punpckhwd %%mm0, %%mm0 \n\t"\
  206. "punpckhwd %%mm6, %%mm6 \n\t"\
  207. "punpckhwd %%mm3, %%mm3 \n\t"\
  208. "paddw %%mm7, %%mm0 \n\t"\
  209. "paddw %%mm7, %%mm6 \n\t"\
  210. "paddw %%mm7, %%mm3 \n\t"\
  211. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  212. "packuswb %%mm0, %%mm2 \n\t"\
  213. "packuswb %%mm6, %%mm5 \n\t"\
  214. "packuswb %%mm3, %%mm4 \n\t"\
  215. #define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
  216. "movq "#b", "#q2" \n\t" /* B */\
  217. "movq "#r", "#t" \n\t" /* R */\
  218. "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
  219. "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
  220. "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
  221. "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
  222. "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
  223. "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
  224. "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
  225. "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
  226. "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
  227. "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
  228. \
  229. MOVNTQ( q0, (dst, index, 4))\
  230. MOVNTQ( b, 8(dst, index, 4))\
  231. MOVNTQ( q2, 16(dst, index, 4))\
  232. MOVNTQ( q3, 24(dst, index, 4))\
  233. \
  234. "add $8, "#index" \n\t"\
  235. "cmp "dstw", "#index" \n\t"\
  236. " jb 1b \n\t"
  237. #define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
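/*
 * Illustrative sketch (not from the original file): the byte order produced by
 * WRITEBGR32 above.  The punpck* cascade interleaves the B, G, R and A byte
 * vectors so that each pixel is stored as B,G,R,A in memory, i.e. 0xAARRGGBB
 * when read as a little-endian 32-bit word.
 */
#if 0
static void write_bgr32_sketch(uint8_t *dst, int x,
                               uint8_t b, uint8_t g, uint8_t r, uint8_t a)
{
    dst[4 * x + 0] = b;
    dst[4 * x + 1] = g;
    dst[4 * x + 2] = r;
    dst[4 * x + 3] = a;
}
#endif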
  238. static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
  239. const int16_t **lumSrc, int lumFilterSize,
  240. const int16_t *chrFilter, const int16_t **chrUSrc,
  241. const int16_t **chrVSrc,
  242. int chrFilterSize, const int16_t **alpSrc,
  243. uint8_t *dest, int dstW, int dstY)
  244. {
  245. x86_reg dummy=0;
  246. x86_reg dstW_reg = dstW;
  247. x86_reg uv_off = c->uv_offx2;
  248. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  249. YSCALEYUV2PACKEDX_ACCURATE
  250. YSCALEYUV2RGBX
  251. "movq %%mm2, "U_TEMP"(%0) \n\t"
  252. "movq %%mm4, "V_TEMP"(%0) \n\t"
  253. "movq %%mm5, "Y_TEMP"(%0) \n\t"
  254. YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
  255. "movq "Y_TEMP"(%0), %%mm5 \n\t"
  256. "psraw $3, %%mm1 \n\t"
  257. "psraw $3, %%mm7 \n\t"
  258. "packuswb %%mm7, %%mm1 \n\t"
  259. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
  260. YSCALEYUV2PACKEDX_END
  261. } else {
  262. YSCALEYUV2PACKEDX_ACCURATE
  263. YSCALEYUV2RGBX
  264. "pcmpeqd %%mm7, %%mm7 \n\t"
  265. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  266. YSCALEYUV2PACKEDX_END
  267. }
  268. }
  269. static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
  270. const int16_t **lumSrc, int lumFilterSize,
  271. const int16_t *chrFilter, const int16_t **chrUSrc,
  272. const int16_t **chrVSrc,
  273. int chrFilterSize, const int16_t **alpSrc,
  274. uint8_t *dest, int dstW, int dstY)
  275. {
  276. x86_reg dummy=0;
  277. x86_reg dstW_reg = dstW;
  278. x86_reg uv_off = c->uv_offx2;
  279. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  280. YSCALEYUV2PACKEDX
  281. YSCALEYUV2RGBX
  282. YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
  283. "psraw $3, %%mm1 \n\t"
  284. "psraw $3, %%mm7 \n\t"
  285. "packuswb %%mm7, %%mm1 \n\t"
  286. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
  287. YSCALEYUV2PACKEDX_END
  288. } else {
  289. YSCALEYUV2PACKEDX
  290. YSCALEYUV2RGBX
  291. "pcmpeqd %%mm7, %%mm7 \n\t"
  292. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  293. YSCALEYUV2PACKEDX_END
  294. }
  295. }
  296. static void RENAME(yuv2bgr32_X)(SwsContext *c, const int16_t *lumFilter,
  297. const int16_t **lumSrc, int lumFilterSize,
  298. const int16_t *chrFilter, const int16_t **chrUSrc,
  299. const int16_t **chrVSrc,
  300. int chrFilterSize, const int16_t **alpSrc,
  301. uint8_t *dest, int dstW, int dstY)
  302. {
  303. x86_reg dummy=0;
  304. x86_reg dstW_reg = dstW;
  305. x86_reg uv_off = c->uv_offx2;
  306. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  307. YSCALEYUV2PACKEDX
  308. YSCALEYUV2RGBX
  309. YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
  310. "psraw $3, %%mm1 \n\t"
  311. "psraw $3, %%mm7 \n\t"
  312. "packuswb %%mm7, %%mm1 \n\t"
  313. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm5, %%mm4, %%mm2, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
  314. YSCALEYUV2PACKEDX_END
  315. } else {
  316. YSCALEYUV2PACKEDX
  317. YSCALEYUV2RGBX
  318. "pcmpeqd %%mm7, %%mm7 \n\t"
  319. WRITEBGR32(%4, "%5", %%FF_REGa, %%mm5, %%mm4, %%mm2, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  320. YSCALEYUV2PACKEDX_END
  321. }
  322. }
  323. #define REAL_WRITERGB16(dst, dstw, index) \
  324. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  325. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  326. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  327. "psrlq $3, %%mm2 \n\t"\
  328. \
  329. "movq %%mm2, %%mm1 \n\t"\
  330. "movq %%mm4, %%mm3 \n\t"\
  331. \
  332. "punpcklbw %%mm7, %%mm3 \n\t"\
  333. "punpcklbw %%mm5, %%mm2 \n\t"\
  334. "punpckhbw %%mm7, %%mm4 \n\t"\
  335. "punpckhbw %%mm5, %%mm1 \n\t"\
  336. \
  337. "psllq $3, %%mm3 \n\t"\
  338. "psllq $3, %%mm4 \n\t"\
  339. \
  340. "por %%mm3, %%mm2 \n\t"\
  341. "por %%mm4, %%mm1 \n\t"\
  342. \
  343. MOVNTQ(%%mm2, (dst, index, 2))\
  344. MOVNTQ(%%mm1, 8(dst, index, 2))\
  345. \
  346. "add $8, "#index" \n\t"\
  347. "cmp "dstw", "#index" \n\t"\
  348. " jb 1b \n\t"
  349. #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
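/*
 * Illustrative sketch (not from the original file): the packing WRITERGB16 does
 * per pixel.  The bF8/bFC masks drop the bits RGB565 cannot hold and the shifts
 * place the fields as r[15:11], g[10:5], b[4:0].
 */
#if 0
static uint16_t pack_rgb565_sketch(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}
#endif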
  350. static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
  351. const int16_t **lumSrc, int lumFilterSize,
  352. const int16_t *chrFilter, const int16_t **chrUSrc,
  353. const int16_t **chrVSrc,
  354. int chrFilterSize, const int16_t **alpSrc,
  355. uint8_t *dest, int dstW, int dstY)
  356. {
  357. x86_reg dummy=0;
  358. x86_reg dstW_reg = dstW;
  359. x86_reg uv_off = c->uv_offx2;
  360. YSCALEYUV2PACKEDX_ACCURATE
  361. YSCALEYUV2RGBX
  362. "pxor %%mm7, %%mm7 \n\t"
  363. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  364. #ifdef DITHER1XBPP
  365. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  366. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  367. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  368. #endif
  369. WRITERGB16(%4, "%5", %%FF_REGa)
  370. YSCALEYUV2PACKEDX_END
  371. }
  372. static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
  373. const int16_t **lumSrc, int lumFilterSize,
  374. const int16_t *chrFilter, const int16_t **chrUSrc,
  375. const int16_t **chrVSrc,
  376. int chrFilterSize, const int16_t **alpSrc,
  377. uint8_t *dest, int dstW, int dstY)
  378. {
  379. x86_reg dummy=0;
  380. x86_reg dstW_reg = dstW;
  381. x86_reg uv_off = c->uv_offx2;
  382. YSCALEYUV2PACKEDX
  383. YSCALEYUV2RGBX
  384. "pxor %%mm7, %%mm7 \n\t"
  385. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  386. #ifdef DITHER1XBPP
  387. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  388. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  389. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  390. #endif
  391. WRITERGB16(%4, "%5", %%FF_REGa)
  392. YSCALEYUV2PACKEDX_END
  393. }
  394. #define REAL_WRITERGB15(dst, dstw, index) \
  395. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  396. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  397. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  398. "psrlq $3, %%mm2 \n\t"\
  399. "psrlq $1, %%mm5 \n\t"\
  400. \
  401. "movq %%mm2, %%mm1 \n\t"\
  402. "movq %%mm4, %%mm3 \n\t"\
  403. \
  404. "punpcklbw %%mm7, %%mm3 \n\t"\
  405. "punpcklbw %%mm5, %%mm2 \n\t"\
  406. "punpckhbw %%mm7, %%mm4 \n\t"\
  407. "punpckhbw %%mm5, %%mm1 \n\t"\
  408. \
  409. "psllq $2, %%mm3 \n\t"\
  410. "psllq $2, %%mm4 \n\t"\
  411. \
  412. "por %%mm3, %%mm2 \n\t"\
  413. "por %%mm4, %%mm1 \n\t"\
  414. \
  415. MOVNTQ(%%mm2, (dst, index, 2))\
  416. MOVNTQ(%%mm1, 8(dst, index, 2))\
  417. \
  418. "add $8, "#index" \n\t"\
  419. "cmp "dstw", "#index" \n\t"\
  420. " jb 1b \n\t"
  421. #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
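/*
 * Illustrative sketch (not from the original file): the equivalent packing for
 * WRITERGB15 -- all three channels masked to 5 bits, laid out as r[14:10],
 * g[9:5], b[4:0] with bit 15 left clear.
 */
#if 0
static uint16_t pack_rgb555_sketch(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r & 0xF8) << 7) | ((g & 0xF8) << 2) | (b >> 3));
}
#endif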
  422. static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
  423. const int16_t **lumSrc, int lumFilterSize,
  424. const int16_t *chrFilter, const int16_t **chrUSrc,
  425. const int16_t **chrVSrc,
  426. int chrFilterSize, const int16_t **alpSrc,
  427. uint8_t *dest, int dstW, int dstY)
  428. {
  429. x86_reg dummy=0;
  430. x86_reg dstW_reg = dstW;
  431. x86_reg uv_off = c->uv_offx2;
  432. YSCALEYUV2PACKEDX_ACCURATE
  433. YSCALEYUV2RGBX
  434. "pxor %%mm7, %%mm7 \n\t"
  435. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  436. #ifdef DITHER1XBPP
  437. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  438. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  439. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  440. #endif
  441. WRITERGB15(%4, "%5", %%FF_REGa)
  442. YSCALEYUV2PACKEDX_END
  443. }
  444. static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
  445. const int16_t **lumSrc, int lumFilterSize,
  446. const int16_t *chrFilter, const int16_t **chrUSrc,
  447. const int16_t **chrVSrc,
  448. int chrFilterSize, const int16_t **alpSrc,
  449. uint8_t *dest, int dstW, int dstY)
  450. {
  451. x86_reg dummy=0;
  452. x86_reg dstW_reg = dstW;
  453. x86_reg uv_off = c->uv_offx2;
  454. YSCALEYUV2PACKEDX
  455. YSCALEYUV2RGBX
  456. "pxor %%mm7, %%mm7 \n\t"
  457. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  458. #ifdef DITHER1XBPP
  459. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  460. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  461. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  462. #endif
  463. WRITERGB15(%4, "%5", %%FF_REGa)
  464. YSCALEYUV2PACKEDX_END
  465. }
  466. #define WRITEBGR24MMX(dst, dstw, index) \
  467. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  468. "movq %%mm2, %%mm1 \n\t" /* B */\
  469. "movq %%mm5, %%mm6 \n\t" /* R */\
  470. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  471. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  472. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  473. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  474. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  475. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  476. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  477. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  478. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  479. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  480. \
  481. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  482. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  483. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  484. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  485. \
  486. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  487. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  488. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  489. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  490. \
  491. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  492. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  493. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  494. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  495. \
  496. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  497. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  498. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  499. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  500. MOVNTQ(%%mm0, (dst))\
  501. \
  502. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  503. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  504. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  505. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  506. MOVNTQ(%%mm6, 8(dst))\
  507. \
  508. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  509. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  510. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  511. MOVNTQ(%%mm5, 16(dst))\
  512. \
  513. "add $24, "#dst" \n\t"\
  514. \
  515. "add $8, "#index" \n\t"\
  516. "cmp "dstw", "#index" \n\t"\
  517. " jb 1b \n\t"
  518. #define WRITEBGR24MMXEXT(dst, dstw, index) \
  519. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  520. "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
  521. "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
  522. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  523. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  524. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  525. \
  526. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  527. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  528. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  529. \
  530. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  531. "por %%mm1, %%mm6 \n\t"\
  532. "por %%mm3, %%mm6 \n\t"\
  533. MOVNTQ(%%mm6, (dst))\
  534. \
  535. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  536. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  537. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  538. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  539. \
  540. "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  541. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  542. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  543. \
  544. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  545. "por %%mm3, %%mm6 \n\t"\
  546. MOVNTQ(%%mm6, 8(dst))\
  547. \
  548. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  549. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  550. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  551. \
  552. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  553. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  554. "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  555. \
  556. "por %%mm1, %%mm3 \n\t"\
  557. "por %%mm3, %%mm6 \n\t"\
  558. MOVNTQ(%%mm6, 16(dst))\
  559. \
  560. "add $24, "#dst" \n\t"\
  561. \
  562. "add $8, "#index" \n\t"\
  563. "cmp "dstw", "#index" \n\t"\
  564. " jb 1b \n\t"
  565. #undef WRITEBGR24
  566. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMXEXT(dst, dstw, index)
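/*
 * Illustrative sketch (not from the original file): both WRITEBGR24 variants emit
 * the same byte stream -- three bytes per pixel, B,G,R, with no padding; the
 * MMXEXT version merely builds the three output qwords with pshufw plus the
 * ff_M24A/B/C masks instead of the longer shift/or cascade.
 */
#if 0
static void write_bgr24_sketch(uint8_t *dst, int x, uint8_t b, uint8_t g, uint8_t r)
{
    dst[3 * x + 0] = b;
    dst[3 * x + 1] = g;
    dst[3 * x + 2] = r;
}
#endif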
  567. #if HAVE_6REGS
  568. static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
  569. const int16_t **lumSrc, int lumFilterSize,
  570. const int16_t *chrFilter, const int16_t **chrUSrc,
  571. const int16_t **chrVSrc,
  572. int chrFilterSize, const int16_t **alpSrc,
  573. uint8_t *dest, int dstW, int dstY)
  574. {
  575. x86_reg dummy=0;
  576. x86_reg dstW_reg = dstW;
  577. x86_reg uv_off = c->uv_offx2;
  578. YSCALEYUV2PACKEDX_ACCURATE
  579. YSCALEYUV2RGBX
  580. "pxor %%mm7, %%mm7 \n\t"
  581. "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c"\n\t" //FIXME optimize
  582. "add %4, %%"FF_REG_c" \n\t"
  583. WRITEBGR24(%%FF_REGc, "%5", %%FF_REGa)
  584. :: "r" (&c->redDither),
  585. "m" (dummy), "m" (dummy), "m" (dummy),
  586. "r" (dest), "m" (dstW_reg), "m"(uv_off)
  587. NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B)
  588. : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S
  589. );
  590. }
  591. static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
  592. const int16_t **lumSrc, int lumFilterSize,
  593. const int16_t *chrFilter, const int16_t **chrUSrc,
  594. const int16_t **chrVSrc,
  595. int chrFilterSize, const int16_t **alpSrc,
  596. uint8_t *dest, int dstW, int dstY)
  597. {
  598. x86_reg dummy=0;
  599. x86_reg dstW_reg = dstW;
  600. x86_reg uv_off = c->uv_offx2;
  601. YSCALEYUV2PACKEDX
  602. YSCALEYUV2RGBX
  603. "pxor %%mm7, %%mm7 \n\t"
  604. "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c" \n\t" //FIXME optimize
  605. "add %4, %%"FF_REG_c" \n\t"
  606. WRITEBGR24(%%FF_REGc, "%5", %%FF_REGa)
  607. :: "r" (&c->redDither),
  608. "m" (dummy), "m" (dummy), "m" (dummy),
  609. "r" (dest), "m" (dstW_reg), "m"(uv_off)
  610. NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B)
  611. : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S
  612. );
  613. }
  614. #endif /* HAVE_6REGS */
  615. #define REAL_WRITEYUY2(dst, dstw, index) \
  616. "packuswb %%mm3, %%mm3 \n\t"\
  617. "packuswb %%mm4, %%mm4 \n\t"\
  618. "packuswb %%mm7, %%mm1 \n\t"\
  619. "punpcklbw %%mm4, %%mm3 \n\t"\
  620. "movq %%mm1, %%mm7 \n\t"\
  621. "punpcklbw %%mm3, %%mm1 \n\t"\
  622. "punpckhbw %%mm3, %%mm7 \n\t"\
  623. \
  624. MOVNTQ(%%mm1, (dst, index, 2))\
  625. MOVNTQ(%%mm7, 8(dst, index, 2))\
  626. \
  627. "add $8, "#index" \n\t"\
  628. "cmp "dstw", "#index" \n\t"\
  629. " jb 1b \n\t"
  630. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
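/*
 * Illustrative sketch (not from the original file): the layout WRITEYUY2 stores,
 * four bytes per horizontal pixel pair in Y0,U,Y1,V order (YUYV / YUY2), with the
 * chroma samples shared by both luma samples.
 */
#if 0
static void write_yuy2_sketch(uint8_t *dst, int pair,
                              uint8_t y0, uint8_t y1, uint8_t u, uint8_t v)
{
    dst[4 * pair + 0] = y0;
    dst[4 * pair + 1] = u;
    dst[4 * pair + 2] = y1;
    dst[4 * pair + 3] = v;
}
#endif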
  631. static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
  632. const int16_t **lumSrc, int lumFilterSize,
  633. const int16_t *chrFilter, const int16_t **chrUSrc,
  634. const int16_t **chrVSrc,
  635. int chrFilterSize, const int16_t **alpSrc,
  636. uint8_t *dest, int dstW, int dstY)
  637. {
  638. x86_reg dummy=0;
  639. x86_reg dstW_reg = dstW;
  640. x86_reg uv_off = c->uv_offx2;
  641. YSCALEYUV2PACKEDX_ACCURATE
  642. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  643. "psraw $3, %%mm3 \n\t"
  644. "psraw $3, %%mm4 \n\t"
  645. "psraw $3, %%mm1 \n\t"
  646. "psraw $3, %%mm7 \n\t"
  647. WRITEYUY2(%4, "%5", %%FF_REGa)
  648. YSCALEYUV2PACKEDX_END
  649. }
  650. static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
  651. const int16_t **lumSrc, int lumFilterSize,
  652. const int16_t *chrFilter, const int16_t **chrUSrc,
  653. const int16_t **chrVSrc,
  654. int chrFilterSize, const int16_t **alpSrc,
  655. uint8_t *dest, int dstW, int dstY)
  656. {
  657. x86_reg dummy=0;
  658. x86_reg dstW_reg = dstW;
  659. x86_reg uv_off = c->uv_offx2;
  660. YSCALEYUV2PACKEDX
  661. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  662. "psraw $3, %%mm3 \n\t"
  663. "psraw $3, %%mm4 \n\t"
  664. "psraw $3, %%mm1 \n\t"
  665. "psraw $3, %%mm7 \n\t"
  666. WRITEYUY2(%4, "%5", %%FF_REGa)
  667. YSCALEYUV2PACKEDX_END
  668. }
  669. #define REAL_YSCALEYUV2RGB_UV(index, c) \
  670. "xor "#index", "#index" \n\t"\
  671. ".p2align 4 \n\t"\
  672. "1: \n\t"\
  673. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  674. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  675. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  676. "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  677. "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  678. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  679. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  680. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  681. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  682. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  683. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  684. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  685. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  686. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  687. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  688. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  689. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  690. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  691. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  692. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  693. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  694. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  695. #define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
  696. "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  697. "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  698. "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  699. "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  700. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  701. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  702. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  703. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  704. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  705. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  706. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  707. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  708. #define REAL_YSCALEYUV2RGB_COEFF(c) \
  709. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  710. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  711. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  712. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  713. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  714. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  715. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  716. "paddw %%mm3, %%mm4 \n\t"\
  717. "movq %%mm2, %%mm0 \n\t"\
  718. "movq %%mm5, %%mm6 \n\t"\
  719. "movq %%mm4, %%mm3 \n\t"\
  720. "punpcklwd %%mm2, %%mm2 \n\t"\
  721. "punpcklwd %%mm5, %%mm5 \n\t"\
  722. "punpcklwd %%mm4, %%mm4 \n\t"\
  723. "paddw %%mm1, %%mm2 \n\t"\
  724. "paddw %%mm1, %%mm5 \n\t"\
  725. "paddw %%mm1, %%mm4 \n\t"\
  726. "punpckhwd %%mm0, %%mm0 \n\t"\
  727. "punpckhwd %%mm6, %%mm6 \n\t"\
  728. "punpckhwd %%mm3, %%mm3 \n\t"\
  729. "paddw %%mm7, %%mm0 \n\t"\
  730. "paddw %%mm7, %%mm6 \n\t"\
  731. "paddw %%mm7, %%mm3 \n\t"\
  732. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  733. "packuswb %%mm0, %%mm2 \n\t"\
  734. "packuswb %%mm6, %%mm5 \n\t"\
  735. "packuswb %%mm3, %%mm4 \n\t"\
  736. #define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
  737. #define YSCALEYUV2RGB(index, c) \
  738. REAL_YSCALEYUV2RGB_UV(index, c) \
  739. REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
  740. REAL_YSCALEYUV2RGB_COEFF(c)
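/*
 * Illustrative sketch (not from the original file): the two-line blend done by
 * REAL_YSCALEYUV2RGB_UV / _YA.  `alpha_q16` models the Q16 weight stored at
 * CHR/LUM_MMX_FILTER_OFFSET+8 (pmulhw == ">> 16"); the ">> 4" matches the
 * psraw $4 that scales the 16-bit intermediates for the RGB stage.
 */
#if 0
static int16_t vblend_sketch(int16_t line0, int16_t line1, int alpha_q16)
{
    return (int16_t)((line1 >> 4) + (((line0 - line1) * alpha_q16) >> 16));
}
#endif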
  741. /**
  742. * vertical bilinear scale YV12 to RGB
  743. */
  744. static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
  745. const int16_t *ubuf[2], const int16_t *vbuf[2],
  746. const int16_t *abuf[2], uint8_t *dest,
  747. int dstW, int yalpha, int uvalpha, int y)
  748. {
  749. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  750. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
  751. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  752. const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
  753. #if ARCH_X86_64
  754. __asm__ volatile(
  755. YSCALEYUV2RGB(%%r8, %5)
  756. YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
  757. "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
  758. "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
  759. "packuswb %%mm7, %%mm1 \n\t"
  760. WRITEBGR32(%4, DSTW_OFFSET"(%5)", %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
  761. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
  762. "a" (&c->redDither),
  763. "r" (abuf0), "r" (abuf1)
  764. : "%r8"
  765. );
  766. #else
  767. c->u_temp=(intptr_t)abuf0;
  768. c->v_temp=(intptr_t)abuf1;
  769. __asm__ volatile(
  770. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  771. "mov %4, %%"FF_REG_b" \n\t"
  772. "push %%"FF_REG_BP" \n\t"
  773. YSCALEYUV2RGB(%%FF_REGBP, %5)
  774. "push %0 \n\t"
  775. "push %1 \n\t"
  776. "mov "U_TEMP"(%5), %0 \n\t"
  777. "mov "V_TEMP"(%5), %1 \n\t"
  778. YSCALEYUV2RGB_YA(%%FF_REGBP, %5, %0, %1)
  779. "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
  780. "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
  781. "packuswb %%mm7, %%mm1 \n\t"
  782. "pop %1 \n\t"
  783. "pop %0 \n\t"
  784. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
  785. "pop %%"FF_REG_BP" \n\t"
  786. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  787. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  788. "a" (&c->redDither)
  789. );
  790. #endif
  791. } else {
  792. __asm__ volatile(
  793. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  794. "mov %4, %%"FF_REG_b" \n\t"
  795. "push %%"FF_REG_BP" \n\t"
  796. YSCALEYUV2RGB(%%FF_REGBP, %5)
  797. "pcmpeqd %%mm7, %%mm7 \n\t"
  798. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  799. "pop %%"FF_REG_BP" \n\t"
  800. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  801. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  802. "a" (&c->redDither)
  803. );
  804. }
  805. }
  806. static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
  807. const int16_t *ubuf[2], const int16_t *vbuf[2],
  808. const int16_t *abuf[2], uint8_t *dest,
  809. int dstW, int yalpha, int uvalpha, int y)
  810. {
  811. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  812. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
  813. __asm__ volatile(
  814. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  815. "mov %4, %%"FF_REG_b" \n\t"
  816. "push %%"FF_REG_BP" \n\t"
  817. YSCALEYUV2RGB(%%FF_REGBP, %5)
  818. "pxor %%mm7, %%mm7 \n\t"
  819. WRITEBGR24(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  820. "pop %%"FF_REG_BP" \n\t"
  821. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  822. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  823. "a" (&c->redDither)
  824. NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B)
  825. );
  826. }
  827. static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
  828. const int16_t *ubuf[2], const int16_t *vbuf[2],
  829. const int16_t *abuf[2], uint8_t *dest,
  830. int dstW, int yalpha, int uvalpha, int y)
  831. {
  832. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  833. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
  834. __asm__ volatile(
  835. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  836. "mov %4, %%"FF_REG_b" \n\t"
  837. "push %%"FF_REG_BP" \n\t"
  838. YSCALEYUV2RGB(%%FF_REGBP, %5)
  839. "pxor %%mm7, %%mm7 \n\t"
  840. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  841. #ifdef DITHER1XBPP
  842. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  843. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  844. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  845. #endif
  846. WRITERGB15(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  847. "pop %%"FF_REG_BP" \n\t"
  848. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  849. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  850. "a" (&c->redDither)
  851. NAMED_CONSTRAINTS_ADD(bF8)
  852. );
  853. }
  854. static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
  855. const int16_t *ubuf[2], const int16_t *vbuf[2],
  856. const int16_t *abuf[2], uint8_t *dest,
  857. int dstW, int yalpha, int uvalpha, int y)
  858. {
  859. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  860. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
  861. __asm__ volatile(
  862. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  863. "mov %4, %%"FF_REG_b" \n\t"
  864. "push %%"FF_REG_BP" \n\t"
  865. YSCALEYUV2RGB(%%FF_REGBP, %5)
  866. "pxor %%mm7, %%mm7 \n\t"
  867. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  868. #ifdef DITHER1XBPP
  869. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  870. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  871. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  872. #endif
  873. WRITERGB16(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  874. "pop %%"FF_REG_BP" \n\t"
  875. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  876. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  877. "a" (&c->redDither)
  878. NAMED_CONSTRAINTS_ADD(bF8,bFC)
  879. );
  880. }
  881. #define REAL_YSCALEYUV2PACKED(index, c) \
  882. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  883. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
  884. "psraw $3, %%mm0 \n\t"\
  885. "psraw $3, %%mm1 \n\t"\
  886. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  887. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  888. "xor "#index", "#index" \n\t"\
  889. ".p2align 4 \n\t"\
  890. "1: \n\t"\
  891. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  892. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  893. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  894. "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  895. "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  896. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  897. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  898. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  899. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  900. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  901. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  902. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  903. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  904. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  905. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  906. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  907. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  908. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  909. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  910. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  911. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  912. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  913. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  914. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  915. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  916. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  917. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  918. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
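/*
 * Illustrative sketch (not from the original file): the YSCALEYUV2PACKED blend has
 * the same shape as vblend_sketch() above, but the weights were pre-shifted right
 * by 3 at the top of the macro and the samples by 7, so the blended Y/U/V values
 * are already in 8-bit range for WRITEYUY2.
 */
#if 0
static int16_t vblend_packed_sketch(int16_t line0, int16_t line1, int alpha_q13)
{
    return (int16_t)((line1 >> 7) + (((line0 - line1) * alpha_q13) >> 16));
}
#endif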
  919. static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
  920. const int16_t *ubuf[2], const int16_t *vbuf[2],
  921. const int16_t *abuf[2], uint8_t *dest,
  922. int dstW, int yalpha, int uvalpha, int y)
  923. {
  924. const int16_t *buf0 = buf[0], *buf1 = buf[1],
  925. *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
  926. __asm__ volatile(
  927. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  928. "mov %4, %%"FF_REG_b" \n\t"
  929. "push %%"FF_REG_BP" \n\t"
  930. YSCALEYUV2PACKED(%%FF_REGBP, %5)
  931. WRITEYUY2(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  932. "pop %%"FF_REG_BP" \n\t"
  933. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  934. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  935. "a" (&c->redDither)
  936. );
  937. }
  938. #define REAL_YSCALEYUV2RGB1(index, c) \
  939. "xor "#index", "#index" \n\t"\
  940. ".p2align 4 \n\t"\
  941. "1: \n\t"\
  942. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  943. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  944. "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  945. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  946. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  947. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  948. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  949. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  950. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  951. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  952. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  953. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  954. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  955. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  956. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  957. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  958. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  959. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  960. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  961. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  962. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  963. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  964. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  965. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  966. "paddw %%mm3, %%mm4 \n\t"\
  967. "movq %%mm2, %%mm0 \n\t"\
  968. "movq %%mm5, %%mm6 \n\t"\
  969. "movq %%mm4, %%mm3 \n\t"\
  970. "punpcklwd %%mm2, %%mm2 \n\t"\
  971. "punpcklwd %%mm5, %%mm5 \n\t"\
  972. "punpcklwd %%mm4, %%mm4 \n\t"\
  973. "paddw %%mm1, %%mm2 \n\t"\
  974. "paddw %%mm1, %%mm5 \n\t"\
  975. "paddw %%mm1, %%mm4 \n\t"\
  976. "punpckhwd %%mm0, %%mm0 \n\t"\
  977. "punpckhwd %%mm6, %%mm6 \n\t"\
  978. "punpckhwd %%mm3, %%mm3 \n\t"\
  979. "paddw %%mm7, %%mm0 \n\t"\
  980. "paddw %%mm7, %%mm6 \n\t"\
  981. "paddw %%mm7, %%mm3 \n\t"\
  982. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  983. "packuswb %%mm0, %%mm2 \n\t"\
  984. "packuswb %%mm6, %%mm5 \n\t"\
  985. "packuswb %%mm3, %%mm4 \n\t"\
  986. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  987. // do vertical chrominance interpolation
  988. #define REAL_YSCALEYUV2RGB1b(index, c) \
  989. "xor "#index", "#index" \n\t"\
  990. ".p2align 4 \n\t"\
  991. "1: \n\t"\
  992. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  993. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  994. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  995. "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  996. "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  997. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  998. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  999. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  1000. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  1001. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  1002. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  1003. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  1004. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  1005. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  1006. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  1007. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  1008. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  1009. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  1010. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  1011. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  1012. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  1013. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  1014. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  1015. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  1016. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  1017. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  1018. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  1019. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  1020. "paddw %%mm3, %%mm4 \n\t"\
  1021. "movq %%mm2, %%mm0 \n\t"\
  1022. "movq %%mm5, %%mm6 \n\t"\
  1023. "movq %%mm4, %%mm3 \n\t"\
  1024. "punpcklwd %%mm2, %%mm2 \n\t"\
  1025. "punpcklwd %%mm5, %%mm5 \n\t"\
  1026. "punpcklwd %%mm4, %%mm4 \n\t"\
  1027. "paddw %%mm1, %%mm2 \n\t"\
  1028. "paddw %%mm1, %%mm5 \n\t"\
  1029. "paddw %%mm1, %%mm4 \n\t"\
  1030. "punpckhwd %%mm0, %%mm0 \n\t"\
  1031. "punpckhwd %%mm6, %%mm6 \n\t"\
  1032. "punpckhwd %%mm3, %%mm3 \n\t"\
  1033. "paddw %%mm7, %%mm0 \n\t"\
  1034. "paddw %%mm7, %%mm6 \n\t"\
  1035. "paddw %%mm7, %%mm3 \n\t"\
  1036. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  1037. "packuswb %%mm0, %%mm2 \n\t"\
  1038. "packuswb %%mm6, %%mm5 \n\t"\
  1039. "packuswb %%mm3, %%mm4 \n\t"\
  1040. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
  1041. #define REAL_YSCALEYUV2RGB1_ALPHA(index) \
  1042. "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
  1043. "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
  1044. "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
  1045. "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
  1046. "packuswb %%mm1, %%mm7 \n\t"
  1047. #define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
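/*
 * Illustrative sketch (not from the original file): sample handling in the *_1
 * (no vertical scaling) paths.  YSCALEYUV2RGB1 reads a single chroma line and
 * shifts it into range; the ...1b variant averages the two neighbouring chroma
 * lines first (chosen when uvalpha >= 2048, i.e. a half-way chroma position).
 */
#if 0
static int16_t chroma_1_sketch (int16_t c0)             { return (int16_t)(c0 >> 4); }
static int16_t chroma_1b_sketch(int16_t c0, int16_t c1) { return (int16_t)((c0 + c1) >> 5); } /* psrlw $5 on the sum */
#endif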
  1048. /**
  1049. * YV12 to RGB without scaling or interpolating
  1050. */
  1051. static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
  1052. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1053. const int16_t *abuf0, uint8_t *dest,
  1054. int dstW, int uvalpha, int y)
  1055. {
  1056. const int16_t *ubuf0 = ubuf[0];
  1057. const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1058. if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1059. const int16_t *ubuf1 = ubuf[0];
  1060. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  1061. __asm__ volatile(
  1062. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1063. "mov %4, %%"FF_REG_b" \n\t"
  1064. "push %%"FF_REG_BP" \n\t"
  1065. YSCALEYUV2RGB1(%%FF_REGBP, %5)
  1066. YSCALEYUV2RGB1_ALPHA(%%FF_REGBP)
  1067. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1068. "pop %%"FF_REG_BP" \n\t"
  1069. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1070. :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1071. "a" (&c->redDither)
  1072. );
  1073. } else {
  1074. __asm__ volatile(
  1075. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1076. "mov %4, %%"FF_REG_b" \n\t"
  1077. "push %%"FF_REG_BP" \n\t"
  1078. YSCALEYUV2RGB1(%%FF_REGBP, %5)
  1079. "pcmpeqd %%mm7, %%mm7 \n\t"
  1080. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1081. "pop %%"FF_REG_BP" \n\t"
  1082. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1083. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1084. "a" (&c->redDither)
  1085. );
  1086. }
  1087. } else {
  1088. const int16_t *ubuf1 = ubuf[1];
  1089. if (CONFIG_SWSCALE_ALPHA && c->needAlpha) {
  1090. __asm__ volatile(
  1091. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1092. "mov %4, %%"FF_REG_b" \n\t"
  1093. "push %%"FF_REG_BP" \n\t"
  1094. YSCALEYUV2RGB1b(%%FF_REGBP, %5)
  1095. YSCALEYUV2RGB1_ALPHA(%%FF_REGBP)
  1096. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1097. "pop %%"FF_REG_BP" \n\t"
  1098. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1099. :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1100. "a" (&c->redDither)
  1101. );
  1102. } else {
  1103. __asm__ volatile(
  1104. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1105. "mov %4, %%"FF_REG_b" \n\t"
  1106. "push %%"FF_REG_BP" \n\t"
  1107. YSCALEYUV2RGB1b(%%FF_REGBP, %5)
  1108. "pcmpeqd %%mm7, %%mm7 \n\t"
  1109. WRITEBGR32(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1110. "pop %%"FF_REG_BP" \n\t"
  1111. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1112. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1113. "a" (&c->redDither)
  1114. );
  1115. }
  1116. }
  1117. }
  1118. static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
  1119. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1120. const int16_t *abuf0, uint8_t *dest,
  1121. int dstW, int uvalpha, int y)
  1122. {
  1123. const int16_t *ubuf0 = ubuf[0];
  1124. const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1125. if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1126. const int16_t *ubuf1 = ubuf[0];
  1127. __asm__ volatile(
  1128. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1129. "mov %4, %%"FF_REG_b" \n\t"
  1130. "push %%"FF_REG_BP" \n\t"
  1131. YSCALEYUV2RGB1(%%FF_REGBP, %5)
  1132. "pxor %%mm7, %%mm7 \n\t"
  1133. WRITEBGR24(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1134. "pop %%"FF_REG_BP" \n\t"
  1135. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1136. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1137. "a" (&c->redDither)
  1138. NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B)
  1139. );
  1140. } else {
  1141. const int16_t *ubuf1 = ubuf[1];
  1142. __asm__ volatile(
  1143. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1144. "mov %4, %%"FF_REG_b" \n\t"
  1145. "push %%"FF_REG_BP" \n\t"
  1146. YSCALEYUV2RGB1b(%%FF_REGBP, %5)
  1147. "pxor %%mm7, %%mm7 \n\t"
  1148. WRITEBGR24(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1149. "pop %%"FF_REG_BP" \n\t"
  1150. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1151. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1152. "a" (&c->redDither)
  1153. NAMED_CONSTRAINTS_ADD(ff_M24A,ff_M24C,ff_M24B)
  1154. );
  1155. }
  1156. }
  1157. static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
  1158. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1159. const int16_t *abuf0, uint8_t *dest,
  1160. int dstW, int uvalpha, int y)
  1161. {
  1162. const int16_t *ubuf0 = ubuf[0];
  1163. const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1164. if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1165. const int16_t *ubuf1 = ubuf[0];
  1166. __asm__ volatile(
  1167. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1168. "mov %4, %%"FF_REG_b" \n\t"
  1169. "push %%"FF_REG_BP" \n\t"
  1170. YSCALEYUV2RGB1(%%FF_REGBP, %5)
  1171. "pxor %%mm7, %%mm7 \n\t"
  1172. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1173. #ifdef DITHER1XBPP
  1174. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1175. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1176. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1177. #endif
  1178. WRITERGB15(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1179. "pop %%"FF_REG_BP" \n\t"
  1180. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1181. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1182. "a" (&c->redDither)
  1183. NAMED_CONSTRAINTS_ADD(bF8)
  1184. );
  1185. } else {
  1186. const int16_t *ubuf1 = ubuf[1];
  1187. __asm__ volatile(
  1188. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1189. "mov %4, %%"FF_REG_b" \n\t"
  1190. "push %%"FF_REG_BP" \n\t"
  1191. YSCALEYUV2RGB1b(%%FF_REGBP, %5)
  1192. "pxor %%mm7, %%mm7 \n\t"
  1193. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1194. #ifdef DITHER1XBPP
  1195. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1196. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1197. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1198. #endif
  1199. WRITERGB15(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1200. "pop %%"FF_REG_BP" \n\t"
  1201. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1202. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1203. "a" (&c->redDither)
  1204. NAMED_CONSTRAINTS_ADD(bF8)
  1205. );
  1206. }
  1207. }
  1208. static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
  1209. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1210. const int16_t *abuf0, uint8_t *dest,
  1211. int dstW, int uvalpha, int y)
  1212. {
  1213. const int16_t *ubuf0 = ubuf[0];
  1214. const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1215. if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1216. const int16_t *ubuf1 = ubuf[0];
  1217. __asm__ volatile(
  1218. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1219. "mov %4, %%"FF_REG_b" \n\t"
  1220. "push %%"FF_REG_BP" \n\t"
  1221. YSCALEYUV2RGB1(%%FF_REGBP, %5)
  1222. "pxor %%mm7, %%mm7 \n\t"
  1223. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1224. #ifdef DITHER1XBPP
  1225. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1226. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1227. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1228. #endif
  1229. WRITERGB16(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1230. "pop %%"FF_REG_BP" \n\t"
  1231. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1232. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1233. "a" (&c->redDither)
  1234. NAMED_CONSTRAINTS_ADD(bF8,bFC)
  1235. );
  1236. } else {
  1237. const int16_t *ubuf1 = ubuf[1];
  1238. __asm__ volatile(
  1239. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1240. "mov %4, %%"FF_REG_b" \n\t"
  1241. "push %%"FF_REG_BP" \n\t"
  1242. YSCALEYUV2RGB1b(%%FF_REGBP, %5)
  1243. "pxor %%mm7, %%mm7 \n\t"
  1244. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1245. #ifdef DITHER1XBPP
  1246. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1247. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1248. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1249. #endif
  1250. WRITERGB16(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1251. "pop %%"FF_REG_BP" \n\t"
  1252. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1253. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1254. "a" (&c->redDither)
  1255. NAMED_CONSTRAINTS_ADD(bF8,bFC)
  1256. );
  1257. }
  1258. }
  1259. #define REAL_YSCALEYUV2PACKED1(index, c) \
  1260. "xor "#index", "#index" \n\t"\
  1261. ".p2align 4 \n\t"\
  1262. "1: \n\t"\
  1263. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  1264. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  1265. "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  1266. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  1267. "psraw $7, %%mm3 \n\t" \
  1268. "psraw $7, %%mm4 \n\t" \
  1269. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  1270. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  1271. "psraw $7, %%mm1 \n\t" \
  1272. "psraw $7, %%mm7 \n\t" \
  1273. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  1274. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  1275. "xor "#index", "#index" \n\t"\
  1276. ".p2align 4 \n\t"\
  1277. "1: \n\t"\
  1278. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  1279. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  1280. "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  1281. "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  1282. "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  1283. "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
  1284. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  1285. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  1286. "psrlw $8, %%mm3 \n\t" \
  1287. "psrlw $8, %%mm4 \n\t" \
  1288. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  1289. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  1290. "psraw $7, %%mm1 \n\t" \
  1291. "psraw $7, %%mm7 \n\t"
  1292. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  1293. static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
  1294. const int16_t *ubuf[2], const int16_t *vbuf[2],
  1295. const int16_t *abuf0, uint8_t *dest,
  1296. int dstW, int uvalpha, int y)
  1297. {
  1298. const int16_t *ubuf0 = ubuf[0];
  1299. const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1300. if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1301. const int16_t *ubuf1 = ubuf[0];
  1302. __asm__ volatile(
  1303. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1304. "mov %4, %%"FF_REG_b" \n\t"
  1305. "push %%"FF_REG_BP" \n\t"
  1306. YSCALEYUV2PACKED1(%%FF_REGBP, %5)
  1307. WRITEYUY2(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1308. "pop %%"FF_REG_BP" \n\t"
  1309. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1310. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1311. "a" (&c->redDither)
  1312. );
  1313. } else {
  1314. const int16_t *ubuf1 = ubuf[1];
  1315. __asm__ volatile(
  1316. "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
  1317. "mov %4, %%"FF_REG_b" \n\t"
  1318. "push %%"FF_REG_BP" \n\t"
  1319. YSCALEYUV2PACKED1b(%%FF_REGBP, %5)
  1320. WRITEYUY2(%%FF_REGb, DSTW_OFFSET"(%5)", %%FF_REGBP)
  1321. "pop %%"FF_REG_BP" \n\t"
  1322. "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
  1323. :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
  1324. "a" (&c->redDither)
  1325. );
  1326. }
  1327. }
  1328. static av_cold void RENAME(sws_init_swscale)(SwsContext *c)
  1329. {
  1330. enum AVPixelFormat dstFormat = c->dstFormat;
  1331. c->use_mmx_vfilter= 0;
  1332. if (!is16BPS(dstFormat) && !isNBPS(dstFormat) && !isSemiPlanarYUV(dstFormat)
  1333. && dstFormat != AV_PIX_FMT_GRAYF32BE && dstFormat != AV_PIX_FMT_GRAYF32LE
  1334. && !(c->flags & SWS_BITEXACT)) {
  1335. if (c->flags & SWS_ACCURATE_RND) {
  1336. if (!(c->flags & SWS_FULL_CHR_H_INT)) {
  1337. switch (c->dstFormat) {
  1338. case AV_PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X_ar); break;
  1339. #if HAVE_6REGS
  1340. case AV_PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X_ar); break;
  1341. #endif
  1342. case AV_PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X_ar); break;
  1343. case AV_PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X_ar); break;
  1344. case AV_PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
  1345. default: break;
  1346. }
  1347. }
  1348. } else {
  1349. c->use_mmx_vfilter= 1;
  1350. if (!(c->flags & SWS_FULL_CHR_H_INT)) {
  1351. switch (c->dstFormat) {
  1352. case AV_PIX_FMT_RGB32: c->yuv2packedX = RENAME(yuv2rgb32_X); break;
  1353. case AV_PIX_FMT_BGR32: c->yuv2packedX = RENAME(yuv2bgr32_X); break;
  1354. #if HAVE_6REGS
  1355. case AV_PIX_FMT_BGR24: c->yuv2packedX = RENAME(yuv2bgr24_X); break;
  1356. #endif
  1357. case AV_PIX_FMT_RGB555: c->yuv2packedX = RENAME(yuv2rgb555_X); break;
  1358. case AV_PIX_FMT_RGB565: c->yuv2packedX = RENAME(yuv2rgb565_X); break;
  1359. case AV_PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
  1360. default: break;
  1361. }
  1362. }
  1363. }
  1364. if (!(c->flags & SWS_FULL_CHR_H_INT)) {
  1365. switch (c->dstFormat) {
  1366. case AV_PIX_FMT_RGB32:
  1367. c->yuv2packed1 = RENAME(yuv2rgb32_1);
  1368. c->yuv2packed2 = RENAME(yuv2rgb32_2);
  1369. break;
  1370. case AV_PIX_FMT_BGR24:
  1371. c->yuv2packed1 = RENAME(yuv2bgr24_1);
  1372. c->yuv2packed2 = RENAME(yuv2bgr24_2);
  1373. break;
  1374. case AV_PIX_FMT_RGB555:
  1375. c->yuv2packed1 = RENAME(yuv2rgb555_1);
  1376. c->yuv2packed2 = RENAME(yuv2rgb555_2);
  1377. break;
  1378. case AV_PIX_FMT_RGB565:
  1379. c->yuv2packed1 = RENAME(yuv2rgb565_1);
  1380. c->yuv2packed2 = RENAME(yuv2rgb565_2);
  1381. break;
  1382. case AV_PIX_FMT_YUYV422:
  1383. c->yuv2packed1 = RENAME(yuv2yuyv422_1);
  1384. c->yuv2packed2 = RENAME(yuv2yuyv422_2);
  1385. break;
  1386. default:
  1387. break;
  1388. }
  1389. }
  1390. }
  1391. if (c->srcBpc == 8 && c->dstBpc <= 14) {
  1392. // Use the new MMX scaler if the MMXEXT one can't be used (it is faster than the x86 ASM one).
  1393. if (c->flags & SWS_FAST_BILINEAR && c->canMMXEXTBeUsed) {
  1394. c->hyscale_fast = ff_hyscale_fast_mmxext;
  1395. c->hcscale_fast = ff_hcscale_fast_mmxext;
  1396. } else {
  1397. c->hyscale_fast = NULL;
  1398. c->hcscale_fast = NULL;
  1399. }
  1400. }
  1401. }