
/*
 * software RGB to RGB converter
 * plus software PAL8 to RGB converter
 * software YUV to YUV converter
 * software YUV to RGB converter
 * Written by Nick Kurshev.
 * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
 * lots of big-endian byte order fixes by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stddef.h>

#undef PREFETCH
#undef MOVNTQ
#undef EMMS
#undef SFENCE
#undef PAVGB

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB    "pavgusb"
#elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#define PAVGB    "pavgb"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#if COMPILE_TEMPLATE_MMX2
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define MOVNTQ "movq"
#define SFENCE " # nop"
#endif
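
/*
 * Editor's note (a sketch, not part of the upstream file): this template is
 * compiled several times by the including file, once per CPU feature set
 * (plain MMX, MMX2, 3DNow!, ...). The includer defines RENAME() so that each
 * instantiation gets distinct symbol names, and the macros above pick the
 * matching prefetch, non-temporal store, FPU-state reset and byte-averaging
 * instructions for that pass. Roughly, the driving code looks like:
 *
 *     #undef  COMPILE_TEMPLATE_MMX2
 *     #define COMPILE_TEMPLATE_MMX2 1
 *     #define RENAME(a) a ## _MMX2
 *     #include "rgb2rgb_template.c"
 */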

#if !COMPILE_TEMPLATE_SSE2

#if !COMPILE_TEMPLATE_AMD3DNOW

static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 23;
    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "punpckldq 3%1, %%mm0 \n\t"
            "movd 6%1, %%mm1 \n\t"
            "punpckldq 9%1, %%mm1 \n\t"
            "movd 12%1, %%mm2 \n\t"
            "punpckldq 15%1, %%mm2 \n\t"
            "movd 18%1, %%mm3 \n\t"
            "punpckldq 21%1, %%mm3 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm2 \n\t"
            "por %%mm7, %%mm3 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm1, 8%0 \n\t"
            MOVNTQ" %%mm2, 16%0 \n\t"
            MOVNTQ" %%mm3, 24%0"
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 32;
        s += 24;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 255;
    }
}
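
/*
 * Editor's note: STORE_BGR24_MMX below packs the four quadwords of BGR0
 * dword pixels held in mm0/mm1/mm4/mm5 (with working copies in
 * mm2/mm3/mm6/mm7) into three quadwords of tightly packed 24-bit BGR and
 * writes them out with MOVNTQ; the mask24* constants select which bytes of
 * each source quadword survive at each output position.
 */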
#define STORE_BGR24_MMX \
    "psrlq $8, %%mm2 \n\t" \
    "psrlq $8, %%mm3 \n\t" \
    "psrlq $8, %%mm6 \n\t" \
    "psrlq $8, %%mm7 \n\t" \
    "pand "MANGLE(mask24l)", %%mm0 \n\t" \
    "pand "MANGLE(mask24l)", %%mm1 \n\t" \
    "pand "MANGLE(mask24l)", %%mm4 \n\t" \
    "pand "MANGLE(mask24l)", %%mm5 \n\t" \
    "pand "MANGLE(mask24h)", %%mm2 \n\t" \
    "pand "MANGLE(mask24h)", %%mm3 \n\t" \
    "pand "MANGLE(mask24h)", %%mm6 \n\t" \
    "pand "MANGLE(mask24h)", %%mm7 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm6, %%mm4 \n\t" \
    "por %%mm7, %%mm5 \n\t" \
    \
    "movq %%mm1, %%mm2 \n\t" \
    "movq %%mm4, %%mm3 \n\t" \
    "psllq $48, %%mm2 \n\t" \
    "psllq $32, %%mm3 \n\t" \
    "pand "MANGLE(mask24hh)", %%mm2 \n\t" \
    "pand "MANGLE(mask24hhh)", %%mm3 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "psrlq $16, %%mm1 \n\t" \
    "psrlq $32, %%mm4 \n\t" \
    "psllq $16, %%mm5 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "pand "MANGLE(mask24hhhh)", %%mm5 \n\t" \
    "por %%mm5, %%mm4 \n\t" \
    \
    MOVNTQ" %%mm0, %0 \n\t" \
    MOVNTQ" %%mm1, 8%0 \n\t" \
    MOVNTQ" %%mm4, 16%0"

static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 31;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm1 \n\t"
            "movq 16%1, %%mm4 \n\t"
            "movq 24%1, %%mm5 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            STORE_BGR24_MMX
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 24;
        s += 32;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}

/*
 * original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMX2, 3DNOW optimization by Nick Kurshev
 * 32-bit C version, and and&add trick by Michael Niedermayer
 */
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "pand %%mm4, %%mm0 \n\t"
            "pand %%mm4, %%mm2 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*d)
            :"m"(*s)
            );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register unsigned x = *((const uint32_t *)s);
        *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
        d += 4;
        s += 4;
    }
    if (s < end) {
        register unsigned short x = *((const uint16_t *)s);
        *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
    }
}
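
/*
 * Editor's note on the and&add trick used above: in RGB555 the green and red
 * fields occupy bits 5-14, so adding (x & 0x7FE07FE0) to x doubles those
 * fields in place, shifting them up one bit into their RGB565 positions
 * (green gains a zero low bit) while the five blue bits stay put.
 * rgb16to15() below reverses this with a shift-and-mask instead, since the
 * add cannot be undone as cheaply.
 */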
static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $1, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm3 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*d)
            :"m"(*s)
            );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x = *((const uint32_t *)s);
        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
        s += 4;
        d += 4;
    }
    if (s < end) {
        register uint16_t x = *((const uint16_t *)s);
        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
    }
}
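
/*
 * Editor's note on the preferred path of rgb32to16()/rgb32to15() below: it
 * folds the blue and red shifts into a single pmaddwd. Assuming the
 * constants defined next to this template (mul3216 holding the words 0x0004
 * and 0x2000), multiply-accumulating the masked B and R words yields
 * B<<2 | R<<13 per dword; green is merged in via por, and a final psrld $5
 * (or pslld $11 for the pixels that land in the high words) drops
 * everything into 565 position.
 */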
static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $5, %%mm0 \n\t"
        "pslld $11, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
        );
#else
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
#endif
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
    }
}

static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $6, %%mm0 \n\t"
        "pslld $10, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
        );
#else
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
#endif
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
    }
}

static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}
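
/*
 * Editor's note on the 15/16-bit to 24/32-bit routines below: each 5- or
 * 6-bit field is expanded to 8 bits with pmulhw rather than shift+or. Once
 * a field v has been aligned, multiplying by mul15_mid/mul15_hi and keeping
 * the high 16 bits is equivalent to (v<<3)|(v>>2), the usual
 * replicate-the-top-bits expansion, applied to four pixels per instruction
 * (compare the scalar tail loops, which compute exactly that).
 */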
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw %6, %%mm0 \n\t"
            "pmulhw %6, %%mm1 \n\t"
            "pmulhw %7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8%1, %%mm0 \n\t"
            "movq 8%1, %%mm1 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw %6, %%mm0 \n\t"
            "pmulhw %6, %%mm1 \n\t"
            "pmulhw %7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r),"m"(mmx_null),"m"(mul15_mid),"m"(mul15_hi)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :"=m"(*d)
            :"m"(*s)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
    }
}

static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = (uint8_t *)dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw %6, %%mm0 \n\t"
            "pmulhw %8, %%mm1 \n\t"
            "pmulhw %7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8%1, %%mm0 \n\t"
            "movq 8%1, %%mm1 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw %6, %%mm0 \n\t"
            "pmulhw %8, %%mm1 \n\t"
            "pmulhw %7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null),"m"(mul15_mid),"m"(mul15_hi),"m"(mul16_mid)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :"=m"(*d)
            :"m"(*s)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
    }
}

/*
 * mm0 = 00 B3 00 B2 00 B1 00 B0
 * mm1 = 00 G3 00 G2 00 G1 00 G0
 * mm2 = 00 R3 00 R2 00 R1 00 R0
 * mm6 = FF FF FF FF FF FF FF FF
 * mm7 = 00 00 00 00 00 00 00 00
 */
#define PACK_RGB32 \
    "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
    "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
    "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
    "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
    "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
    "movq %%mm0, %%mm3 \n\t" \
    "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
    "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
    MOVNTQ" %%mm0, %0 \n\t" \
    MOVNTQ" %%mm3, 8%0 \n\t"

static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw %5, %%mm1 \n\t"
            "pmulhw %6, %%mm2 \n\t"
            PACK_RGB32
            :"=m"(*d)
            :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r),"m"(mul15_mid),"m"(mul15_hi)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
        *d++ = 255;
    }
}

static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %1, %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw %7, %%mm1 \n\t"
            "pmulhw %6, %%mm2 \n\t"
            PACK_RGB32
            :"=m"(*d)
            :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mul15_mid),"m"(mul15_hi),"m"(mul16_mid)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
        *d++ = 255;
    }
}
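
/*
 * Editor's note: shuffle_bytes_2103() below swaps bytes 0 and 2 of every
 * dword (BGRA <-> RGBA, hence "2103" for the output byte order). The MMX2
 * variant uses pshufw $177 (0xB1), which swaps the two 16-bit words inside
 * each dword, then keeps G/A from the original and R/B from the shuffled
 * copy via the masks prepared in mm6/mm7. The index runs from negative up
 * to zero so the loop branch is a plain "js 1b" with no extra compare.
 */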
static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size)
{
    x86_reg idx = 15 - src_size;
    const uint8_t *s = src - idx;
    uint8_t *d = dst - idx;
    __asm__ volatile(
        "test %0, %0 \n\t"
        "jns 2f \n\t"
        PREFETCH" (%1, %0) \n\t"
        "movq %3, %%mm7 \n\t"
        "pxor %4, %%mm7 \n\t"
        "movq %%mm7, %%mm6 \n\t"
        "pxor %5, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %0) \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
#if COMPILE_TEMPLATE_MMX2
        "pshufw $177, %%mm0, %%mm3 \n\t"
        "pshufw $177, %%mm1, %%mm5 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#else
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm4 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "pslld $16, %%mm2 \n\t"
        "psrld $16, %%mm3 \n\t"
        "pslld $16, %%mm4 \n\t"
        "psrld $16, %%mm5 \n\t"
        "por %%mm2, %%mm0 \n\t"
        "por %%mm4, %%mm1 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#endif
        MOVNTQ" %%mm0, (%2, %0) \n\t"
        MOVNTQ" %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "js 1b \n\t"
        SFENCE" \n\t"
        EMMS" \n\t"
        "2: \n\t"
        : "+&r"(idx)
        : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
        : "memory");
    for (; idx<15; idx+=4) {
        register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        v &= 0xff00ff;
        *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
    }
}

static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    unsigned i;
    x86_reg mmx_size = 23 - src_size;
    __asm__ volatile (
        "test %%"REG_a", %%"REG_a" \n\t"
        "jns 2f \n\t"
        "movq "MANGLE(mask24r)", %%mm5 \n\t"
        "movq "MANGLE(mask24g)", %%mm6 \n\t"
        "movq "MANGLE(mask24b)", %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %%"REG_a") \n\t"
        "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
        "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
        "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
        "psllq $16, %%mm0 \n\t" // 00 BGR BGR
        "pand %%mm5, %%mm0 \n\t"
        "pand %%mm6, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
        MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG
        "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
        "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
        MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R
        "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
        "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm5, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t"
        "add $24, %%"REG_a" \n\t"
        " js 1b \n\t"
        "2: \n\t"
        : "+a" (mmx_size)
        : "r" (src-mmx_size), "r"(dst-mmx_size)
        );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    if (mmx_size==23) return; // finished, was multiple of 8
    src += src_size;
    dst += src_size;
    src_size = 23 - mmx_size;
    src -= src_size;
    dst -= src_size;
    for (i=0; i<src_size; i+=3) {
        register uint8_t x;
        x          = src[i + 2];
        dst[i + 1] = src[i + 1];
        dst[i + 2] = src[i + 0];
        dst[i + 0] = x;
    }
}

static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth = width>>1;
    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"REG_a") \n\t"
            PREFETCH" 32(%3, %%"REG_a") \n\t"
            "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm3, %%mm4 \n\t" // Y(0)
            "movq %%mm5, %%mm6 \n\t" // Y(8)
            "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
            "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
            "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
            "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
            MOVNTQ" %%mm3, (%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"REG_a
            );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth = width>>1;
    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"REG_a") \n\t"
            PREFETCH" 32(%3, %%"REG_a") \n\t"
            "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm0, %%mm4 \n\t" // UVUV UVUV(0)
            "movq %%mm2, %%mm6 \n\t" // UVUV UVUV(8)
            "punpcklbw %%mm3, %%mm0 \n\t" // UYVY UYVY(0)
            "punpckhbw %%mm3, %%mm4 \n\t" // UYVY UYVY(4)
            "punpcklbw %%mm5, %%mm2 \n\t" // UYVY UYVY(8)
            "punpckhbw %%mm5, %%mm6 \n\t" // UYVY UYVY(12)
            MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"REG_a
            );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
            "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
            "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
            "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        ydst += lumStride;
        src  += srcStride;

        // note: this block reuses the FF,00 mask left in %mm7 by the block above
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
            "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
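
/* Illustrative only, not part of the original file: a plain-C sketch of
 * what the two MMX loops above compute, for readers not fluent in inline
 * asm. One YUYV line is split into a full luma line; chroma is taken from
 * even lines only, as the doc comment says. The name yuy2toyv12_ref is
 * ours (hypothetical); the sketch is kept under "#if 0" because this
 * template is #included several times and must not define extra symbols. */
#if 0
static void yuy2toyv12_ref(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                           int width, int height,
                           int lumStride, int chromStride, int srcStride)
{
    int x, y;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            ydst[x] = src[2*x];          /* every even byte is luma */
        if (!(y & 1)) {                  /* chroma kept from even lines only */
            for (x = 0; x < width/2; x++) {
                udst[x] = src[4*x + 1];  /* U follows the first Y of a pair */
                vdst[x] = src[4*x + 3];  /* V follows the second Y */
            }
            udst += chromStride;
            vdst += chromStride;
        }
        ydst += lumStride;
        src  += srcStride;
    }
}
#endif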
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride)
{
    int x,y;

    dst[0]= src[0];

    // first line
    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] + src[x+1])>>2;
        dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    dst+= dstStride;

    for (y=1; y<srcHeight; y++) {
        const x86_reg mmxSize= srcWidth&~15;

        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(mmx_ff)", %%mm0 \n\t"
            "movq (%0, %%"REG_a"), %%mm4 \n\t"
            "movq %%mm4, %%mm2 \n\t"
            "psllq $8, %%mm4 \n\t"
            "pand %%mm0, %%mm2 \n\t"
            "por %%mm2, %%mm4 \n\t"
            "movq (%1, %%"REG_a"), %%mm5 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "psllq $8, %%mm5 \n\t"
            "pand %%mm0, %%mm3 \n\t"
            "por %%mm3, %%mm5 \n\t"
            "1: \n\t"
            "movq (%0, %%"REG_a"), %%mm0 \n\t"
            "movq (%1, %%"REG_a"), %%mm1 \n\t"
            "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
            "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpckhbw %%mm3, %%mm7 \n\t"
            "punpcklbw %%mm2, %%mm4 \n\t"
            "punpckhbw %%mm2, %%mm6 \n\t"
            MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
            "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
            " js 1b \n\t"
            :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
               "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
               "g" (-mmxSize)
            : "%"REG_a
        );

        for (x=mmxSize-1; x<srcWidth-1; x++) {
            dst[2*x          +1]= (3*src[x+0] +   src[x+srcStride+1])>>2;
            dst[2*x+dstStride+2]= (  src[x+0] + 3*src[x+srcStride+1])>>2;
            dst[2*x+dstStride+1]= (  src[x+1] + 3*src[x+srcStride  ])>>2;
            dst[2*x          +2]= (3*src[x+1] +   src[x+srcStride  ])>>2;
        }
        dst[srcWidth*2 -1            ]= (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride])>>2;
        dst[srcWidth*2 -1 + dstStride]= (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;

        dst+=dstStride*2;
        src+=srcStride;
    }

    // last line
    dst[0]= src[0];

    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] + src[x+1])>>2;
        dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
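
/* Illustrative only, not from the original file: the paired PAVGB
 * instructions above implement the 3:1 taps of the C fallback. pavgb
 * computes (a+b+1)>>1 per byte, and chaining two of them against the same
 * second operand yields exactly (a + 3b + 3) >> 2, i.e. the (3*x + y)>>2
 * blend of the scalar code up to rounding direction. A scalar sketch of
 * the identity (both function names are ours, hypothetical): */
#if 0
static inline uint8_t pavgb_scalar(uint8_t a, uint8_t b)
{
    return (a + b + 1) >> 1;               /* what PAVGB does per byte */
}

static inline uint8_t blend_1_3(uint8_t a, uint8_t b)
{
    /* avg(avg(a,b), b) == (a + 3*b + 3) >> 2: the SIMD counterpart of the
     * truncating (a + 3*b) >> 2 used in the C edge/tail code above. */
    return pavgb_scalar(pavgb_scalar(a, b), b);
}
#endif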
#endif /* COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW */

#if !COMPILE_TEMPLATE_AMD3DNOW
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line, others are ignored.
 * FIXME: Write HQ version.
 */
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
            "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
            "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        ydst += lumStride;
        src  += srcStride;

        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // UYVY UYVY(12)
            "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

/**
 * Height should be a multiple of 2 and width should be a multiple of 2.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line,
 * others are ignored in the C version.
 * FIXME: Write HQ version.
 */
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                       int width, int height,
                                       int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height-2; y+=2) {
        int i;
        for (i=0; i<2; i++) {
            __asm__ volatile(
                "mov %2, %%"REG_a" \n\t"
                "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
                "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
                ".p2align 4 \n\t"
                "1: \n\t"
                PREFETCH" 64(%0, %%"REG_d") \n\t"
                "movd (%0, %%"REG_d"), %%mm0 \n\t"
                "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
                "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm0 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
                "psrad $8, %%mm0 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
#endif
                "packssdw %%mm1, %%mm0 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm0 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "packssdw %%mm2, %%mm0 \n\t"
                "psraw $7, %%mm0 \n\t"
                "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
                "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
                "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm4 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
                "psrad $8, %%mm4 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
#endif
                "packssdw %%mm1, %%mm4 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm4 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "add $24, %%"REG_d" \n\t"
                "packssdw %%mm2, %%mm4 \n\t"
                "psraw $7, %%mm4 \n\t"
                "packuswb %%mm4, %%mm0 \n\t"
                "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
                MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
                "add $8, %%"REG_a" \n\t"
                " js 1b \n\t"
                : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width)
                : "%"REG_a, "%"REG_d
            );
            ydst += lumStride;
            src  += srcStride;
        }
        src -= srcStride*2;
        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
            "pxor %%mm7, %%mm7 \n\t"
            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
            "add %%"REG_d", %%"REG_d" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_d") \n\t"
            PREFETCH" 64(%1, %%"REG_d") \n\t"
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq (%0, %%"REG_d"), %%mm0 \n\t"
            "movq (%1, %%"REG_d"), %%mm1 \n\t"
            "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm0 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd (%0, %%"REG_d"), %%mm0 \n\t"
            "movd (%1, %%"REG_d"), %%mm1 \n\t"
            "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm0 \n\t"
            "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm4, %%mm2 \n\t"
            "psrlw $2, %%mm0 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm0, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm0 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm0 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm0 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm0 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
            "psraw $7, %%mm0 \n\t"
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm4, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm4 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm4 \n\t"
            "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
            "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm5 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm5, %%mm2 \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "psrlw $2, %%mm4 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm4, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm4 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm4 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm4 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm4 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "add $24, %%"REG_d" \n\t"
            "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
            "psraw $7, %%mm4 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "punpckldq %%mm4, %%mm0 \n\t"
            "punpckhdq %%mm4, %%mm1 \n\t"
            "packsswb %%mm1, %%mm0 \n\t"
            "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
            "movd %%mm0, (%2, %%"REG_a") \n\t"
            "punpckhdq %%mm0, %%mm0 \n\t"
            "movd %%mm0, (%3, %%"REG_a") \n\t"
            "add $4, %%"REG_a" \n\t"
            " js 1b \n\t"
            : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
            : "%"REG_a, "%"REG_d
        );

        udst += chromStride;
        vdst += chromStride;
        src  += srcStride*2;
    }

    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");

    rgb24toyv12_c(src, ydst, udst, vdst, width, height-y, lumStride, chromStride, srcStride);
}
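
/* Illustrative only, not part of the original file: the luma pass above is
 * a fixed-point dot product. pmaddwd with ff_bgr2YCoeff multiplies each
 * channel by a 15-bit coefficient; the psrad $8 plus psraw $7 pair then
 * discards the 15 fraction bits, and paddusb ff_bgr2YOffset adds the
 * limited-range offset. A scalar sketch using the classic nominal BT.601
 * constants (our choice, not the exact table values; channel order depends
 * on the caller's pixel layout): */
#if 0
static inline uint8_t rgb_to_y_ref(uint8_t r, uint8_t g, uint8_t b)
{
    /* 8-bit fixed-point form of limited-range BT.601 luma */
    return (uint8_t)(((66*r + 129*g + 25*b + 128) >> 8) + 16);
}
#endif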
#endif /* !COMPILE_TEMPLATE_SSE2 */

#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
                                    int width, int height, int src1Stride,
                                    int src2Stride, int dstStride)
{
    int h;

    for (h=0; h < height; h++) {
        int w;

#if COMPILE_TEMPLATE_SSE2
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
            "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
            "punpcklbw %%xmm2, %%xmm0 \n\t"
            "punpckhbw %%xmm2, %%xmm1 \n\t"
            "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t"
            "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a""
        );
#else
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movq (%1, %%"REG_a"), %%mm0 \n\t"
            "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "movq (%2, %%"REG_a"), %%mm4 \n\t"
            "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
            "punpcklbw %%mm4, %%mm0 \n\t"
            "punpckhbw %%mm4, %%mm1 \n\t"
            "punpcklbw %%mm5, %%mm2 \n\t"
            "punpckhbw %%mm5, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a
        );
#endif
        for (w= (width&(~15)); w < width; w++) {
            dest[2*w+0] = src1[w];
            dest[2*w+1] = src2[w];
        }
        dest += dstStride;
        src1 += src1Stride;
        src2 += src2Stride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
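
/* Illustrative only, not from the original file: interleaveBytes zips two
 * byte planes into one, i.e. dest[2n] = src1[n], dest[2n+1] = src2[n] per
 * row, which is exactly what the scalar tail loop above spells out. A
 * typical use (our example; merge_uv_nv12 and the plane names are
 * hypothetical) is packing separate U and V planes into an NV12-style
 * interleaved chroma plane: */
#if 0
static void merge_uv_nv12(const uint8_t *u, const uint8_t *v, uint8_t *uv,
                          int width, int height,
                          int uStride, int vStride, int uvStride)
{
    /* chroma planes are width/2 x height/2 for 4:2:0 content */
    interleaveBytes(u, v, uv, width/2, height/2, uStride, vStride, uvStride);
}
#endif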
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
                                       uint8_t *dst1, uint8_t *dst2,
                                       int width, int height,
                                       int srcStride1, int srcStride2,
                                       int dstStride1, int dstStride2)
{
    x86_reg y;
    int x,w,h;
    w=width/2; h=height/2;
    __asm__ volatile(
        PREFETCH" %0 \n\t"
        PREFETCH" %1 \n\t"
        ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
    for (y=0;y<h;y++) {
        const uint8_t* s1=src1+srcStride1*(y>>1);
        uint8_t* d=dst1+dstStride1*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32%1 \n\t"
                "movq %1, %%mm0 \n\t"
                "movq 8%1, %%mm2 \n\t"
                "movq 16%1, %%mm4 \n\t"
                "movq 24%1, %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, %0 \n\t"
                MOVNTQ" %%mm1, 8%0 \n\t"
                MOVNTQ" %%mm2, 16%0 \n\t"
                MOVNTQ" %%mm3, 24%0 \n\t"
                MOVNTQ" %%mm4, 32%0 \n\t"
                MOVNTQ" %%mm5, 40%0 \n\t"
                MOVNTQ" %%mm6, 48%0 \n\t"
                MOVNTQ" %%mm7, 56%0"
                :"=m"(d[2*x])
                :"m"(s1[x])
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
    }
    for (y=0;y<h;y++) {
        const uint8_t* s2=src2+srcStride2*(y>>1);
        uint8_t* d=dst2+dstStride2*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32%1 \n\t"
                "movq %1, %%mm0 \n\t"
                "movq 8%1, %%mm2 \n\t"
                "movq 16%1, %%mm4 \n\t"
                "movq 24%1, %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, %0 \n\t"
                MOVNTQ" %%mm1, 8%0 \n\t"
                MOVNTQ" %%mm2, 16%0 \n\t"
                MOVNTQ" %%mm3, 24%0 \n\t"
                MOVNTQ" %%mm4, 32%0 \n\t"
                MOVNTQ" %%mm5, 40%0 \n\t"
                MOVNTQ" %%mm6, 48%0 \n\t"
                MOVNTQ" %%mm7, 56%0"
                :"=m"(d[2*x])
                :"m"(s2[x])
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
                                        uint8_t *dst,
                                        int width, int height,
                                        int srcStride1, int srcStride2,
                                        int srcStride3, int dstStride)
{
    x86_reg x;
    int y,w,h;
    w=width/2; h=height;
    for (y=0;y<h;y++) {
        const uint8_t* yp=src1+srcStride1*y;
        const uint8_t* up=src2+srcStride2*(y>>2);
        const uint8_t* vp=src3+srcStride3*(y>>2);
        uint8_t* d=dst+dstStride*y;
        x=0;
        for (;x<w-7;x+=8) {
            __asm__ volatile(
                PREFETCH" 32(%1, %0) \n\t"
                PREFETCH" 32(%2, %0) \n\t"
                PREFETCH" 32(%3, %0) \n\t"
                "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
                "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
                "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
                "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
                "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
                "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
                "movq %%mm1, %%mm6 \n\t"
                "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
                "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
                "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
                MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
                "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
                "movq 8(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
                MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
                "movq %%mm4, %%mm6 \n\t"
                "movq 16(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm5, %%mm4 \n\t"
                "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
                "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
                MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
                "punpckhbw %%mm5, %%mm6 \n\t"
                "movq 24(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
                MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
                : "+r" (x)
                : "r"(yp), "r" (up), "r"(vp), "r"(d)
                :"memory");
        }
        for (; x<w; x++) {
            const int x2 = x<<2;
            d[8*x+0] = yp[x2];
            d[8*x+1] = up[x];
            d[8*x+2] = yp[x2+1];
            d[8*x+3] = vp[x];
            d[8*x+4] = yp[x2+2];
            d[8*x+5] = up[x];
            d[8*x+6] = yp[x2+3];
            d[8*x+7] = vp[x];
        }
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

/* Copy every second byte of src (the even-indexed ones) into dst;
 * count is the number of output bytes. */
static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
{
    dst +=   count;
    src += 2*count;
    count= - count;

    if(count <= -16) {
        count += 15;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"          // 0x00FF per word: mask for even bytes
            "1: \n\t"
            "movq -30(%1, %0, 2), %%mm0 \n\t"
            "movq -22(%1, %0, 2), %%mm1 \n\t"
            "movq -14(%1, %0, 2), %%mm2 \n\t"
            "movq -6(%1, %0, 2), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"        // keep the even bytes, widened to words
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"    // repack the words into 8 bytes
            "packuswb %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0,-15(%2, %0) \n\t"
            MOVNTQ" %%mm2,- 7(%2, %0) \n\t"
            "add $16, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst)
        );
        count -= 15;
    }
    while(count<0) {
        dst[count]= src[2*count];
        count++;
    }
}
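
/* Illustrative only, not from the original file: extract_even and the
 * helpers below all use the same negative-counter idiom, which lets the
 * asm loop end on "js 1b" with no separate compare: the pointers are
 * biased past the end of the buffers and the index climbs from -count
 * toward zero. The same idiom in plain C (copy_even_ref is a hypothetical
 * name): */
#if 0
static void copy_even_ref(const uint8_t *src, uint8_t *dst, long count)
{
    dst +=   count;         /* bias pointers to the buffer ends */
    src += 2*count;
    count = -count;         /* index starts negative, rises to 0 */
    while (count < 0) {
        dst[count] = src[2*count];
        count++;            /* loop ends exactly when the sign flips */
    }
}
#endif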
#if !COMPILE_TEMPLATE_AMD3DNOW
/* Split the bytes at offsets 0 and 2 (mod 4) of src into dst0 and dst1,
 * e.g. the U and V samples of a UYVY line. */
static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= - count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"        // first stage: keep bytes 0,2 (mod 4)
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"          // second stage: bytes 2 (mod 4) -> dst1
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"        //               bytes 0 (mod 4) -> dst0
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
/* Like extract_even2, but the two input lines src0/src1 are first averaged
 * byte-wise (one line pair -> one chroma line). */
static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
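
/* Illustrative only, not from the original file: note that the two paths
 * in the *avg helpers round differently. PAVGB computes (a + b + 1) >> 1
 * (round half up), while the scalar fallbacks use (a + b) >> 1 (truncate),
 * so SIMD and C results may differ by one LSB. In scalar form (both names
 * hypothetical): */
#if 0
static inline uint8_t avg_pavgb(uint8_t a, uint8_t b) { return (a + b + 1) >> 1; }
static inline uint8_t avg_c    (uint8_t a, uint8_t b) { return (a + b)     >> 1; }
#endif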
#if !COMPILE_TEMPLATE_AMD3DNOW
/* As extract_even2, but for the bytes at offsets 1 and 3 (mod 4),
 * e.g. the U and V samples of a YUYV line. */
static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= - count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    src++;  // bias by one so the scalar tail reads the odd bytes directly
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
/* Like extract_odd2, but byte-wise averaging the line pair src0/src1 first. */
static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    src0++;
    src1++;
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        if(y&1) {
            RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
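
/* Illustrative only, not from the original file: for the 4:2:2 -> 4:2:0
 * conversions above, luma is kept from every line while one chroma line is
 * produced per line pair by averaging the chroma of the two source lines
 * (that is what extract_odd2avg does for YUYV). A scalar sketch for one
 * YUYV line pair p0/p1 (the function name is hypothetical): */
#if 0
static void yuyv_pair_to_420_chroma_ref(const uint8_t *p0, const uint8_t *p1,
                                        uint8_t *u, uint8_t *v, int chromWidth)
{
    int x;
    for (x = 0; x < chromWidth; x++) {
        u[x] = (p0[4*x + 1] + p1[4*x + 1]) >> 1;  /* U of both lines, averaged */
        v[x] = (p0[4*x + 3] + p1[4*x + 3]) >> 1;  /* V likewise */
    }
}
#endif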
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        RENAME(extract_odd2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        if(y&1) {
            RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        RENAME(extract_even2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
#endif /* !COMPILE_TEMPLATE_SSE2 */
static inline void RENAME(rgb2rgb_init)(void)
{
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
    rgb15to16          = RENAME(rgb15to16);
    rgb15tobgr24       = RENAME(rgb15tobgr24);
    rgb15to32          = RENAME(rgb15to32);
    rgb16tobgr24       = RENAME(rgb16tobgr24);
    rgb16to32          = RENAME(rgb16to32);
    rgb16to15          = RENAME(rgb16to15);
    rgb24tobgr16       = RENAME(rgb24tobgr16);
    rgb24tobgr15       = RENAME(rgb24tobgr15);
    rgb24tobgr32       = RENAME(rgb24tobgr32);
    rgb32to16          = RENAME(rgb32to16);
    rgb32to15          = RENAME(rgb32to15);
    rgb32tobgr24       = RENAME(rgb32tobgr24);
    rgb24to15          = RENAME(rgb24to15);
    rgb24to16          = RENAME(rgb24to16);
    rgb24tobgr24       = RENAME(rgb24tobgr24);
    shuffle_bytes_2103 = RENAME(shuffle_bytes_2103);
    rgb32tobgr16       = RENAME(rgb32tobgr16);
    rgb32tobgr15       = RENAME(rgb32tobgr15);
    yv12toyuy2         = RENAME(yv12toyuy2);
    yv12touyvy         = RENAME(yv12touyvy);
    yuv422ptoyuy2      = RENAME(yuv422ptoyuy2);
    yuv422ptouyvy      = RENAME(yuv422ptouyvy);
    yuy2toyv12         = RENAME(yuy2toyv12);
    vu9_to_vu12        = RENAME(vu9_to_vu12);
    yvu9_to_yuy2       = RENAME(yvu9_to_yuy2);
    uyvytoyuv422       = RENAME(uyvytoyuv422);
    yuyvtoyuv422       = RENAME(yuyvtoyuv422);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
    planar2x           = RENAME(planar2x);
#endif /* COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW */
    rgb24toyv12        = RENAME(rgb24toyv12);

    yuyvtoyuv420       = RENAME(yuyvtoyuv420);
    uyvytoyuv420       = RENAME(uyvytoyuv420);
#endif /* !COMPILE_TEMPLATE_SSE2 */

#if !COMPILE_TEMPLATE_AMD3DNOW
    interleaveBytes    = RENAME(interleaveBytes);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
}
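
/* Illustrative only, not from the original file: this template is not a
 * standalone translation unit. The dispatching source defines the
 * COMPILE_TEMPLATE_* flags and the RENAME() macro and then #includes this
 * file once per CPU flavour, after which the matching rgb2rgb_init_*()
 * installs the RENAME()d functions into the global function pointers. A
 * sketch of the pattern (not a verbatim copy of the dispatcher): */
#if 0
#define COMPILE_TEMPLATE_MMX2 1
#define RENAME(a) a ## _MMX2
#include "rgb2rgb_template.c"
/* ...later, once the CPU is known, call rgb2rgb_init_MMX2() */
#endif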