/*
 * software RGB to RGB converter
 * software PAL8 to RGB converter
 * software YUV to YUV converter
 * software YUV to RGB converter
 * Written by Nick Kurshev.
 * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
 * lots of big-endian byte order fixes by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>

#undef PREFETCH
#undef MOVNTQ
#undef EMMS
#undef SFENCE
#undef PAVGB

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB    "pavgusb"
#elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#define PAVGB    "pavgb"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#if COMPILE_TEMPLATE_MMX2
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define MOVNTQ "movq"
#define SFENCE " # nop"
#endif

#if !COMPILE_TEMPLATE_SSE2

#if !COMPILE_TEMPLATE_AMD3DNOW

static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 23;
    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "punpckldq 3%1, %%mm0 \n\t"
            "movd 6%1, %%mm1 \n\t"
            "punpckldq 9%1, %%mm1 \n\t"
            "movd 12%1, %%mm2 \n\t"
            "punpckldq 15%1, %%mm2 \n\t"
            "movd 18%1, %%mm3 \n\t"
            "punpckldq 21%1, %%mm3 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm2 \n\t"
            "por %%mm7, %%mm3 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm1, 8%0 \n\t"
            MOVNTQ" %%mm2, 16%0 \n\t"
            MOVNTQ" %%mm3, 24%0"
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 32;
        s += 24;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 255;
    }
}

#define STORE_BGR24_MMX \
    "psrlq $8, %%mm2 \n\t" \
    "psrlq $8, %%mm3 \n\t" \
    "psrlq $8, %%mm6 \n\t" \
    "psrlq $8, %%mm7 \n\t" \
    "pand "MANGLE(mask24l)", %%mm0 \n\t" \
    "pand "MANGLE(mask24l)", %%mm1 \n\t" \
    "pand "MANGLE(mask24l)", %%mm4 \n\t" \
    "pand "MANGLE(mask24l)", %%mm5 \n\t" \
    "pand "MANGLE(mask24h)", %%mm2 \n\t" \
    "pand "MANGLE(mask24h)", %%mm3 \n\t" \
    "pand "MANGLE(mask24h)", %%mm6 \n\t" \
    "pand "MANGLE(mask24h)", %%mm7 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm6, %%mm4 \n\t" \
    "por %%mm7, %%mm5 \n\t" \
    \
    "movq %%mm1, %%mm2 \n\t" \
    "movq %%mm4, %%mm3 \n\t" \
    "psllq $48, %%mm2 \n\t" \
    "psllq $32, %%mm3 \n\t" \
    "pand "MANGLE(mask24hh)", %%mm2 \n\t" \
    "pand "MANGLE(mask24hhh)", %%mm3 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "psrlq $16, %%mm1 \n\t" \
    "psrlq $32, %%mm4 \n\t" \
    "psllq $16, %%mm5 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "pand "MANGLE(mask24hhhh)", %%mm5 \n\t" \
    "por %%mm5, %%mm4 \n\t" \
    \
    MOVNTQ" %%mm0, %0 \n\t" \
    MOVNTQ" %%mm1, 8%0 \n\t" \
    MOVNTQ" %%mm4, 16%0"

static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 31;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm1 \n\t"
            "movq 16%1, %%mm4 \n\t"
            "movq 24%1, %%mm5 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            STORE_BGR24_MMX
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 24;
        s += 32;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}

/*
 * original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMX2, 3DNOW optimization by Nick Kurshev
 * 32-bit C version and the and&add trick by Michael Niedermayer
 */
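
/*
 * Editor's note, an illustrative sketch of the and&add trick (not part of the
 * original code): RGB555 stores 0RRRRRGGGGGBBBBB and RGB565 wants
 * RRRRRGGGGGGBBBBB, i.e. the R and G fields moved up one bit while B stays
 * put. Since adding a field to itself shifts it left by one,
 *
 *     (x & 0x7FFF) + (x & 0x7FE0)
 *
 * doubles exactly the R+G bits (mask 0x7FE0) and leaves B (mask 0x001F)
 * untouched; the new green LSB comes out as zero. The 0x7FFF7FFF/0x7FE07FE0
 * form in the C fallback below applies the same trick to two packed pixels
 * per 32-bit word. Worked example: x = 0x7FFF (white) gives
 * 0x7FFF + 0x7FE0 = 0xFFDF, i.e. R=31, G=62, B=31 in RGB565.
 */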
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "pand %%mm4, %%mm0 \n\t"
            "pand %%mm4, %%mm2 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*d)
            :"m"(*s)
        );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register unsigned x = *((const uint32_t *)s);
        *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
        d += 4;
        s += 4;
    }
    if (s < end) {
        register unsigned short x = *((const uint16_t *)s);
        *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
    }
}

static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movq %1, %%mm0 \n\t"
            "movq 8%1, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $1, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm3 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*d)
            :"m"(*s)
        );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x = *((const uint32_t *)s);
        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
        s += 4;
        d += 4;
    }
    if (s < end) {
        register uint16_t x = *((const uint16_t *)s);
        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
    }
}

static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
#if 1 // is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $5, %%mm0 \n\t"
        "pslld $11, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
    );
#else
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
#endif
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
    }
}

static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
#if 1 // is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $6, %%mm0 \n\t"
        "pslld $10, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
    );
#else
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
#endif
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
    }
}

static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 4%1, %%mm3 \n\t"
            "punpckldq 8%1, %%mm0 \n\t"
            "punpckldq 12%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32%1 \n\t"
            "movd %1, %%mm0 \n\t"
            "movd 3%1, %%mm3 \n\t"
            "punpckldq 6%1, %%mm0 \n\t"
            "punpckldq 9%1, %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, %0 \n\t"
            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

/*
 * I use a less accurate approximation here by simply left-shifting the input
 * value and filling the low order bits with zeroes. This method improves PNG
 * compression but this scheme cannot reproduce white exactly, since it does
 * not generate an all-ones maximum value; the net effect is to darken the
 * image slightly.
 *
 * The better method should be "left bit replication":
 *
 *    4 3 2 1 0
 *    ---------
 *    1 1 0 1 1
 *
 *    7 6 5 4 3  2 1 0
 *    ----------------
 *    1 1 0 1 1  1 1 0
 *    |=======|  |===|
 *        |      leftmost bits repeated to fill open bits
 *        |
 *    original bits
 */
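
/*
 * Editor's note, a minimal sketch of left bit replication (illustrative only,
 * not used by this file; the helper name is hypothetical):
 *
 *     static inline uint8_t replicate5to8(uint8_t v)
 *     {
 *         return (v << 3) | (v >> 2); // top bits reappear in the low bits
 *     }
 *
 * With this, 0x1F expands to 0xFF, so full white is reproduced exactly, at
 * the cost of the slightly better PNG compression that the plain left shift
 * used below provides.
 */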
  800. static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
  801. {
  802. const uint16_t *end;
  803. const uint16_t *mm_end;
  804. uint8_t *d = dst;
  805. const uint16_t *s = (const uint16_t*)src;
  806. end = s + src_size/2;
  807. __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
  808. mm_end = end - 7;
  809. while (s < mm_end) {
  810. __asm__ volatile(
  811. PREFETCH" 32%1 \n\t"
  812. "movq %1, %%mm0 \n\t"
  813. "movq %1, %%mm1 \n\t"
  814. "movq %1, %%mm2 \n\t"
  815. "pand %2, %%mm0 \n\t"
  816. "pand %3, %%mm1 \n\t"
  817. "pand %4, %%mm2 \n\t"
  818. "psllq $3, %%mm0 \n\t"
  819. "psrlq $2, %%mm1 \n\t"
  820. "psrlq $7, %%mm2 \n\t"
  821. "movq %%mm0, %%mm3 \n\t"
  822. "movq %%mm1, %%mm4 \n\t"
  823. "movq %%mm2, %%mm5 \n\t"
  824. "punpcklwd %5, %%mm0 \n\t"
  825. "punpcklwd %5, %%mm1 \n\t"
  826. "punpcklwd %5, %%mm2 \n\t"
  827. "punpckhwd %5, %%mm3 \n\t"
  828. "punpckhwd %5, %%mm4 \n\t"
  829. "punpckhwd %5, %%mm5 \n\t"
  830. "psllq $8, %%mm1 \n\t"
  831. "psllq $16, %%mm2 \n\t"
  832. "por %%mm1, %%mm0 \n\t"
  833. "por %%mm2, %%mm0 \n\t"
  834. "psllq $8, %%mm4 \n\t"
  835. "psllq $16, %%mm5 \n\t"
  836. "por %%mm4, %%mm3 \n\t"
  837. "por %%mm5, %%mm3 \n\t"
  838. "movq %%mm0, %%mm6 \n\t"
  839. "movq %%mm3, %%mm7 \n\t"
  840. "movq 8%1, %%mm0 \n\t"
  841. "movq 8%1, %%mm1 \n\t"
  842. "movq 8%1, %%mm2 \n\t"
  843. "pand %2, %%mm0 \n\t"
  844. "pand %3, %%mm1 \n\t"
  845. "pand %4, %%mm2 \n\t"
  846. "psllq $3, %%mm0 \n\t"
  847. "psrlq $2, %%mm1 \n\t"
  848. "psrlq $7, %%mm2 \n\t"
  849. "movq %%mm0, %%mm3 \n\t"
  850. "movq %%mm1, %%mm4 \n\t"
  851. "movq %%mm2, %%mm5 \n\t"
  852. "punpcklwd %5, %%mm0 \n\t"
  853. "punpcklwd %5, %%mm1 \n\t"
  854. "punpcklwd %5, %%mm2 \n\t"
  855. "punpckhwd %5, %%mm3 \n\t"
  856. "punpckhwd %5, %%mm4 \n\t"
  857. "punpckhwd %5, %%mm5 \n\t"
  858. "psllq $8, %%mm1 \n\t"
  859. "psllq $16, %%mm2 \n\t"
  860. "por %%mm1, %%mm0 \n\t"
  861. "por %%mm2, %%mm0 \n\t"
  862. "psllq $8, %%mm4 \n\t"
  863. "psllq $16, %%mm5 \n\t"
  864. "por %%mm4, %%mm3 \n\t"
  865. "por %%mm5, %%mm3 \n\t"
  866. :"=m"(*d)
  867. :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
  868. :"memory");
  869. /* borrowed 32 to 24 */
  870. __asm__ volatile(
  871. "movq %%mm0, %%mm4 \n\t"
  872. "movq %%mm3, %%mm5 \n\t"
  873. "movq %%mm6, %%mm0 \n\t"
  874. "movq %%mm7, %%mm1 \n\t"
  875. "movq %%mm4, %%mm6 \n\t"
  876. "movq %%mm5, %%mm7 \n\t"
  877. "movq %%mm0, %%mm2 \n\t"
  878. "movq %%mm1, %%mm3 \n\t"
  879. STORE_BGR24_MMX
  880. :"=m"(*d)
  881. :"m"(*s)
  882. :"memory");
  883. d += 24;
  884. s += 8;
  885. }
  886. __asm__ volatile(SFENCE:::"memory");
  887. __asm__ volatile(EMMS:::"memory");
  888. while (s < end) {
  889. register uint16_t bgr;
  890. bgr = *s++;
  891. *d++ = (bgr&0x1F)<<3;
  892. *d++ = (bgr&0x3E0)>>2;
  893. *d++ = (bgr&0x7C00)>>7;
  894. }
  895. }
  896. static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
  897. {
  898. const uint16_t *end;
  899. const uint16_t *mm_end;
  900. uint8_t *d = (uint8_t *)dst;
  901. const uint16_t *s = (const uint16_t *)src;
  902. end = s + src_size/2;
  903. __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
  904. mm_end = end - 7;
  905. while (s < mm_end) {
  906. __asm__ volatile(
  907. PREFETCH" 32%1 \n\t"
  908. "movq %1, %%mm0 \n\t"
  909. "movq %1, %%mm1 \n\t"
  910. "movq %1, %%mm2 \n\t"
  911. "pand %2, %%mm0 \n\t"
  912. "pand %3, %%mm1 \n\t"
  913. "pand %4, %%mm2 \n\t"
  914. "psllq $3, %%mm0 \n\t"
  915. "psrlq $3, %%mm1 \n\t"
  916. "psrlq $8, %%mm2 \n\t"
  917. "movq %%mm0, %%mm3 \n\t"
  918. "movq %%mm1, %%mm4 \n\t"
  919. "movq %%mm2, %%mm5 \n\t"
  920. "punpcklwd %5, %%mm0 \n\t"
  921. "punpcklwd %5, %%mm1 \n\t"
  922. "punpcklwd %5, %%mm2 \n\t"
  923. "punpckhwd %5, %%mm3 \n\t"
  924. "punpckhwd %5, %%mm4 \n\t"
  925. "punpckhwd %5, %%mm5 \n\t"
  926. "psllq $8, %%mm1 \n\t"
  927. "psllq $16, %%mm2 \n\t"
  928. "por %%mm1, %%mm0 \n\t"
  929. "por %%mm2, %%mm0 \n\t"
  930. "psllq $8, %%mm4 \n\t"
  931. "psllq $16, %%mm5 \n\t"
  932. "por %%mm4, %%mm3 \n\t"
  933. "por %%mm5, %%mm3 \n\t"
  934. "movq %%mm0, %%mm6 \n\t"
  935. "movq %%mm3, %%mm7 \n\t"
  936. "movq 8%1, %%mm0 \n\t"
  937. "movq 8%1, %%mm1 \n\t"
  938. "movq 8%1, %%mm2 \n\t"
  939. "pand %2, %%mm0 \n\t"
  940. "pand %3, %%mm1 \n\t"
  941. "pand %4, %%mm2 \n\t"
  942. "psllq $3, %%mm0 \n\t"
  943. "psrlq $3, %%mm1 \n\t"
  944. "psrlq $8, %%mm2 \n\t"
  945. "movq %%mm0, %%mm3 \n\t"
  946. "movq %%mm1, %%mm4 \n\t"
  947. "movq %%mm2, %%mm5 \n\t"
  948. "punpcklwd %5, %%mm0 \n\t"
  949. "punpcklwd %5, %%mm1 \n\t"
  950. "punpcklwd %5, %%mm2 \n\t"
  951. "punpckhwd %5, %%mm3 \n\t"
  952. "punpckhwd %5, %%mm4 \n\t"
  953. "punpckhwd %5, %%mm5 \n\t"
  954. "psllq $8, %%mm1 \n\t"
  955. "psllq $16, %%mm2 \n\t"
  956. "por %%mm1, %%mm0 \n\t"
  957. "por %%mm2, %%mm0 \n\t"
  958. "psllq $8, %%mm4 \n\t"
  959. "psllq $16, %%mm5 \n\t"
  960. "por %%mm4, %%mm3 \n\t"
  961. "por %%mm5, %%mm3 \n\t"
  962. :"=m"(*d)
  963. :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
  964. :"memory");
  965. /* borrowed 32 to 24 */
  966. __asm__ volatile(
  967. "movq %%mm0, %%mm4 \n\t"
  968. "movq %%mm3, %%mm5 \n\t"
  969. "movq %%mm6, %%mm0 \n\t"
  970. "movq %%mm7, %%mm1 \n\t"
  971. "movq %%mm4, %%mm6 \n\t"
  972. "movq %%mm5, %%mm7 \n\t"
  973. "movq %%mm0, %%mm2 \n\t"
  974. "movq %%mm1, %%mm3 \n\t"
  975. STORE_BGR24_MMX
  976. :"=m"(*d)
  977. :"m"(*s)
  978. :"memory");
  979. d += 24;
  980. s += 8;
  981. }
  982. __asm__ volatile(SFENCE:::"memory");
  983. __asm__ volatile(EMMS:::"memory");
  984. while (s < end) {
  985. register uint16_t bgr;
  986. bgr = *s++;
  987. *d++ = (bgr&0x1F)<<3;
  988. *d++ = (bgr&0x7E0)>>3;
  989. *d++ = (bgr&0xF800)>>8;
  990. }
  991. }
  992. /*
  993. * mm0 = 00 B3 00 B2 00 B1 00 B0
  994. * mm1 = 00 G3 00 G2 00 G1 00 G0
  995. * mm2 = 00 R3 00 R2 00 R1 00 R0
  996. * mm6 = FF FF FF FF FF FF FF FF
  997. * mm7 = 00 00 00 00 00 00 00 00
  998. */
  999. #define PACK_RGB32 \
  1000. "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
  1001. "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
  1002. "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
  1003. "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
  1004. "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
  1005. "movq %%mm0, %%mm3 \n\t" \
  1006. "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
  1007. "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
  1008. MOVNTQ" %%mm0, %0 \n\t" \
  1009. MOVNTQ" %%mm3, 8%0 \n\t" \
  1010. static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size)
  1011. {
  1012. const uint16_t *end;
  1013. const uint16_t *mm_end;
  1014. uint8_t *d = dst;
  1015. const uint16_t *s = (const uint16_t *)src;
  1016. end = s + src_size/2;
  1017. __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
  1018. __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
  1019. __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
  1020. mm_end = end - 3;
  1021. while (s < mm_end) {
  1022. __asm__ volatile(
  1023. PREFETCH" 32%1 \n\t"
  1024. "movq %1, %%mm0 \n\t"
  1025. "movq %1, %%mm1 \n\t"
  1026. "movq %1, %%mm2 \n\t"
  1027. "pand %2, %%mm0 \n\t"
  1028. "pand %3, %%mm1 \n\t"
  1029. "pand %4, %%mm2 \n\t"
  1030. "psllq $3, %%mm0 \n\t"
  1031. "psrlq $2, %%mm1 \n\t"
  1032. "psrlq $7, %%mm2 \n\t"
  1033. PACK_RGB32
  1034. :"=m"(*d)
  1035. :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
  1036. :"memory");
  1037. d += 16;
  1038. s += 4;
  1039. }
  1040. __asm__ volatile(SFENCE:::"memory");
  1041. __asm__ volatile(EMMS:::"memory");
  1042. while (s < end) {
  1043. register uint16_t bgr;
  1044. bgr = *s++;
  1045. *d++ = (bgr&0x1F)<<3;
  1046. *d++ = (bgr&0x3E0)>>2;
  1047. *d++ = (bgr&0x7C00)>>7;
  1048. *d++ = 255;
  1049. }
  1050. }
  1051. static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size)
  1052. {
  1053. const uint16_t *end;
  1054. const uint16_t *mm_end;
  1055. uint8_t *d = dst;
  1056. const uint16_t *s = (const uint16_t*)src;
  1057. end = s + src_size/2;
  1058. __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
  1059. __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
  1060. __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
  1061. mm_end = end - 3;
  1062. while (s < mm_end) {
  1063. __asm__ volatile(
  1064. PREFETCH" 32%1 \n\t"
  1065. "movq %1, %%mm0 \n\t"
  1066. "movq %1, %%mm1 \n\t"
  1067. "movq %1, %%mm2 \n\t"
  1068. "pand %2, %%mm0 \n\t"
  1069. "pand %3, %%mm1 \n\t"
  1070. "pand %4, %%mm2 \n\t"
  1071. "psllq $3, %%mm0 \n\t"
  1072. "psrlq $3, %%mm1 \n\t"
  1073. "psrlq $8, %%mm2 \n\t"
  1074. PACK_RGB32
  1075. :"=m"(*d)
  1076. :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
  1077. :"memory");
  1078. d += 16;
  1079. s += 4;
  1080. }
  1081. __asm__ volatile(SFENCE:::"memory");
  1082. __asm__ volatile(EMMS:::"memory");
  1083. while (s < end) {
  1084. register uint16_t bgr;
  1085. bgr = *s++;
  1086. *d++ = (bgr&0x1F)<<3;
  1087. *d++ = (bgr&0x7E0)>>3;
  1088. *d++ = (bgr&0xF800)>>8;
  1089. *d++ = 255;
  1090. }
  1091. }
  1092. static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size)
  1093. {
  1094. x86_reg idx = 15 - src_size;
  1095. const uint8_t *s = src-idx;
  1096. uint8_t *d = dst-idx;
  1097. __asm__ volatile(
  1098. "test %0, %0 \n\t"
  1099. "jns 2f \n\t"
  1100. PREFETCH" (%1, %0) \n\t"
  1101. "movq %3, %%mm7 \n\t"
  1102. "pxor %4, %%mm7 \n\t"
  1103. "movq %%mm7, %%mm6 \n\t"
  1104. "pxor %5, %%mm7 \n\t"
  1105. ".p2align 4 \n\t"
  1106. "1: \n\t"
  1107. PREFETCH" 32(%1, %0) \n\t"
  1108. "movq (%1, %0), %%mm0 \n\t"
  1109. "movq 8(%1, %0), %%mm1 \n\t"
  1110. # if COMPILE_TEMPLATE_MMX2
  1111. "pshufw $177, %%mm0, %%mm3 \n\t"
  1112. "pshufw $177, %%mm1, %%mm5 \n\t"
  1113. "pand %%mm7, %%mm0 \n\t"
  1114. "pand %%mm6, %%mm3 \n\t"
  1115. "pand %%mm7, %%mm1 \n\t"
  1116. "pand %%mm6, %%mm5 \n\t"
  1117. "por %%mm3, %%mm0 \n\t"
  1118. "por %%mm5, %%mm1 \n\t"
  1119. # else
  1120. "movq %%mm0, %%mm2 \n\t"
  1121. "movq %%mm1, %%mm4 \n\t"
  1122. "pand %%mm7, %%mm0 \n\t"
  1123. "pand %%mm6, %%mm2 \n\t"
  1124. "pand %%mm7, %%mm1 \n\t"
  1125. "pand %%mm6, %%mm4 \n\t"
  1126. "movq %%mm2, %%mm3 \n\t"
  1127. "movq %%mm4, %%mm5 \n\t"
  1128. "pslld $16, %%mm2 \n\t"
  1129. "psrld $16, %%mm3 \n\t"
  1130. "pslld $16, %%mm4 \n\t"
  1131. "psrld $16, %%mm5 \n\t"
  1132. "por %%mm2, %%mm0 \n\t"
  1133. "por %%mm4, %%mm1 \n\t"
  1134. "por %%mm3, %%mm0 \n\t"
  1135. "por %%mm5, %%mm1 \n\t"
  1136. # endif
  1137. MOVNTQ" %%mm0, (%2, %0) \n\t"
  1138. MOVNTQ" %%mm1, 8(%2, %0) \n\t"
  1139. "add $16, %0 \n\t"
  1140. "js 1b \n\t"
  1141. SFENCE" \n\t"
  1142. EMMS" \n\t"
  1143. "2: \n\t"
  1144. : "+&r"(idx)
  1145. : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
  1146. : "memory");
  1147. for (; idx<15; idx+=4) {
  1148. register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
  1149. v &= 0xff00ff;
  1150. *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
  1151. }
  1152. }
  1153. static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
  1154. {
  1155. unsigned i;
  1156. x86_reg mmx_size= 23 - src_size;
  1157. __asm__ volatile (
  1158. "test %%"REG_a", %%"REG_a" \n\t"
  1159. "jns 2f \n\t"
  1160. "movq "MANGLE(mask24r)", %%mm5 \n\t"
  1161. "movq "MANGLE(mask24g)", %%mm6 \n\t"
  1162. "movq "MANGLE(mask24b)", %%mm7 \n\t"
  1163. ".p2align 4 \n\t"
  1164. "1: \n\t"
  1165. PREFETCH" 32(%1, %%"REG_a") \n\t"
  1166. "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
  1167. "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
  1168. "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
  1169. "psllq $16, %%mm0 \n\t" // 00 BGR BGR
  1170. "pand %%mm5, %%mm0 \n\t"
  1171. "pand %%mm6, %%mm1 \n\t"
  1172. "pand %%mm7, %%mm2 \n\t"
  1173. "por %%mm0, %%mm1 \n\t"
  1174. "por %%mm2, %%mm1 \n\t"
  1175. "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
  1176. MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG
  1177. "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
  1178. "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
  1179. "pand %%mm7, %%mm0 \n\t"
  1180. "pand %%mm5, %%mm1 \n\t"
  1181. "pand %%mm6, %%mm2 \n\t"
  1182. "por %%mm0, %%mm1 \n\t"
  1183. "por %%mm2, %%mm1 \n\t"
  1184. "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
  1185. MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R
  1186. "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
  1187. "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
  1188. "pand %%mm6, %%mm0 \n\t"
  1189. "pand %%mm7, %%mm1 \n\t"
  1190. "pand %%mm5, %%mm2 \n\t"
  1191. "por %%mm0, %%mm1 \n\t"
  1192. "por %%mm2, %%mm1 \n\t"
  1193. MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t"
  1194. "add $24, %%"REG_a" \n\t"
  1195. " js 1b \n\t"
  1196. "2: \n\t"
  1197. : "+a" (mmx_size)
  1198. : "r" (src-mmx_size), "r"(dst-mmx_size)
  1199. );
  1200. __asm__ volatile(SFENCE:::"memory");
  1201. __asm__ volatile(EMMS:::"memory");
  1202. if (mmx_size==23) return; //finished, was multiple of 8
  1203. src+= src_size;
  1204. dst+= src_size;
  1205. src_size= 23-mmx_size;
  1206. src-= src_size;
  1207. dst-= src_size;
  1208. for (i=0; i<src_size; i+=3) {
  1209. register uint8_t x;
  1210. x = src[i + 2];
  1211. dst[i + 1] = src[i + 1];
  1212. dst[i + 2] = src[i + 0];
  1213. dst[i + 0] = x;
  1214. }
  1215. }
  1216. static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1217. int width, int height,
  1218. int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
  1219. {
  1220. int y;
  1221. const x86_reg chromWidth= width>>1;
  1222. for (y=0; y<height; y++) {
  1223. //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
  1224. __asm__ volatile(
  1225. "xor %%"REG_a", %%"REG_a" \n\t"
  1226. ".p2align 4 \n\t"
  1227. "1: \n\t"
  1228. PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
  1229. PREFETCH" 32(%2, %%"REG_a") \n\t"
  1230. PREFETCH" 32(%3, %%"REG_a") \n\t"
  1231. "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
  1232. "movq %%mm0, %%mm2 \n\t" // U(0)
  1233. "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
  1234. "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
  1235. "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
  1236. "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
  1237. "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
  1238. "movq %%mm3, %%mm4 \n\t" // Y(0)
  1239. "movq %%mm5, %%mm6 \n\t" // Y(8)
  1240. "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
  1241. "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
  1242. "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
  1243. "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
  1244. MOVNTQ" %%mm3, (%0, %%"REG_a", 4) \n\t"
  1245. MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
  1246. MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4) \n\t"
  1247. MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
  1248. "add $8, %%"REG_a" \n\t"
  1249. "cmp %4, %%"REG_a" \n\t"
  1250. " jb 1b \n\t"
  1251. ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
  1252. : "%"REG_a
  1253. );
  1254. if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
  1255. usrc += chromStride;
  1256. vsrc += chromStride;
  1257. }
  1258. ysrc += lumStride;
  1259. dst += dstStride;
  1260. }
  1261. __asm__(EMMS" \n\t"
  1262. SFENCE" \n\t"
  1263. :::"memory");
  1264. }
  1265. /**
  1266. * Height should be a multiple of 2 and width should be a multiple of 16.
  1267. * (If this is a problem for anyone then tell me, and I will fix it.)
  1268. */
  1269. static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1270. int width, int height,
  1271. int lumStride, int chromStride, int dstStride)
  1272. {
  1273. //FIXME interpolate chroma
  1274. RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
  1275. }
  1276. static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1277. int width, int height,
  1278. int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
  1279. {
  1280. int y;
  1281. const x86_reg chromWidth= width>>1;
  1282. for (y=0; y<height; y++) {
  1283. //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
  1284. __asm__ volatile(
  1285. "xor %%"REG_a", %%"REG_a" \n\t"
  1286. ".p2align 4 \n\t"
  1287. "1: \n\t"
  1288. PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
  1289. PREFETCH" 32(%2, %%"REG_a") \n\t"
  1290. PREFETCH" 32(%3, %%"REG_a") \n\t"
  1291. "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
  1292. "movq %%mm0, %%mm2 \n\t" // U(0)
  1293. "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
  1294. "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
  1295. "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
  1296. "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
  1297. "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
  1298. "movq %%mm0, %%mm4 \n\t" // Y(0)
  1299. "movq %%mm2, %%mm6 \n\t" // Y(8)
  1300. "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
  1301. "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
  1302. "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
  1303. "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
  1304. MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t"
  1305. MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
  1306. MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t"
  1307. MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
  1308. "add $8, %%"REG_a" \n\t"
  1309. "cmp %4, %%"REG_a" \n\t"
  1310. " jb 1b \n\t"
  1311. ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
  1312. : "%"REG_a
  1313. );
  1314. if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
  1315. usrc += chromStride;
  1316. vsrc += chromStride;
  1317. }
  1318. ysrc += lumStride;
  1319. dst += dstStride;
  1320. }
  1321. __asm__(EMMS" \n\t"
  1322. SFENCE" \n\t"
  1323. :::"memory");
  1324. }
  1325. /**
  1326. * Height should be a multiple of 2 and width should be a multiple of 16
  1327. * (If this is a problem for anyone then tell me, and I will fix it.)
  1328. */
  1329. static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1330. int width, int height,
  1331. int lumStride, int chromStride, int dstStride)
  1332. {
  1333. //FIXME interpolate chroma
  1334. RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
  1335. }
  1336. /**
  1337. * Width should be a multiple of 16.
  1338. */
  1339. static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1340. int width, int height,
  1341. int lumStride, int chromStride, int dstStride)
  1342. {
  1343. RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
  1344. }
  1345. /**
  1346. * Width should be a multiple of 16.
  1347. */
  1348. static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  1349. int width, int height,
  1350. int lumStride, int chromStride, int dstStride)
  1351. {
  1352. RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
  1353. }
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
            "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
            "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
            "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        ydst += lumStride;
        src  += srcStride;

        // second line of the pair: luma only; note that mm7 still holds the
        // 0x00FF mask set up by the asm block above
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
            "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
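
/*
 * Scalar sketch (not compiled) of the deinterleave performed above: even
 * source lines contribute Y, U and V, odd lines contribute Y only, which is
 * what the two inline-asm blocks per loop iteration implement with
 * pand/psrlw/packuswb. Names are local to this sketch.
 */
#if 0
static void yuy2toyv12_ref(const uint8_t *src, uint8_t *ydst, uint8_t *udst,
                           uint8_t *vdst, int width, int height,
                           int lumStride, int chromStride, int srcStride)
{
    int x, y;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            ydst[x] = src[2*x];              /* Y = even source bytes */
        if (!(y & 1)) {                      /* chroma from even lines only */
            for (x = 0; x < width/2; x++) {
                udst[x] = src[4*x + 1];
                vdst[x] = src[4*x + 3];
            }
            udst += chromStride;
            vdst += chromStride;
        }
        ydst += lumStride;
        src  += srcStride;
    }
}
#endif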
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride)
{
    int x,y;

    dst[0]= src[0];

    // first line
    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    dst+= dstStride;

    for (y=1; y<srcHeight; y++) {
        x86_reg mmxSize= srcWidth&~15;

        if (mmxSize) {
        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(mmx_ff)", %%mm0 \n\t"
            "movq (%0, %%"REG_a"), %%mm4 \n\t"
            "movq %%mm4, %%mm2 \n\t"
            "psllq $8, %%mm4 \n\t"
            "pand %%mm0, %%mm2 \n\t"
            "por %%mm2, %%mm4 \n\t"
            "movq (%1, %%"REG_a"), %%mm5 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "psllq $8, %%mm5 \n\t"
            "pand %%mm0, %%mm3 \n\t"
            "por %%mm3, %%mm5 \n\t"
            "1: \n\t"
            "movq (%0, %%"REG_a"), %%mm0 \n\t"
            "movq (%1, %%"REG_a"), %%mm1 \n\t"
            "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
            "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpckhbw %%mm3, %%mm7 \n\t"
            "punpcklbw %%mm2, %%mm4 \n\t"
            "punpckhbw %%mm2, %%mm6 \n\t"
            MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
            "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
            " js 1b \n\t"
            :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
               "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
               "g" (-mmxSize)
            : "%"REG_a
        );
        } else {
            // srcWidth < 16: nothing for the MMX loop to do, and starting
            // the scalar loop below at mmxSize-1 = -1 would write out of
            // bounds, so let it handle the whole row from x = 0 instead
            mmxSize = 1;
        }

        for (x=mmxSize-1; x<srcWidth-1; x++) {
            dst[2*x          +1]= (3*src[x+0] +   src[x+srcStride+1])>>2;
            dst[2*x+dstStride+2]= (  src[x+0] + 3*src[x+srcStride+1])>>2;
            dst[2*x+dstStride+1]= (  src[x+1] + 3*src[x+srcStride  ])>>2;
            dst[2*x          +2]= (3*src[x+1] +   src[x+srcStride  ])>>2;
        }
        dst[srcWidth*2 -1            ]= (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride])>>2;
        dst[srcWidth*2 -1 + dstStride]= (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;

        dst+=dstStride*2;
        src+=srcStride;
    }

    // last line
    dst[0]= src[0];

    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW */
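
/*
 * The upscaler above doubles a plane in both directions; each new sample is
 * a 3:1 weighted average of its two nearest source samples. The MMX path
 * gets the weights from two chained PAVGB ops (avg(a, avg(a, b)), which
 * approximates (3*a+b)>>2 up to rounding). Scalar sketch of the horizontal
 * case, mirroring the first/last-line loops (not compiled):
 */
#if 0
static void upscale_row_2x(const uint8_t *src, uint8_t *dst, int srcWidth)
{
    int x;
    dst[0] = src[0];
    for (x = 0; x < srcWidth - 1; x++) {
        dst[2*x + 1] = (3*src[x] +   src[x+1]) >> 2;  /* closer to src[x]   */
        dst[2*x + 2] = (  src[x] + 3*src[x+1]) >> 2;  /* closer to src[x+1] */
    }
    dst[2*srcWidth - 1] = src[srcWidth - 1];
}
#endif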
#if !COMPILE_TEMPLATE_AMD3DNOW
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line, others are ignored.
 * FIXME: Write HQ version.
 */
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
            "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
            "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        ydst += lumStride;
        src  += srcStride;

        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // UYVY UYVY(12)
            "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );

        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
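
/*
 * UYVY differs from YUY2 only in byte order (U0 Y0 V0 Y1 vs Y0 U0 Y1 V1), so
 * the routine above is yuy2toyv12 with the pand/psrlw roles swapped: luma is
 * recovered with psrlw $8 (high bytes) and chroma with the 0x00FF mask.
 * Scalar sketch of one chroma-carrying line (not compiled):
 */
#if 0
static void uyvy_line_to_planes(const uint8_t *src, uint8_t *ydst,
                                uint8_t *udst, uint8_t *vdst, int width)
{
    int x;
    for (x = 0; x < width/2; x++) {
        udst[x]       = src[4*x + 0];
        ydst[2*x + 0] = src[4*x + 1];
        vdst[x]       = src[4*x + 2];
        ydst[2*x + 1] = src[4*x + 3];
    }
}
#endif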
/**
 * Height should be a multiple of 2 and width should be a multiple of 2.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line,
 * others are ignored in the C version.
 * FIXME: Write HQ version.
 */
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                       int width, int height,
                                       int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height-2; y+=2) {
        int i;
        for (i=0; i<2; i++) {
            __asm__ volatile(
                "mov %2, %%"REG_a" \n\t"
                "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
                "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
                ".p2align 4 \n\t"
                "1: \n\t"
                PREFETCH" 64(%0, %%"REG_d") \n\t"
                "movd (%0, %%"REG_d"), %%mm0 \n\t"
                "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
                "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm0 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
                "psrad $8, %%mm0 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
#endif
                "packssdw %%mm1, %%mm0 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm0 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "packssdw %%mm2, %%mm0 \n\t"
                "psraw $7, %%mm0 \n\t"
                "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
                "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
                "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm4 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
                "psrad $8, %%mm4 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
#endif
                "packssdw %%mm1, %%mm4 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm4 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "add $24, %%"REG_d" \n\t"
                "packssdw %%mm2, %%mm4 \n\t"
                "psraw $7, %%mm4 \n\t"
                "packuswb %%mm4, %%mm0 \n\t"
                "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
                MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
                "add $8, %%"REG_a" \n\t"
                " js 1b \n\t"
                : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width)
                : "%"REG_a, "%"REG_d
            );
            ydst += lumStride;
            src  += srcStride;
        }
        src -= srcStride*2;
        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
            "pxor %%mm7, %%mm7 \n\t"
            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
            "add %%"REG_d", %%"REG_d" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_d") \n\t"
            PREFETCH" 64(%1, %%"REG_d") \n\t"
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq (%0, %%"REG_d"), %%mm0 \n\t"
            "movq (%1, %%"REG_d"), %%mm1 \n\t"
            "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm0 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd (%0, %%"REG_d"), %%mm0 \n\t"
            "movd (%1, %%"REG_d"), %%mm1 \n\t"
            "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm0 \n\t"
            "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm4, %%mm2 \n\t"
            "psrlw $2, %%mm0 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm0, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm0 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm0 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm0 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm0 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
            "psraw $7, %%mm0 \n\t"
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm4, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm4 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm4 \n\t"
            "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
            "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm5 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm5, %%mm2 \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "psrlw $2, %%mm4 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm4, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm4 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm4 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm4 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm4 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "add $24, %%"REG_d" \n\t"
            "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
            "psraw $7, %%mm4 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "punpckldq %%mm4, %%mm0 \n\t"
            "punpckhdq %%mm4, %%mm1 \n\t"
            "packsswb %%mm1, %%mm0 \n\t"
            "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
            "movd %%mm0, (%2, %%"REG_a") \n\t"
            "punpckhdq %%mm0, %%mm0 \n\t"
            "movd %%mm0, (%3, %%"REG_a") \n\t"
            "add $4, %%"REG_a" \n\t"
            " js 1b \n\t"
            : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
            : "%"REG_a, "%"REG_d
        );

        udst += chromStride;
        vdst += chromStride;
        src  += srcStride*2;
    }

    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");

    rgb24toyv12_c(src, ydst, udst, vdst, width, height-y, lumStride, chromStride, srcStride);
}
#endif /* !COMPILE_TEMPLATE_SSE2 */
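
/*
 * Scalar sketch (not compiled) of the luma path above: each Y output is a
 * fixed-point dot product of the packed B,G,R bytes with ff_bgr2YCoeff,
 * renormalized and offset by ff_bgr2YOffset. The constants below are the
 * familiar BT.601 limited-range integer approximation and are illustrative
 * only; the authoritative values are the ff_bgr2* tables.
 */
#if 0
static uint8_t rgb_to_y_ref(uint8_t r, uint8_t g, uint8_t b)
{
    /* Y = 16 + (66*R + 129*G + 25*B + 128) >> 8 */
    return (uint8_t)(16 + ((66*r + 129*g + 25*b + 128) >> 8));
}
#endif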
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
                                    int width, int height, int src1Stride,
                                    int src2Stride, int dstStride)
{
    int h;

    for (h=0; h < height; h++) {
        int w;

#if COMPILE_TEMPLATE_SSE2
        // note: movdqa/movntdq require 16-byte-aligned rows in all three buffers
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
            "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
            "punpcklbw %%xmm2, %%xmm0 \n\t"
            "punpckhbw %%xmm2, %%xmm1 \n\t"
            "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t"
            "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a
        );
#else
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movq (%1, %%"REG_a"), %%mm0 \n\t"
            "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "movq (%2, %%"REG_a"), %%mm4 \n\t"
            "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
            "punpcklbw %%mm4, %%mm0 \n\t"
            "punpckhbw %%mm4, %%mm1 \n\t"
            "punpcklbw %%mm5, %%mm2 \n\t"
            "punpckhbw %%mm5, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a
        );
#endif
        for (w= (width&(~15)); w < width; w++) {
            dest[2*w+0] = src1[w];
            dest[2*w+1] = src2[w];
        }
        dest += dstStride;
        src1 += src1Stride;
        src2 += src2Stride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
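
/*
 * interleaveBytes() zips two byte planes into one, e.g. combining separate
 * U and V planes into the interleaved UV plane of an NV12-style layout.
 * Hypothetical usage sketch; all buffer and stride names are placeholders:
 */
#if 0
    interleaveBytes(uPlane, vPlane, uvPlane, chromaW, chromaH,
                    uStride, vStride, uvStride);
    /* afterwards, on each line: uvPlane[2*i]   == uPlane[i],
     *                           uvPlane[2*i+1] == vPlane[i] */
#endif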
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
                                       uint8_t *dst1, uint8_t *dst2,
                                       int width, int height,
                                       int srcStride1, int srcStride2,
                                       int dstStride1, int dstStride2)
{
    x86_reg y;
    int x,w,h;

    w=width/2; h=height/2;

    __asm__ volatile(
        PREFETCH" %0 \n\t"
        PREFETCH" %1 \n\t"
        ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");

    for (y=0;y<h;y++) {
        const uint8_t* s1=src1+srcStride1*(y>>1);
        uint8_t* d=dst1+dstStride1*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32%1 \n\t"
                "movq %1, %%mm0 \n\t"
                "movq 8%1, %%mm2 \n\t"
                "movq 16%1, %%mm4 \n\t"
                "movq 24%1, %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, %0 \n\t"
                MOVNTQ" %%mm1, 8%0 \n\t"
                MOVNTQ" %%mm2, 16%0 \n\t"
                MOVNTQ" %%mm3, 24%0 \n\t"
                MOVNTQ" %%mm4, 32%0 \n\t"
                MOVNTQ" %%mm5, 40%0 \n\t"
                MOVNTQ" %%mm6, 48%0 \n\t"
                MOVNTQ" %%mm7, 56%0"
                :"=m"(d[2*x])
                :"m"(s1[x])
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
    }
    for (y=0;y<h;y++) {
        const uint8_t* s2=src2+srcStride2*(y>>1);
        uint8_t* d=dst2+dstStride2*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32%1 \n\t"
                "movq %1, %%mm0 \n\t"
                "movq 8%1, %%mm2 \n\t"
                "movq 16%1, %%mm4 \n\t"
                "movq 24%1, %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, %0 \n\t"
                MOVNTQ" %%mm1, 8%0 \n\t"
                MOVNTQ" %%mm2, 16%0 \n\t"
                MOVNTQ" %%mm3, 24%0 \n\t"
                MOVNTQ" %%mm4, 32%0 \n\t"
                MOVNTQ" %%mm5, 40%0 \n\t"
                MOVNTQ" %%mm6, 48%0 \n\t"
                MOVNTQ" %%mm7, 56%0"
                :"=m"(d[2*x])
                :"m"(s2[x])
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
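
/*
 * The punpck{l,h}bw reg,reg pairs above unpack a register with itself, which
 * duplicates each byte in place (b0 b1 b2 b3 ... becomes b0 b0 b1 b1 ...);
 * that is how one chroma sample is stretched to two when widening 4:1:0
 * chroma. Scalar sketch of one row (not compiled):
 */
#if 0
static void duplicate_bytes_ref(const uint8_t *s, uint8_t *d, int n)
{
    int x;
    for (x = 0; x < n; x++)
        d[2*x] = d[2*x + 1] = s[x];
}
#endif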
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
                                        uint8_t *dst,
                                        int width, int height,
                                        int srcStride1, int srcStride2,
                                        int srcStride3, int dstStride)
{
    x86_reg x;
    int y,w,h;

    w=width/2; h=height;

    for (y=0;y<h;y++) {
        const uint8_t* yp=src1+srcStride1*y;
        const uint8_t* up=src2+srcStride2*(y>>2);
        const uint8_t* vp=src3+srcStride3*(y>>2);
        uint8_t* d=dst+dstStride*y;
        x=0;
        for (;x<w-7;x+=8) {
            __asm__ volatile(
                PREFETCH" 32(%1, %0) \n\t"
                PREFETCH" 32(%2, %0) \n\t"
                PREFETCH" 32(%3, %0) \n\t"
                "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
                "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
                "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
                "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
                "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
                "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
                "movq %%mm1, %%mm6 \n\t"
                "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
                "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
                "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
                MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
                "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
                "movq 8(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
                MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
                "movq %%mm4, %%mm6 \n\t"
                "movq 16(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm5, %%mm4 \n\t"
                "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
                "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
                MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
                "punpckhbw %%mm5, %%mm6 \n\t"
                "movq 24(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
                MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
                : "+r" (x)
                : "r"(yp), "r" (up), "r"(vp), "r"(d)
                :"memory");
        }
        for (; x<w; x++) {
            const int x2 = x<<2;
            d[8*x+0] = yp[x2];
            d[8*x+1] = up[x];
            d[8*x+2] = yp[x2+1];
            d[8*x+3] = vp[x];
            d[8*x+4] = yp[x2+2];
            d[8*x+5] = up[x];
            d[8*x+6] = yp[x2+3];
            d[8*x+7] = vp[x];
        }
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
{
    dst +=   count;
    src += 2*count;
    count= -count;

    if(count <= -16) {
        count += 15;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -30(%1, %0, 2), %%mm0 \n\t"
            "movq -22(%1, %0, 2), %%mm1 \n\t"
            "movq -14(%1, %0, 2), %%mm2 \n\t"
            "movq -6(%1, %0, 2), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0,-15(%2, %0) \n\t"
            MOVNTQ" %%mm2,- 7(%2, %0) \n\t"
            "add $16, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst)
        );
        count -= 15;
    }
    while(count<0) {
        dst[count]= src[2*count];
        count++;
    }
}
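
/*
 * extract_even() and the helpers below share one idiom to avoid a separate
 * loop counter: bias the pointers past the end of the buffers, run the index
 * from -count up towards zero, and branch on the sign flag ("js 1b"), so the
 * index increment doubles as the loop test. The shape of the idiom in scalar
 * form (not compiled; mirrors the C tail loop above):
 */
#if 0
static void extract_even_ref(const uint8_t *src, uint8_t *dst, long count)
{
    dst +=   count;      /* point past the end */
    src += 2*count;
    count = -count;      /* negative index climbs towards zero... */
    while (count < 0) {
        dst[count] = src[2*count];
        count++;         /* ...which is what "add $16; js 1b" tests */
    }
}
#endif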
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= -count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= -count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
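
/*
 * Note for the *avg helpers: PAVGB rounds up while the scalar tail loop uses
 * a truncating shift, so the last few samples of a row can differ from the
 * SIMD part by one LSB. Illustrative comparison (not compiled):
 */
#if 0
static int simd_avg(int a, int b)   { return (a + b + 1) >> 1; } /* PAVGB */
static int scalar_avg(int a, int b) { return (a + b) >> 1;     } /* C tail */
#endif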
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= -count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    src++;
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= -count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    src0++;
    src1++;
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        if(y&1) {
            RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
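
/*
 * chromWidth = -((-width)>>1) is a branch-free ceil(width/2) for the signed
 * int width: the arithmetic right shift rounds towards minus infinity, so
 * negating before and after rounds up instead. Quick check (not compiled):
 */
#if 0
static int ceil_half(int width) { return -((-width) >> 1); }
/* ceil_half(5) == 3, ceil_half(4) == 2 */
#endif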
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        RENAME(extract_odd2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        if(y&1) {
            RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}

#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        RENAME(extract_even2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
#endif /* !COMPILE_TEMPLATE_SSE2 */
static inline void RENAME(rgb2rgb_init)(void)
{
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
    rgb15to16          = RENAME(rgb15to16);
    rgb15tobgr24       = RENAME(rgb15tobgr24);
    rgb15to32          = RENAME(rgb15to32);
    rgb16tobgr24       = RENAME(rgb16tobgr24);
    rgb16to32          = RENAME(rgb16to32);
    rgb16to15          = RENAME(rgb16to15);
    rgb24tobgr16       = RENAME(rgb24tobgr16);
    rgb24tobgr15       = RENAME(rgb24tobgr15);
    rgb24tobgr32       = RENAME(rgb24tobgr32);
    rgb32to16          = RENAME(rgb32to16);
    rgb32to15          = RENAME(rgb32to15);
    rgb32tobgr24       = RENAME(rgb32tobgr24);
    rgb24to15          = RENAME(rgb24to15);
    rgb24to16          = RENAME(rgb24to16);
    rgb24tobgr24       = RENAME(rgb24tobgr24);
    shuffle_bytes_2103 = RENAME(shuffle_bytes_2103);
    rgb32tobgr16       = RENAME(rgb32tobgr16);
    rgb32tobgr15       = RENAME(rgb32tobgr15);
    yv12toyuy2         = RENAME(yv12toyuy2);
    yv12touyvy         = RENAME(yv12touyvy);
    yuv422ptoyuy2      = RENAME(yuv422ptoyuy2);
    yuv422ptouyvy      = RENAME(yuv422ptouyvy);
    yuy2toyv12         = RENAME(yuy2toyv12);
    vu9_to_vu12        = RENAME(vu9_to_vu12);
    yvu9_to_yuy2       = RENAME(yvu9_to_yuy2);
    uyvytoyuv422       = RENAME(uyvytoyuv422);
    yuyvtoyuv422       = RENAME(yuyvtoyuv422);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
    planar2x           = RENAME(planar2x);
#endif /* COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW */
    rgb24toyv12        = RENAME(rgb24toyv12);

    yuyvtoyuv420       = RENAME(yuyvtoyuv420);
    uyvytoyuv420       = RENAME(uyvytoyuv420);
#endif /* !COMPILE_TEMPLATE_SSE2 */

#if !COMPILE_TEMPLATE_AMD3DNOW
    interleaveBytes    = RENAME(interleaveBytes);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
}
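
/*
 * This template is compiled once per CPU flavour with different RENAME() and
 * COMPILE_TEMPLATE_* settings; rgb2rgb_init() then repoints the global
 * function pointers at this flavour's versions. A hypothetical caller-side
 * dispatch, just to illustrate the idea (names are placeholders, not the
 * real API):
 */
#if 0
    if (cpu_flags & FLAG_MMX2)
        rgb2rgb_init_MMX2();   /* template built with RENAME(x) = x ## _MMX2 */
    else if (cpu_flags & FLAG_MMX)
        rgb2rgb_init_MMX();
#endif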