/*
 * software RGB to RGB converter
 * pluralize by software PAL8 to RGB converter
 * software YUV to YUV converter
 * software YUV to RGB converter
 * Written by Nick Kurshev.
 * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
 * lot of big-endian byte order fixes by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/x86/asm.h"

#undef PREFETCH
#undef MOVNTQ
#undef EMMS
#undef SFENCE
#undef PAVGB

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB    "pavgusb"
#elif COMPILE_TEMPLATE_MMXEXT
#define PREFETCH "prefetchnta"
#define PAVGB    "pavgb"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif

#if COMPILE_TEMPLATE_MMXEXT
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define MOVNTQ "movq"
#define SFENCE " # nop"
#endif
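
/* A note on the scheme (inferred from the COMPILE_TEMPLATE_* guards and
 * RENAME() calls below): this template is compiled more than once, targeting
 * a different instruction set each time.  RENAME() gives every build its own
 * symbol names, and the macros above resolve either to the best instruction
 * the target offers (prefetchnta, movntq, sfence, pavgb/pavgusb) or to a
 * harmless fallback (plain movq, a nop). */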

#if !COMPILE_TEMPLATE_SSE2

#if !COMPILE_TEMPLATE_AMD3DNOW

static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 23;
    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "punpckldq 3(%1), %%mm0 \n\t"
            "movd 6(%1), %%mm1 \n\t"
            "punpckldq 9(%1), %%mm1 \n\t"
            "movd 12(%1), %%mm2 \n\t"
            "punpckldq 15(%1), %%mm2 \n\t"
            "movd 18(%1), %%mm3 \n\t"
            "punpckldq 21(%1), %%mm3 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm2 \n\t"
            "por %%mm7, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm1, 8(%0) \n\t"
            MOVNTQ" %%mm2, 16(%0) \n\t"
            MOVNTQ" %%mm3, 24(%0)"
            :: "r"(dest), "r"(s)
            :"memory");
        dest += 32;
        s += 24;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 255;
    }
}
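
/* STORE_BGR24_MMX expects four source qwords duplicated into the pairs
 * mm0/mm2, mm1/mm3, mm4/mm6 and mm5/mm7.  It masks out every fourth byte
 * (mask24l keeps the low three bytes of each pixel, mask24h the high three
 * of the byte-shifted copy) and repacks the surviving 24 bytes into three
 * contiguous qword stores: 8 BGRX pixels in, 8 packed BGR pixels out. */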

#define STORE_BGR24_MMX \
    "psrlq $8, %%mm2 \n\t" \
    "psrlq $8, %%mm3 \n\t" \
    "psrlq $8, %%mm6 \n\t" \
    "psrlq $8, %%mm7 \n\t" \
    "pand "MANGLE(mask24l)", %%mm0\n\t" \
    "pand "MANGLE(mask24l)", %%mm1\n\t" \
    "pand "MANGLE(mask24l)", %%mm4\n\t" \
    "pand "MANGLE(mask24l)", %%mm5\n\t" \
    "pand "MANGLE(mask24h)", %%mm2\n\t" \
    "pand "MANGLE(mask24h)", %%mm3\n\t" \
    "pand "MANGLE(mask24h)", %%mm6\n\t" \
    "pand "MANGLE(mask24h)", %%mm7\n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm6, %%mm4 \n\t" \
    "por %%mm7, %%mm5 \n\t" \
    \
    "movq %%mm1, %%mm2 \n\t" \
    "movq %%mm4, %%mm3 \n\t" \
    "psllq $48, %%mm2 \n\t" \
    "psllq $32, %%mm3 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "psrlq $16, %%mm1 \n\t" \
    "psrlq $32, %%mm4 \n\t" \
    "psllq $16, %%mm5 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm5, %%mm4 \n\t" \
    \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm1, 8(%0) \n\t" \
    MOVNTQ" %%mm4, 16(%0)"

static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 31;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 16(%1), %%mm4 \n\t"
            "movq 24(%1), %%mm5 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            STORE_BGR24_MMX
            :: "r"(dest), "r"(s)
              NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        dest += 24;
        s += 32;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}

/*
  original by Strepto/Astral
  ported to gcc & bugfixed: A'rpi
  MMXEXT, 3DNOW optimization by Nick Kurshev
  32-bit C version, and and&add trick by Michael Niedermayer
*/
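
/* The and&add trick: in RGB555 (0RRRRRGG GGGBBBBB) converting to RGB565
 * only requires shifting the R and G fields one bit left.  Masking those
 * fields and adding them back doubles them, i.e. shifts them in place:
 *     (x & 0x7FFF) + (x & 0x7FE0)
 * leaves B untouched and yields RRRRRGGGGG0BBBBB, the 565 layout with the
 * new green LSB cleared.  One 32-bit add converts two pixels at once, and
 * the paddw in the MMX loop below converts four. */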
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t* s=src;
    register uint8_t* d=dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
    mm_end = end - 15;
    while (s<mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "pand %%mm4, %%mm0 \n\t"
            "pand %%mm4, %%mm2 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d+=16;
        s+=16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register unsigned x= *((const uint32_t *)s);
        *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
        d+=4;
        s+=4;
    }
    if (s < end) {
        register unsigned short x= *((const uint16_t *)s);
        *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
    }
}

static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t* s=src;
    register uint8_t* d=dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s<mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $1, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm3 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d+=16;
        s+=16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x= *((const uint32_t*)s);
        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
        s+=4;
        d+=4;
    }
    if (s < end) {
        register uint16_t x= *((const uint16_t*)s);
        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
    }
}
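
/* rgb32to16/rgb32to15 below combine blue and red with a single pmaddwd:
 * mask3216br keeps the B and R bytes as two words per pixel, and the
 * multiply-accumulate against mul3216/mul3215 scales both into their final
 * bit positions and sums them in one instruction; green is masked with
 * mask3216g/mask3215g and merged separately with por. */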
static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $5, %%mm0 \n\t"
        "pslld $11, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
    }
}

static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            :: "r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $6, %%mm0 \n\t"
        "pslld $10, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
    }
}

static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}
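
/* The 15/16 bit -> 24/32 bit converters below widen each colour field back
 * to 8 bits with pmulhw against the mul15_mid/mul16_mid/mul15_hi constants.
 * The scalar tail loops spell out what is presumably the same expansion as
 * bit replication: a 5-bit field v becomes (v<<3)|(v>>2), copying the top
 * bits into the low bits so 0 still maps to 0 and 31 maps to 255. */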
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t*)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
             NAMED_CONSTRAINTS_ADD(mul15_mid,mul15_hi)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
              NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
    }
}

static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = (uint8_t *)dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
             NAMED_CONSTRAINTS_ADD(mul15_mid,mul16_mid,mul15_hi)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
              NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
    }
}

/*
 * mm0 = 00 B3 00 B2 00 B1 00 B0
 * mm1 = 00 G3 00 G2 00 G1 00 G0
 * mm2 = 00 R3 00 R2 00 R1 00 R0
 * mm6 = FF FF FF FF FF FF FF FF
 * mm7 = 00 00 00 00 00 00 00 00
 */
#define PACK_RGB32 \
    "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
    "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
    "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
    "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
    "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
    "movq %%mm0, %%mm3 \n\t" \
    "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
    "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm3, 8(%0) \n\t"

static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw %5, %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r),"m"(mul15_mid)
              NAMED_CONSTRAINTS_ADD(mul15_hi)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
        *d++ = 255;
    }
}

static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t*)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mul15_mid)
              NAMED_CONSTRAINTS_ADD(mul16_mid,mul15_hi)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
        *d++ = 255;
    }
}
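
/* rgb24tobgr24 runs its MMX loop off a negative index: mmx_size starts at
 * 23 - src_size, both pointers are biased by -mmx_size, and the counter is
 * simply incremented by 24 until it becomes non-negative ("js 1b"), so the
 * loop needs no separate compare.  The scalar tail reads src[i + 2] into a
 * temporary before writing, so in-place conversion (src == dst) works. */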
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    unsigned i;
    x86_reg mmx_size= 23 - src_size;
    __asm__ volatile (
        "test %%"FF_REG_a", %%"FF_REG_a" \n\t"
        "jns 2f \n\t"
        "movq "MANGLE(mask24r)", %%mm5 \n\t"
        "movq "MANGLE(mask24g)", %%mm6 \n\t"
        "movq "MANGLE(mask24b)", %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %%"FF_REG_a") \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG
        "movq (%1, %%"FF_REG_a"), %%mm1 \n\t" // BGR BGR BG
        "movq 2(%1, %%"FF_REG_a"), %%mm2 \n\t" // R BGR BGR B
        "psllq $16, %%mm0 \n\t" // 00 BGR BGR
        "pand %%mm5, %%mm0 \n\t"
        "pand %%mm6, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 6(%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG
        MOVNTQ" %%mm1,(%2, %%"FF_REG_a") \n\t" // RGB RGB RG
        "movq 8(%1, %%"FF_REG_a"), %%mm1 \n\t" // R BGR BGR B
        "movq 10(%1, %%"FF_REG_a"), %%mm2 \n\t" // GR BGR BGR
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 14(%1, %%"FF_REG_a"), %%mm0 \n\t" // R BGR BGR B
        MOVNTQ" %%mm1, 8(%2, %%"FF_REG_a")\n\t" // B RGB RGB R
        "movq 16(%1, %%"FF_REG_a"), %%mm1 \n\t" // GR BGR BGR
        "movq 18(%1, %%"FF_REG_a"), %%mm2 \n\t" // BGR BGR BG
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm5, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        MOVNTQ" %%mm1, 16(%2, %%"FF_REG_a") \n\t"
        "add $24, %%"FF_REG_a" \n\t"
        " js 1b \n\t"
        "2: \n\t"
        : "+a" (mmx_size)
        : "r" (src-mmx_size), "r"(dst-mmx_size)
          NAMED_CONSTRAINTS_ADD(mask24r,mask24g,mask24b)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    if (mmx_size==23) return; //finished, was multiple of 8
    src+= src_size;
    dst+= src_size;
    src_size= 23-mmx_size;
    src-= src_size;
    dst-= src_size;
    for (i=0; i<src_size; i+=3) {
        register uint8_t x;
        x          = src[i + 2];
        dst[i + 1] = src[i + 1];
        dst[i + 2] = src[i + 0];
        dst[i + 0] = x;
    }
}
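
/* The planar->packed YUV converters below zip the planes together with
 * punpck: U and V bytes are first interleaved into UVUV words, which are
 * then interleaved with the Y bytes to produce YUYV (or UYVY) output,
 * 32 bytes per iteration.  vertLumPerChroma is 2 for 4:2:0 input and 1 for
 * 4:2:2, controlling how often the chroma pointers advance. */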
static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"FF_REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"FF_REG_a") \n\t"
            PREFETCH" 32(%3, %%"FF_REG_a") \n\t"
            "movq (%2, %%"FF_REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"FF_REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"FF_REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"FF_REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm3, %%mm4 \n\t" // Y(0)
            "movq %%mm5, %%mm6 \n\t" // Y(8)
            "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
            "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
            "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
            "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
            MOVNTQ" %%mm3, (%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm5, 16(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"FF_REG_a", 4) \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"FF_REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"FF_REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"FF_REG_a") \n\t"
            PREFETCH" 32(%3, %%"FF_REG_a") \n\t"
            "movq (%2, %%"FF_REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"FF_REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"FF_REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"FF_REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm0, %%mm4 \n\t" // Y(0)
            "movq %%mm2, %%mm6 \n\t" // Y(8)
            "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
            "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
            "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
            "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
            MOVNTQ" %%mm0, (%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"FF_REG_a", 4) \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"FF_REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
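
/* Packed->planar goes the other way: mm7 is set to 0x00FF in every word, so
 * pand keeps the even bytes of each word (Y for YUY2 input) and psrlw $8
 * extracts the odd bytes; packuswb then re-packs each stream to bytes.  The
 * same mask/shift/pack step is applied once more to the UVUV words to split
 * U from V. */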
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a"\n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
            "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"FF_REG_a", 2) \n\t"
            "movq 16(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
            "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
            "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"FF_REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"FF_REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"FF_REG_a") \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        ydst += lumStride;
        src  += srcStride;
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a"\n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq 16(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
            "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"FF_REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"FF_REG_a", 2) \n\t"
            "add $8, %%"FF_REG_a"\n\t"
            "cmp %4, %%"FF_REG_a"\n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
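/* planar2x upscales one plane to twice the width and height, blending
 * horizontal and vertical neighbours with 3:1 weights ((3*a + b) >> 2 in
 * the C paths).  The MMX loop gets the same weighting from two chained
 * PAVGB steps, since pavgb(b, pavgb(b, a)) approximates (3*b + a) / 4 with
 * upward rounding. */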
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride)
{
    int x,y;

    dst[0]= src[0];

    // first line
    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    dst+= dstStride;

    for (y=1; y<srcHeight; y++) {
        x86_reg mmxSize= srcWidth&~15;

        if (mmxSize) {
            __asm__ volatile(
                "mov        %4, %%"FF_REG_a"            \n\t"
                "movq       "MANGLE(mmx_ff)", %%mm0     \n\t"
                "movq       (%0, %%"FF_REG_a"), %%mm4   \n\t"
                "movq       %%mm4, %%mm2                \n\t"
                "psllq      $8, %%mm4                   \n\t"
                "pand       %%mm0, %%mm2                \n\t"
                "por        %%mm2, %%mm4                \n\t"
                "movq       (%1, %%"FF_REG_a"), %%mm5   \n\t"
                "movq       %%mm5, %%mm3                \n\t"
                "psllq      $8, %%mm5                   \n\t"
                "pand       %%mm0, %%mm3                \n\t"
                "por        %%mm3, %%mm5                \n\t"
                "1:                                     \n\t"
                "movq       (%0, %%"FF_REG_a"), %%mm0   \n\t"
                "movq       (%1, %%"FF_REG_a"), %%mm1   \n\t"
                "movq       1(%0, %%"FF_REG_a"), %%mm2  \n\t"
                "movq       1(%1, %%"FF_REG_a"), %%mm3  \n\t"
                PAVGB"      %%mm0, %%mm5                \n\t"
                PAVGB"      %%mm0, %%mm3                \n\t"
                PAVGB"      %%mm0, %%mm5                \n\t"
                PAVGB"      %%mm0, %%mm3                \n\t"
                PAVGB"      %%mm1, %%mm4                \n\t"
                PAVGB"      %%mm1, %%mm2                \n\t"
                PAVGB"      %%mm1, %%mm4                \n\t"
                PAVGB"      %%mm1, %%mm2                \n\t"
                "movq       %%mm5, %%mm7                \n\t"
                "movq       %%mm4, %%mm6                \n\t"
                "punpcklbw  %%mm3, %%mm5                \n\t"
                "punpckhbw  %%mm3, %%mm7                \n\t"
                "punpcklbw  %%mm2, %%mm4                \n\t"
                "punpckhbw  %%mm2, %%mm6                \n\t"
                MOVNTQ"     %%mm5,  (%2, %%"FF_REG_a", 2) \n\t"
                MOVNTQ"     %%mm7, 8(%2, %%"FF_REG_a", 2) \n\t"
                MOVNTQ"     %%mm4,  (%3, %%"FF_REG_a", 2) \n\t"
                MOVNTQ"     %%mm6, 8(%3, %%"FF_REG_a", 2) \n\t"
                "add        $8, %%"FF_REG_a"            \n\t"
                "movq       -1(%0, %%"FF_REG_a"), %%mm4 \n\t"
                "movq       -1(%1, %%"FF_REG_a"), %%mm5 \n\t"
                " js        1b                          \n\t"
                :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
                   "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
                   "g" (-mmxSize)
                   NAMED_CONSTRAINTS_ADD(mmx_ff)
                : "%"FF_REG_a
            );
        } else {
            mmxSize = 1;
            dst[0]         = (src[0] * 3 + src[srcStride]) >> 2;
            dst[dstStride] = (src[0] + 3 * src[srcStride]) >> 2;
        }

        for (x=mmxSize-1; x<srcWidth-1; x++) {
            dst[2*x          +1]= (3*src[x+0] +   src[x+srcStride+1])>>2;
            dst[2*x+dstStride+2]= (  src[x+0] + 3*src[x+srcStride+1])>>2;
            dst[2*x+dstStride+1]= (  src[x+1] + 3*src[x+srcStride  ])>>2;
            dst[2*x          +2]= (3*src[x+1] +   src[x+srcStride  ])>>2;
        }
        dst[srcWidth*2 -1            ]= (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride])>>2;
        dst[srcWidth*2 -1 + dstStride]= (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;

        dst+=dstStride*2;
        src+=srcStride;
    }

    // last line
    dst[0]= src[0];

    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];

    __asm__ volatile(EMMS"       \n\t"
                     SFENCE"     \n\t"
                     ::: "memory");
}
#endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */
#if !COMPILE_TEMPLATE_AMD3DNOW
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line; the other lines
 * are ignored.
 * FIXME: Write HQ version.
 */
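/*
 * In scalar terms the two loops below compute, per pair of input lines
 * (UYVY byte order is U0 Y0 V0 Y1 U1 Y2 V1 Y3 ...):
 *
 *     ydst[i] = src[2*i + 1];                          // every line
 *     udst[j] = src[4*j + 0];  vdst[j] = src[4*j + 2]; // even lines only
 *
 * The mask in %%mm7 (0x00FF per word) selects the chroma bytes at even
 * offsets, and "psrlw $8" selects the luma bytes at odd offsets.
 */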
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;

    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor        %%"FF_REG_a", %%"FF_REG_a"      \n\t"
            "pcmpeqw    %%mm7, %%mm7                    \n\t"
            "psrlw      $8, %%mm7                       \n\t" // FF,00,FF,00...
            ".p2align   4                               \n\t"
            "1:                                         \n\t"
            PREFETCH"   64(%0, %%"FF_REG_a", 4)         \n\t"
            "movq       (%0, %%"FF_REG_a", 4), %%mm0    \n\t" // UYVY UYVY(0)
            "movq       8(%0, %%"FF_REG_a", 4), %%mm1   \n\t" // UYVY UYVY(4)
            "movq       %%mm0, %%mm2                    \n\t" // UYVY UYVY(0)
            "movq       %%mm1, %%mm3                    \n\t" // UYVY UYVY(4)
            "pand       %%mm7, %%mm0                    \n\t" // U0V0 U0V0(0)
            "pand       %%mm7, %%mm1                    \n\t" // U0V0 U0V0(4)
            "psrlw      $8, %%mm2                       \n\t" // Y0Y0 Y0Y0(0)
            "psrlw      $8, %%mm3                       \n\t" // Y0Y0 Y0Y0(4)
            "packuswb   %%mm1, %%mm0                    \n\t" // UVUV UVUV(0)
            "packuswb   %%mm3, %%mm2                    \n\t" // YYYY YYYY(0)
            MOVNTQ"     %%mm2, (%1, %%"FF_REG_a", 2)    \n\t"
            "movq       16(%0, %%"FF_REG_a", 4), %%mm1  \n\t" // UYVY UYVY(8)
            "movq       24(%0, %%"FF_REG_a", 4), %%mm2  \n\t" // UYVY UYVY(12)
            "movq       %%mm1, %%mm3                    \n\t" // UYVY UYVY(8)
            "movq       %%mm2, %%mm4                    \n\t" // UYVY UYVY(12)
            "pand       %%mm7, %%mm1                    \n\t" // U0V0 U0V0(8)
            "pand       %%mm7, %%mm2                    \n\t" // U0V0 U0V0(12)
            "psrlw      $8, %%mm3                       \n\t" // Y0Y0 Y0Y0(8)
            "psrlw      $8, %%mm4                       \n\t" // Y0Y0 Y0Y0(12)
            "packuswb   %%mm2, %%mm1                    \n\t" // UVUV UVUV(8)
            "packuswb   %%mm4, %%mm3                    \n\t" // YYYY YYYY(8)
            MOVNTQ"     %%mm3, 8(%1, %%"FF_REG_a", 2)   \n\t"
            "movq       %%mm0, %%mm2                    \n\t" // UVUV UVUV(0)
            "movq       %%mm1, %%mm3                    \n\t" // UVUV UVUV(8)
            "psrlw      $8, %%mm0                       \n\t" // V0V0 V0V0(0)
            "psrlw      $8, %%mm1                       \n\t" // V0V0 V0V0(8)
            "pand       %%mm7, %%mm2                    \n\t" // U0U0 U0U0(0)
            "pand       %%mm7, %%mm3                    \n\t" // U0U0 U0U0(8)
            "packuswb   %%mm1, %%mm0                    \n\t" // VVVV VVVV(0)
            "packuswb   %%mm3, %%mm2                    \n\t" // UUUU UUUU(0)
            MOVNTQ"     %%mm0, (%3, %%"FF_REG_a")       \n\t"
            MOVNTQ"     %%mm2, (%2, %%"FF_REG_a")       \n\t"
            "add        $8, %%"FF_REG_a"                \n\t"
            "cmp        %4, %%"FF_REG_a"                \n\t"
            " jb        1b                              \n\t"
            :: "r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );

        ydst += lumStride;
        src  += srcStride;

        __asm__ volatile(
            "xor        %%"FF_REG_a", %%"FF_REG_a"      \n\t"
            ".p2align   4                               \n\t"
            "1:                                         \n\t"
            PREFETCH"   64(%0, %%"FF_REG_a", 4)         \n\t"
            "movq       (%0, %%"FF_REG_a", 4), %%mm0    \n\t" // UYVY UYVY(0)
            "movq       8(%0, %%"FF_REG_a", 4), %%mm1   \n\t" // UYVY UYVY(4)
            "movq       16(%0, %%"FF_REG_a", 4), %%mm2  \n\t" // UYVY UYVY(8)
            "movq       24(%0, %%"FF_REG_a", 4), %%mm3  \n\t" // UYVY UYVY(12)
            "psrlw      $8, %%mm0                       \n\t" // Y0Y0 Y0Y0(0)
            "psrlw      $8, %%mm1                       \n\t" // Y0Y0 Y0Y0(4)
            "psrlw      $8, %%mm2                       \n\t" // Y0Y0 Y0Y0(8)
            "psrlw      $8, %%mm3                       \n\t" // Y0Y0 Y0Y0(12)
            "packuswb   %%mm1, %%mm0                    \n\t" // YYYY YYYY(0)
            "packuswb   %%mm3, %%mm2                    \n\t" // YYYY YYYY(8)
            MOVNTQ"     %%mm0, (%1, %%"FF_REG_a", 2)    \n\t"
            MOVNTQ"     %%mm2, 8(%1, %%"FF_REG_a", 2)   \n\t"
            "add        $8, %%"FF_REG_a"                \n\t"
            "cmp        %4, %%"FF_REG_a"                \n\t"
            " jb        1b                              \n\t"
            :: "r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );

        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS"       \n\t"
                     SFENCE"     \n\t"
                     ::: "memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
/**
 * Height should be a multiple of 2 and width should be a multiple of 2.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line; the other lines
 * are ignored in the C version.
 * FIXME: Write HQ version.
 */
#if HAVE_7REGS
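/*
 * Roughly (a scalar sketch; the asm differs in rounding details), each
 * luma byte below is a fixed-point dot product of one 3-byte pixel with
 * the coefficients stored in the rgb2yuv table at BGR2Y_IDX:
 *
 *     Y = ((c0*p[0] + c1*p[1] + c2*p[2]) >> 15) + bias;
 *
 * The two rounding shifts ("psrad $8" then "psraw $7") make up the >> 15,
 * and the limited-range bias is added via ff_bgr2YOffset.  The chroma
 * loop first averages each 2x2 pixel block -- with PAVGB where available,
 * otherwise with word adds and "psrlw $2" -- then applies the same dot
 * product with the BGR2U_IDX/BGR2V_IDX coefficients and the bias from
 * ff_bgr2UVOffset.
 */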
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                       int width, int height,
                                       int lumStride, int chromStride, int srcStride,
                                       int32_t *rgb2yuv)
{
#define BGR2Y_IDX "16*4+16*32"
#define BGR2U_IDX "16*4+16*33"
#define BGR2V_IDX "16*4+16*34"
    int y;
    const x86_reg chromWidth= width>>1;

    if (height > 2) {
        ff_rgb24toyv12_c(src, ydst, udst, vdst, width, 2, lumStride, chromStride, srcStride, rgb2yuv);
        src  += 2*srcStride;
        ydst += 2*lumStride;
        udst += chromStride;
        vdst += chromStride;
        height -= 2;
    }

    for (y=0; y<height-2; y+=2) {
        int i;
        for (i=0; i<2; i++) {
            __asm__ volatile(
                "mov        %2, %%"FF_REG_a"            \n\t"
                "movq       "BGR2Y_IDX"(%3), %%mm6      \n\t"
                "movq       "MANGLE(ff_w1111)", %%mm5   \n\t"
                "pxor       %%mm7, %%mm7                \n\t"
                "lea        (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_d" \n\t"
                ".p2align   4                           \n\t"
                "1:                                     \n\t"
                PREFETCH"   64(%0, %%"FF_REG_d")        \n\t"
                "movd       (%0, %%"FF_REG_d"), %%mm0   \n\t"
                "movd       3(%0, %%"FF_REG_d"), %%mm1  \n\t"
                "punpcklbw  %%mm7, %%mm0                \n\t"
                "punpcklbw  %%mm7, %%mm1                \n\t"
                "movd       6(%0, %%"FF_REG_d"), %%mm2  \n\t"
                "movd       9(%0, %%"FF_REG_d"), %%mm3  \n\t"
                "punpcklbw  %%mm7, %%mm2                \n\t"
                "punpcklbw  %%mm7, %%mm3                \n\t"
                "pmaddwd    %%mm6, %%mm0                \n\t"
                "pmaddwd    %%mm6, %%mm1                \n\t"
                "pmaddwd    %%mm6, %%mm2                \n\t"
                "pmaddwd    %%mm6, %%mm3                \n\t"
                "psrad      $8, %%mm0                   \n\t"
                "psrad      $8, %%mm1                   \n\t"
                "psrad      $8, %%mm2                   \n\t"
                "psrad      $8, %%mm3                   \n\t"
                "packssdw   %%mm1, %%mm0                \n\t"
                "packssdw   %%mm3, %%mm2                \n\t"
                "pmaddwd    %%mm5, %%mm0                \n\t"
                "pmaddwd    %%mm5, %%mm2                \n\t"
                "packssdw   %%mm2, %%mm0                \n\t"
                "psraw      $7, %%mm0                   \n\t"
                "movd       12(%0, %%"FF_REG_d"), %%mm4 \n\t"
                "movd       15(%0, %%"FF_REG_d"), %%mm1 \n\t"
                "punpcklbw  %%mm7, %%mm4                \n\t"
                "punpcklbw  %%mm7, %%mm1                \n\t"
                "movd       18(%0, %%"FF_REG_d"), %%mm2 \n\t"
                "movd       21(%0, %%"FF_REG_d"), %%mm3 \n\t"
                "punpcklbw  %%mm7, %%mm2                \n\t"
                "punpcklbw  %%mm7, %%mm3                \n\t"
                "pmaddwd    %%mm6, %%mm4                \n\t"
                "pmaddwd    %%mm6, %%mm1                \n\t"
                "pmaddwd    %%mm6, %%mm2                \n\t"
                "pmaddwd    %%mm6, %%mm3                \n\t"
                "psrad      $8, %%mm4                   \n\t"
                "psrad      $8, %%mm1                   \n\t"
                "psrad      $8, %%mm2                   \n\t"
                "psrad      $8, %%mm3                   \n\t"
                "packssdw   %%mm1, %%mm4                \n\t"
                "packssdw   %%mm3, %%mm2                \n\t"
                "pmaddwd    %%mm5, %%mm4                \n\t"
                "pmaddwd    %%mm5, %%mm2                \n\t"
                "add        $24, %%"FF_REG_d"           \n\t"
                "packssdw   %%mm2, %%mm4                \n\t"
                "psraw      $7, %%mm4                   \n\t"
                "packuswb   %%mm4, %%mm0                \n\t"
                "paddusb    "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
                MOVNTQ"     %%mm0, (%1, %%"FF_REG_a")   \n\t"
                "add        $8, %%"FF_REG_a"            \n\t"
                " js        1b                          \n\t"
                : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width), "r"(rgb2yuv)
                  NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2YOffset)
                : "%"FF_REG_a, "%"FF_REG_d
            );
            ydst += lumStride;
            src  += srcStride;
        }
        src -= srcStride*2;
        __asm__ volatile(
            "mov        %4, %%"FF_REG_a"                \n\t"
            "movq       "MANGLE(ff_w1111)", %%mm5       \n\t"
            "movq       "BGR2U_IDX"(%5), %%mm6          \n\t"
            "pxor       %%mm7, %%mm7                    \n\t"
            "lea        (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_d" \n\t"
            "add        %%"FF_REG_d", %%"FF_REG_d"      \n\t"
            ".p2align   4                               \n\t"
            "1:                                         \n\t"
            PREFETCH"   64(%0, %%"FF_REG_d")            \n\t"
            PREFETCH"   64(%1, %%"FF_REG_d")            \n\t"
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
            "movq       (%0, %%"FF_REG_d"), %%mm0       \n\t"
            "movq       (%1, %%"FF_REG_d"), %%mm1       \n\t"
            "movq       6(%0, %%"FF_REG_d"), %%mm2      \n\t"
            "movq       6(%1, %%"FF_REG_d"), %%mm3      \n\t"
            PAVGB"      %%mm1, %%mm0                    \n\t"
            PAVGB"      %%mm3, %%mm2                    \n\t"
            "movq       %%mm0, %%mm1                    \n\t"
            "movq       %%mm2, %%mm3                    \n\t"
            "psrlq      $24, %%mm0                      \n\t"
            "psrlq      $24, %%mm2                      \n\t"
            PAVGB"      %%mm1, %%mm0                    \n\t"
            PAVGB"      %%mm3, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm0                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
#else
            "movd       (%0, %%"FF_REG_d"), %%mm0       \n\t"
            "movd       (%1, %%"FF_REG_d"), %%mm1       \n\t"
            "movd       3(%0, %%"FF_REG_d"), %%mm2      \n\t"
            "movd       3(%1, %%"FF_REG_d"), %%mm3      \n\t"
            "punpcklbw  %%mm7, %%mm0                    \n\t"
            "punpcklbw  %%mm7, %%mm1                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm3                    \n\t"
            "paddw      %%mm1, %%mm0                    \n\t"
            "paddw      %%mm3, %%mm2                    \n\t"
            "paddw      %%mm2, %%mm0                    \n\t"
            "movd       6(%0, %%"FF_REG_d"), %%mm4      \n\t"
            "movd       6(%1, %%"FF_REG_d"), %%mm1      \n\t"
            "movd       9(%0, %%"FF_REG_d"), %%mm2      \n\t"
            "movd       9(%1, %%"FF_REG_d"), %%mm3      \n\t"
            "punpcklbw  %%mm7, %%mm4                    \n\t"
            "punpcklbw  %%mm7, %%mm1                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm3                    \n\t"
            "paddw      %%mm1, %%mm4                    \n\t"
            "paddw      %%mm3, %%mm2                    \n\t"
            "paddw      %%mm4, %%mm2                    \n\t"
            "psrlw      $2, %%mm0                       \n\t"
            "psrlw      $2, %%mm2                       \n\t"
#endif
            "movq       "BGR2V_IDX"(%5), %%mm1          \n\t"
            "movq       "BGR2V_IDX"(%5), %%mm3          \n\t"
            "pmaddwd    %%mm0, %%mm1                    \n\t"
            "pmaddwd    %%mm2, %%mm3                    \n\t"
            "pmaddwd    %%mm6, %%mm0                    \n\t"
            "pmaddwd    %%mm6, %%mm2                    \n\t"
            "psrad      $8, %%mm0                       \n\t"
            "psrad      $8, %%mm1                       \n\t"
            "psrad      $8, %%mm2                       \n\t"
            "psrad      $8, %%mm3                       \n\t"
            "packssdw   %%mm2, %%mm0                    \n\t"
            "packssdw   %%mm3, %%mm1                    \n\t"
            "pmaddwd    %%mm5, %%mm0                    \n\t"
            "pmaddwd    %%mm5, %%mm1                    \n\t"
            "packssdw   %%mm1, %%mm0                    \n\t" // V1 V0 U1 U0
            "psraw      $7, %%mm0                       \n\t"
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
            "movq       12(%0, %%"FF_REG_d"), %%mm4     \n\t"
            "movq       12(%1, %%"FF_REG_d"), %%mm1     \n\t"
            "movq       18(%0, %%"FF_REG_d"), %%mm2     \n\t"
            "movq       18(%1, %%"FF_REG_d"), %%mm3     \n\t"
            PAVGB"      %%mm1, %%mm4                    \n\t"
            PAVGB"      %%mm3, %%mm2                    \n\t"
            "movq       %%mm4, %%mm1                    \n\t"
            "movq       %%mm2, %%mm3                    \n\t"
            "psrlq      $24, %%mm4                      \n\t"
            "psrlq      $24, %%mm2                      \n\t"
            PAVGB"      %%mm1, %%mm4                    \n\t"
            PAVGB"      %%mm3, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm4                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
#else
            "movd       12(%0, %%"FF_REG_d"), %%mm4     \n\t"
            "movd       12(%1, %%"FF_REG_d"), %%mm1     \n\t"
            "movd       15(%0, %%"FF_REG_d"), %%mm2     \n\t"
            "movd       15(%1, %%"FF_REG_d"), %%mm3     \n\t"
            "punpcklbw  %%mm7, %%mm4                    \n\t"
            "punpcklbw  %%mm7, %%mm1                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm3                    \n\t"
            "paddw      %%mm1, %%mm4                    \n\t"
            "paddw      %%mm3, %%mm2                    \n\t"
            "paddw      %%mm2, %%mm4                    \n\t"
            "movd       18(%0, %%"FF_REG_d"), %%mm5     \n\t"
            "movd       18(%1, %%"FF_REG_d"), %%mm1     \n\t"
            "movd       21(%0, %%"FF_REG_d"), %%mm2     \n\t"
            "movd       21(%1, %%"FF_REG_d"), %%mm3     \n\t"
            "punpcklbw  %%mm7, %%mm5                    \n\t"
            "punpcklbw  %%mm7, %%mm1                    \n\t"
            "punpcklbw  %%mm7, %%mm2                    \n\t"
            "punpcklbw  %%mm7, %%mm3                    \n\t"
            "paddw      %%mm1, %%mm5                    \n\t"
            "paddw      %%mm3, %%mm2                    \n\t"
            "paddw      %%mm5, %%mm2                    \n\t"
            "movq       "MANGLE(ff_w1111)", %%mm5       \n\t"
            "psrlw      $2, %%mm4                       \n\t"
            "psrlw      $2, %%mm2                       \n\t"
#endif
            "movq       "BGR2V_IDX"(%5), %%mm1          \n\t"
            "movq       "BGR2V_IDX"(%5), %%mm3          \n\t"
            "pmaddwd    %%mm4, %%mm1                    \n\t"
            "pmaddwd    %%mm2, %%mm3                    \n\t"
            "pmaddwd    %%mm6, %%mm4                    \n\t"
            "pmaddwd    %%mm6, %%mm2                    \n\t"
            "psrad      $8, %%mm4                       \n\t"
            "psrad      $8, %%mm1                       \n\t"
            "psrad      $8, %%mm2                       \n\t"
            "psrad      $8, %%mm3                       \n\t"
            "packssdw   %%mm2, %%mm4                    \n\t"
            "packssdw   %%mm3, %%mm1                    \n\t"
            "pmaddwd    %%mm5, %%mm4                    \n\t"
            "pmaddwd    %%mm5, %%mm1                    \n\t"
            "add        $24, %%"FF_REG_d"               \n\t"
            "packssdw   %%mm1, %%mm4                    \n\t" // V3 V2 U3 U2
            "psraw      $7, %%mm4                       \n\t"
            "movq       %%mm0, %%mm1                    \n\t"
            "punpckldq  %%mm4, %%mm0                    \n\t"
            "punpckhdq  %%mm4, %%mm1                    \n\t"
            "packsswb   %%mm1, %%mm0                    \n\t"
            "paddb      "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
            "movd       %%mm0, (%2, %%"FF_REG_a")       \n\t"
            "punpckhdq  %%mm0, %%mm0                    \n\t"
            "movd       %%mm0, (%3, %%"FF_REG_a")       \n\t"
            "add        $4, %%"FF_REG_a"                \n\t"
            " js        1b                              \n\t"
            : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth), "r"(rgb2yuv)
              NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2UVOffset)
            : "%"FF_REG_a, "%"FF_REG_d
        );

        udst += chromStride;
        vdst += chromStride;
        src  += srcStride*2;
    }

    __asm__ volatile(EMMS"       \n\t"
                     SFENCE"     \n\t"
                     ::: "memory");

    ff_rgb24toyv12_c(src, ydst, udst, vdst, width, height-y, lumStride, chromStride, srcStride, rgb2yuv);
}
#endif /* HAVE_7REGS */
#endif /* !COMPILE_TEMPLATE_SSE2 */
#if !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX
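/*
 * interleaveBytes zips two planes into one; per row this is exactly the
 * scalar tail loop below (the SSE2/MMX paths just do 16 pixels at a time
 * with punpcklbw/punpckhbw):
 *
 *     dest[2*w + 0] = src1[w];
 *     dest[2*w + 1] = src2[w];
 */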
static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
                                    int width, int height, int src1Stride,
                                    int src2Stride, int dstStride)
{
    int h;

    for (h=0; h < height; h++) {
        int w;

        if (width >= 16) {
#if COMPILE_TEMPLATE_SSE2
            if (!((((intptr_t)src1) | ((intptr_t)src2) | ((intptr_t)dest))&15)) {
                __asm__(
                    "xor        %%"FF_REG_a", %%"FF_REG_a"  \n\t"
                    "1:                                     \n\t"
                    PREFETCH"   64(%1, %%"FF_REG_a")        \n\t"
                    PREFETCH"   64(%2, %%"FF_REG_a")        \n\t"
                    "movdqa     (%1, %%"FF_REG_a"), %%xmm0  \n\t"
                    "movdqa     (%1, %%"FF_REG_a"), %%xmm1  \n\t"
                    "movdqa     (%2, %%"FF_REG_a"), %%xmm2  \n\t"
                    "punpcklbw  %%xmm2, %%xmm0              \n\t"
                    "punpckhbw  %%xmm2, %%xmm1              \n\t"
                    "movntdq    %%xmm0,   (%0, %%"FF_REG_a", 2) \n\t"
                    "movntdq    %%xmm1, 16(%0, %%"FF_REG_a", 2) \n\t"
                    "add        $16, %%"FF_REG_a"           \n\t"
                    "cmp        %3, %%"FF_REG_a"            \n\t"
                    " jb        1b                          \n\t"
                    :: "r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
                    : "memory", XMM_CLOBBERS("xmm0", "xmm1", "xmm2",) "%"FF_REG_a
                );
            } else
#endif
                __asm__(
                    "xor        %%"FF_REG_a", %%"FF_REG_a"  \n\t"
                    "1:                                     \n\t"
                    PREFETCH"   64(%1, %%"FF_REG_a")        \n\t"
                    PREFETCH"   64(%2, %%"FF_REG_a")        \n\t"
                    "movq       (%1, %%"FF_REG_a"), %%mm0   \n\t"
                    "movq       8(%1, %%"FF_REG_a"), %%mm2  \n\t"
                    "movq       %%mm0, %%mm1                \n\t"
                    "movq       %%mm2, %%mm3                \n\t"
                    "movq       (%2, %%"FF_REG_a"), %%mm4   \n\t"
                    "movq       8(%2, %%"FF_REG_a"), %%mm5  \n\t"
                    "punpcklbw  %%mm4, %%mm0                \n\t"
                    "punpckhbw  %%mm4, %%mm1                \n\t"
                    "punpcklbw  %%mm5, %%mm2                \n\t"
                    "punpckhbw  %%mm5, %%mm3                \n\t"
                    MOVNTQ"     %%mm0,   (%0, %%"FF_REG_a", 2) \n\t"
                    MOVNTQ"     %%mm1,  8(%0, %%"FF_REG_a", 2) \n\t"
                    MOVNTQ"     %%mm2, 16(%0, %%"FF_REG_a", 2) \n\t"
                    MOVNTQ"     %%mm3, 24(%0, %%"FF_REG_a", 2) \n\t"
                    "add        $16, %%"FF_REG_a"           \n\t"
                    "cmp        %3, %%"FF_REG_a"            \n\t"
                    " jb        1b                          \n\t"
                    :: "r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
                    : "memory", "%"FF_REG_a
                );
        }
        for (w= (width&(~15)); w < width; w++) {
            dest[2*w+0] = src1[w];
            dest[2*w+1] = src2[w];
        }
        dest += dstStride;
        src1 += src1Stride;
        src2 += src2Stride;
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX */
#if !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL
#if !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_X86ASM
void RENAME(ff_nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
                         const uint8_t *unused,
                         const uint8_t *src1,
                         const uint8_t *src2,
                         int w,
                         uint32_t *unused2);
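/*
 * deinterleaveBytes splits interleaved byte pairs back into two planes by
 * calling the NV12 chroma-unpack routine declared above (an x86asm kernel,
 * per the HAVE_X86ASM guard) once per row; conceptually
 *
 *     dst1[x] = src[2*x];  dst2[x] = src[2*x + 1];
 */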
static void RENAME(deinterleaveBytes)(const uint8_t *src, uint8_t *dst1, uint8_t *dst2,
                                      int width, int height, int srcStride,
                                      int dst1Stride, int dst2Stride)
{
    int h;

    for (h = 0; h < height; h++) {
        RENAME(ff_nv12ToUV)(dst1, dst2, NULL, src, NULL, width, NULL);
        src  += srcStride;
        dst1 += dst1Stride;
        dst2 += dst2Stride;
    }
    __asm__(
#if !COMPILE_TEMPLATE_SSE2
        EMMS"       \n\t"
#endif
        SFENCE"     \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_X86ASM */
#endif /* !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL */
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
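/*
 * vu9_to_vu12 upsamples the two quarter-resolution chroma planes of YVU9
 * to half resolution by plain sample replication: every source byte
 * becomes a 2x2 block, as in the scalar tail loops,
 *
 *     d[2*x] = d[2*x+1] = s[x];   // and each source line feeds two rows
 */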
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
                                       uint8_t *dst1, uint8_t *dst2,
                                       int width, int height,
                                       int srcStride1, int srcStride2,
                                       int dstStride1, int dstStride2)
{
    x86_reg x, y;
    int w,h;
    w=width/2; h=height/2;

    __asm__ volatile(
        PREFETCH" %0 \n\t"
        PREFETCH" %1 \n\t"
        :: "m"(*(src1+srcStride1)), "m"(*(src2+srcStride2)) : "memory");

    for (y=0;y<h;y++) {
        const uint8_t* s1=src1+srcStride1*(y>>1);
        uint8_t* d=dst1+dstStride1*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH"   32(%1,%2)           \n\t"
                "movq       (%1,%2), %%mm0      \n\t"
                "movq       8(%1,%2), %%mm2     \n\t"
                "movq       16(%1,%2), %%mm4    \n\t"
                "movq       24(%1,%2), %%mm6    \n\t"
                "movq       %%mm0, %%mm1        \n\t"
                "movq       %%mm2, %%mm3        \n\t"
                "movq       %%mm4, %%mm5        \n\t"
                "movq       %%mm6, %%mm7        \n\t"
                "punpcklbw  %%mm0, %%mm0        \n\t"
                "punpckhbw  %%mm1, %%mm1        \n\t"
                "punpcklbw  %%mm2, %%mm2        \n\t"
                "punpckhbw  %%mm3, %%mm3        \n\t"
                "punpcklbw  %%mm4, %%mm4        \n\t"
                "punpckhbw  %%mm5, %%mm5        \n\t"
                "punpcklbw  %%mm6, %%mm6        \n\t"
                "punpckhbw  %%mm7, %%mm7        \n\t"
                MOVNTQ"     %%mm0,   (%0,%2,2)  \n\t"
                MOVNTQ"     %%mm1,  8(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm2, 16(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm3, 24(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm4, 32(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm5, 40(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm6, 48(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm7, 56(%0,%2,2)"
                :: "r"(d), "r"(s1), "r"(x)
                : "memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
    }
    for (y=0;y<h;y++) {
        const uint8_t* s2=src2+srcStride2*(y>>1);
        uint8_t* d=dst2+dstStride2*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH"   32(%1,%2)           \n\t"
                "movq       (%1,%2), %%mm0      \n\t"
                "movq       8(%1,%2), %%mm2     \n\t"
                "movq       16(%1,%2), %%mm4    \n\t"
                "movq       24(%1,%2), %%mm6    \n\t"
                "movq       %%mm0, %%mm1        \n\t"
                "movq       %%mm2, %%mm3        \n\t"
                "movq       %%mm4, %%mm5        \n\t"
                "movq       %%mm6, %%mm7        \n\t"
                "punpcklbw  %%mm0, %%mm0        \n\t"
                "punpckhbw  %%mm1, %%mm1        \n\t"
                "punpcklbw  %%mm2, %%mm2        \n\t"
                "punpckhbw  %%mm3, %%mm3        \n\t"
                "punpcklbw  %%mm4, %%mm4        \n\t"
                "punpckhbw  %%mm5, %%mm5        \n\t"
                "punpcklbw  %%mm6, %%mm6        \n\t"
                "punpckhbw  %%mm7, %%mm7        \n\t"
                MOVNTQ"     %%mm0,   (%0,%2,2)  \n\t"
                MOVNTQ"     %%mm1,  8(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm2, 16(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm3, 24(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm4, 32(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm5, 40(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm6, 48(%0,%2,2)  \n\t"
                MOVNTQ"     %%mm7, 56(%0,%2,2)"
                :: "r"(d), "r"(s2), "r"(x)
                : "memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}
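
/*
 * yvu9_to_yuy2 packs YUV410P (chroma subsampled 4x in both directions)
 * into YUY2: each chroma sample is replicated across four luma pixels
 * horizontally and four source lines vertically (note the y>>2 below).
 * The scalar tail loop shows the byte layout of one output group.
 */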
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
                                        uint8_t *dst,
                                        int width, int height,
                                        int srcStride1, int srcStride2,
                                        int srcStride3, int dstStride)
{
    x86_reg x;
    int y,w,h;
    w=width/2; h=height;
    for (y=0;y<h;y++) {
        const uint8_t* yp=src1+srcStride1*y;
        const uint8_t* up=src2+srcStride2*(y>>2);
        const uint8_t* vp=src3+srcStride3*(y>>2);
        uint8_t* d=dst+dstStride*y;
        x=0;
        for (;x<w-7;x+=8) {
            __asm__ volatile(
                PREFETCH"   32(%1, %0)           \n\t"
                PREFETCH"   32(%2, %0)           \n\t"
                PREFETCH"   32(%3, %0)           \n\t"
                "movq       (%1, %0, 4), %%mm0   \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq       (%2, %0), %%mm1      \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq       (%3, %0), %%mm2      \n\t" /* V0V1V2V3V4V5V6V7 */
                "movq       %%mm0, %%mm3         \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq       %%mm1, %%mm4         \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq       %%mm2, %%mm5         \n\t" /* V0V1V2V3V4V5V6V7 */
                "punpcklbw  %%mm1, %%mm1         \n\t" /* U0U0 U1U1 U2U2 U3U3 */
                "punpcklbw  %%mm2, %%mm2         \n\t" /* V0V0 V1V1 V2V2 V3V3 */
                "punpckhbw  %%mm4, %%mm4         \n\t" /* U4U4 U5U5 U6U6 U7U7 */
                "punpckhbw  %%mm5, %%mm5         \n\t" /* V4V4 V5V5 V6V6 V7V7 */
                "movq       %%mm1, %%mm6         \n\t"
                "punpcklbw  %%mm2, %%mm1         \n\t" /* U0V0 U0V0 U1V1 U1V1 */
                "punpcklbw  %%mm1, %%mm0         \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0 */
                "punpckhbw  %%mm1, %%mm3         \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1 */
                MOVNTQ"     %%mm0, (%4, %0, 8)   \n\t"
                MOVNTQ"     %%mm3, 8(%4, %0, 8)  \n\t"
                "punpckhbw  %%mm2, %%mm6         \n\t" /* U2V2 U2V2 U3V3 U3V3 */
                "movq       8(%1, %0, 4), %%mm0  \n\t"
                "movq       %%mm0, %%mm3         \n\t"
                "punpcklbw  %%mm6, %%mm0         \n\t" /* Y U2 Y V2 Y U2 Y V2 */
                "punpckhbw  %%mm6, %%mm3         \n\t" /* Y U3 Y V3 Y U3 Y V3 */
                MOVNTQ"     %%mm0, 16(%4, %0, 8) \n\t"
                MOVNTQ"     %%mm3, 24(%4, %0, 8) \n\t"
                "movq       %%mm4, %%mm6         \n\t"
                "movq       16(%1, %0, 4), %%mm0 \n\t"
                "movq       %%mm0, %%mm3         \n\t"
                "punpcklbw  %%mm5, %%mm4         \n\t"
                "punpcklbw  %%mm4, %%mm0         \n\t" /* Y U4 Y V4 Y U4 Y V4 */
                "punpckhbw  %%mm4, %%mm3         \n\t" /* Y U5 Y V5 Y U5 Y V5 */
                MOVNTQ"     %%mm0, 32(%4, %0, 8) \n\t"
                MOVNTQ"     %%mm3, 40(%4, %0, 8) \n\t"
                "punpckhbw  %%mm5, %%mm6         \n\t"
                "movq       24(%1, %0, 4), %%mm0 \n\t"
                "movq       %%mm0, %%mm3         \n\t"
                "punpcklbw  %%mm6, %%mm0         \n\t" /* Y U6 Y V6 Y U6 Y V6 */
                "punpckhbw  %%mm6, %%mm3         \n\t" /* Y U7 Y V7 Y U7 Y V7 */
                MOVNTQ"     %%mm0, 48(%4, %0, 8) \n\t"
                MOVNTQ"     %%mm3, 56(%4, %0, 8) \n\t"
                : "+r" (x)
                : "r"(yp), "r" (up), "r"(vp), "r"(d)
                : "memory");
        }
        for (; x<w; x++) {
            const int x2 = x<<2;
            d[8*x+0] = yp[x2];
            d[8*x+1] = up[x];
            d[8*x+2] = yp[x2+1];
            d[8*x+3] = vp[x];
            d[8*x+4] = yp[x2+2];
            d[8*x+5] = up[x];
            d[8*x+6] = yp[x2+3];
            d[8*x+7] = vp[x];
        }
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
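
/*
 * The extract_*() helpers below share one loop idiom: the dst/src pointers
 * are biased past the end of the data and the element count is negated, so
 * the hot loop needs only "add $N, %0; js 1b" for both the increment and
 * the exit test.  A scalar sketch of extract_even (keep every byte at an
 * even offset):
 *
 *     for (i = 0; i < count; i++)
 *         dst[i] = src[2*i];
 *
 * The *2 variants split a stream with a 4-byte period into two planes,
 * and the *avg variants additionally average two source lines (PAVGB in
 * the SIMD path, an add and shift in the scalar tail).
 */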
static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
{
    dst +=   count;
    src += 2*count;
    count= - count;

    if(count <= -16) {
        count += 15;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -30(%1, %0, 2), %%mm0   \n\t"
            "movq       -22(%1, %0, 2), %%mm1   \n\t"
            "movq       -14(%1, %0, 2), %%mm2   \n\t"
            "movq       -6(%1, %0, 2), %%mm3    \n\t"
            "pand       %%mm7, %%mm0            \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm2            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            MOVNTQ"     %%mm0,-15(%2, %0)       \n\t"
            MOVNTQ"     %%mm2,- 7(%2, %0)       \n\t"
            "add        $16, %0                 \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst)
        );
        count -= 15;
    }
    while(count<0) {
        dst[count]= src[2*count];
        count++;
    }
}

static void RENAME(extract_odd)(const uint8_t *src, uint8_t *dst, x86_reg count)
{
    src ++;
    dst +=   count;
    src += 2*count;
    count= - count;

    if(count < -16) {
        count += 16;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -32(%1, %0, 2), %%mm0   \n\t"
            "movq       -24(%1, %0, 2), %%mm1   \n\t"
            "movq       -16(%1, %0, 2), %%mm2   \n\t"
            "movq       -8(%1, %0, 2), %%mm3    \n\t"
            "pand       %%mm7, %%mm0            \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm2            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            MOVNTQ"     %%mm0,-16(%2, %0)       \n\t"
            MOVNTQ"     %%mm2,- 8(%2, %0)       \n\t"
            "add        $16, %0                 \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst)
        );
        count -= 16;
    }
    while(count<0) {
        dst[count]= src[2*count];
        count++;
    }
}
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= - count;

    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -28(%1, %0, 4), %%mm0   \n\t"
            "movq       -20(%1, %0, 4), %%mm1   \n\t"
            "movq       -12(%1, %0, 4), %%mm2   \n\t"
            "movq       -4(%1, %0, 4), %%mm3    \n\t"
            "pand       %%mm7, %%mm0            \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm2            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            "movq       %%mm0, %%mm1            \n\t"
            "movq       %%mm2, %%mm3            \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm2, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm1            \n\t"
            MOVNTQ"     %%mm0,- 7(%3, %0)       \n\t"
            MOVNTQ"     %%mm1,- 7(%2, %0)       \n\t"
            "add        $8, %0                  \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -28(%1, %0, 4), %%mm0   \n\t"
            "movq       -20(%1, %0, 4), %%mm1   \n\t"
            "movq       -12(%1, %0, 4), %%mm2   \n\t"
            "movq       -4(%1, %0, 4), %%mm3    \n\t"
            PAVGB"      -28(%2, %0, 4), %%mm0   \n\t"
            PAVGB"      -20(%2, %0, 4), %%mm1   \n\t"
            PAVGB"      -12(%2, %0, 4), %%mm2   \n\t"
            PAVGB"      - 4(%2, %0, 4), %%mm3   \n\t"
            "pand       %%mm7, %%mm0            \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm2            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            "movq       %%mm0, %%mm1            \n\t"
            "movq       %%mm2, %%mm3            \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm2, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm1            \n\t"
            MOVNTQ"     %%mm0,- 7(%4, %0)       \n\t"
            MOVNTQ"     %%mm1,- 7(%3, %0)       \n\t"
            "add        $8, %0                  \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+=   count;
    dst1+=   count;
    src += 4*count;
    count= - count;

    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -28(%1, %0, 4), %%mm0   \n\t"
            "movq       -20(%1, %0, 4), %%mm1   \n\t"
            "movq       -12(%1, %0, 4), %%mm2   \n\t"
            "movq       -4(%1, %0, 4), %%mm3    \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm1               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "psrlw      $8, %%mm3               \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            "movq       %%mm0, %%mm1            \n\t"
            "movq       %%mm2, %%mm3            \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm2, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm1            \n\t"
            MOVNTQ"     %%mm0,- 7(%3, %0)       \n\t"
            MOVNTQ"     %%mm1,- 7(%2, %0)       \n\t"
            "add        $8, %0                  \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    src++;
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 +=   count;
    dst1 +=   count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw    %%mm7, %%mm7            \n\t"
            "psrlw      $8, %%mm7               \n\t"
            "1:                                 \n\t"
            "movq       -28(%1, %0, 4), %%mm0   \n\t"
            "movq       -20(%1, %0, 4), %%mm1   \n\t"
            "movq       -12(%1, %0, 4), %%mm2   \n\t"
            "movq       -4(%1, %0, 4), %%mm3    \n\t"
            PAVGB"      -28(%2, %0, 4), %%mm0   \n\t"
            PAVGB"      -20(%2, %0, 4), %%mm1   \n\t"
            PAVGB"      -12(%2, %0, 4), %%mm2   \n\t"
            PAVGB"      - 4(%2, %0, 4), %%mm3   \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm1               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "psrlw      $8, %%mm3               \n\t"
            "packuswb   %%mm1, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm2            \n\t"
            "movq       %%mm0, %%mm1            \n\t"
            "movq       %%mm2, %%mm3            \n\t"
            "psrlw      $8, %%mm0               \n\t"
            "psrlw      $8, %%mm2               \n\t"
            "pand       %%mm7, %%mm1            \n\t"
            "pand       %%mm7, %%mm3            \n\t"
            "packuswb   %%mm2, %%mm0            \n\t"
            "packuswb   %%mm3, %%mm1            \n\t"
            MOVNTQ"     %%mm0,- 7(%4, %0)       \n\t"
            MOVNTQ"     %%mm1,- 7(%3, %0)       \n\t"
            "add        $8, %0                  \n\t"
            " js        1b                      \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    src0++;
    src1++;
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
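
/*
 * The four packed-to-planar wrappers below combine the helpers above:
 * luma is extracted from every line; 4:2:0 chroma is produced only on odd
 * lines by averaging the current line with the previous one (*2avg), and
 * 4:2:2 chroma is extracted from every line (*2).  YUYV keeps luma at
 * even byte offsets and UYVY at odd offsets, which is the only difference
 * between the yuyv* and uyvy* variants.
 */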
static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth = AV_CEIL_RSHIFT(width, 1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        if(y&1) {
            RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}

#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth = AV_CEIL_RSHIFT(width, 1);

    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        RENAME(extract_odd2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth = AV_CEIL_RSHIFT(width, 1);

    for (y=0; y<height; y++) {
        RENAME(extract_odd)(src, ydst, width);
        if(y&1) {
            RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }

        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}

#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth = AV_CEIL_RSHIFT(width, 1);

    for (y=0; y<height; y++) {
        RENAME(extract_odd)(src, ydst, width);
        RENAME(extract_even2)(src, udst, vdst, chromWidth);

        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS"       \n\t"
        SFENCE"     \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
#endif /* !COMPILE_TEMPLATE_SSE2 */
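
/*
 * rgb2rgb_init installs this template instantiation's functions into the
 * global function pointers.  RENAME() expands to a per-CPU-flavour suffix,
 * and the including file decides at runtime, based on the detected CPU
 * flags, which instantiation's init to call.
 */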
static av_cold void RENAME(rgb2rgb_init)(void)
{
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
    rgb15to16          = RENAME(rgb15to16);
    rgb15tobgr24       = RENAME(rgb15tobgr24);
    rgb15to32          = RENAME(rgb15to32);
    rgb16tobgr24       = RENAME(rgb16tobgr24);
    rgb16to32          = RENAME(rgb16to32);
    rgb16to15          = RENAME(rgb16to15);
    rgb24tobgr16       = RENAME(rgb24tobgr16);
    rgb24tobgr15       = RENAME(rgb24tobgr15);
    rgb24tobgr32       = RENAME(rgb24tobgr32);
    rgb32to16          = RENAME(rgb32to16);
    rgb32to15          = RENAME(rgb32to15);
    rgb32tobgr24       = RENAME(rgb32tobgr24);
    rgb24to15          = RENAME(rgb24to15);
    rgb24to16          = RENAME(rgb24to16);
    rgb24tobgr24       = RENAME(rgb24tobgr24);
    rgb32tobgr16       = RENAME(rgb32tobgr16);
    rgb32tobgr15       = RENAME(rgb32tobgr15);
    yv12toyuy2         = RENAME(yv12toyuy2);
    yv12touyvy         = RENAME(yv12touyvy);
    yuv422ptoyuy2      = RENAME(yuv422ptoyuy2);
    yuv422ptouyvy      = RENAME(yuv422ptouyvy);
    yuy2toyv12         = RENAME(yuy2toyv12);
    vu9_to_vu12        = RENAME(vu9_to_vu12);
    yvu9_to_yuy2       = RENAME(yvu9_to_yuy2);
    uyvytoyuv422       = RENAME(uyvytoyuv422);
    yuyvtoyuv422       = RENAME(yuyvtoyuv422);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
    planar2x           = RENAME(planar2x);
#endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */
#if HAVE_7REGS
    ff_rgb24toyv12     = RENAME(rgb24toyv12);
#endif /* HAVE_7REGS */

    yuyvtoyuv420       = RENAME(yuyvtoyuv420);
    uyvytoyuv420       = RENAME(uyvytoyuv420);
#endif /* !COMPILE_TEMPLATE_SSE2 */

#if !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX
    interleaveBytes    = RENAME(interleaveBytes);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX */
#if !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL
#if !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_X86ASM
    deinterleaveBytes  = RENAME(deinterleaveBytes);
#endif
#endif
}