/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>
/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1  ) = {0x0001000100010001ULL, 0x0001000100010001ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// For shared libraries it is better to synthesize these constants in
// registers than to reference them through memory.
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t" ::)
#endif
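
/* Illustrative note (not in the original source): the PIC variants above
 * synthesize their constants entirely in registers, e.g. MOVQ_BONE:
 *
 *   pcmpeqd  regd, regd   ; regd = 0xFFFFFFFFFFFFFFFF (all ones)
 *   psrlw    $15,  regd   ; regd = 0x0001000100010001 (each word >>= 15)
 *   packuswb regd, regd   ; regd = 0x0101010101010101 (words packed to bytes)
 *
 * MOVQ_WTWO shifts each word left by one instead of packing, giving
 * 0x0002000200020002 without any memory reference that would need
 * relocation.
 */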
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */
#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */
#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */
void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
    // If this block were an exact copy of the one above, the compiler
    // would generate some very strange code, so the block pointer is
    // passed in a register ("r") instead of as a memory operand.
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
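
/* Illustrative note (not in the original source): packuswb packs signed
 * words to bytes with unsigned saturation, so the function above is the
 * scalar
 *
 *   for (i = 0; i < 64; i++)
 *       pixels[(i >> 3) * line_size + (i & 7)] = av_clip_uint8(block[i]);
 *
 * done two rows (16 coefficients) per iteration of the unrolled pair.
 */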
DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb 8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}
void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
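
/* Illustrative note (not in the original source): the loop above widens
 * each pixel row against the zero register mm7 (punpck{l,h}bw), adds the
 * DCT coefficients with signed saturation (paddsw) and packs back with
 * unsigned saturation (packuswb), i.e. roughly
 *
 *   pix[x] = av_clip_uint8(pix[x] + block[x]);
 *
 * for each of the 64 positions of the 8x8 block.
 */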
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
        );
}
static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        " js 1b \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
        );
}
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
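
/* Illustrative sketch (not in the original source): the cmov loop above is
 * roughly the scalar median predictor of the C fallback in dsputil.c:
 *
 *   for (x = 0; x < w; x++) {
 *       t = top[x];
 *       l = mid_pred(l, t, l + t - tl) + diff[x];   // median of three
 *       tl = t;
 *       dst[x] = l;
 *   }
 */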
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength= ff_h263_loop_filter_strength[qscale];

        __asm__ volatile(
            H263_LOOP_FILTER
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m" (*(uint64_t*)(src - 2*stride)),
              "+m" (*(uint64_t*)(src - 1*stride)),
              "+m" (*(uint64_t*)(src + 0*stride)),
              "+m" (*(uint64_t*)(src + 1*stride))
            : "g" (2*strength), "m"(ff_pb_FC)
        );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp)[4];
        uint8_t *btemp= (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp,   src,            8, stride);
        transpose4x4(btemp+4, src + 4*stride, 8, stride);
        __asm__ volatile(
            H263_LOOP_FILTER // 5 3 4 6
            : "+m" (temp[0]),
              "+m" (temp[1]),
              "+m" (temp[2]),
              "+m" (temp[3])
            : "g" (2*strength), "m"(ff_pb_FC)
        );

        __asm__ volatile(
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0,%2) \n\t"
            "movd %%mm3, (%0,%2,2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0,%3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1,%2) \n\t"
            "movd %%mm6, (%1,%2,2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1,%3) \n\t"
            :: "r" (src),
               "r" (src + 4*stride),
               "r" ((x86_reg) stride ),
               "r" ((x86_reg)(3*stride))
        );
    }
}
/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w == 8 or w == 16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
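
/* Illustrative sketch (not in the original source): scalar equivalent of
 * the padding above:
 *
 *   for (y = 0; y < height; y++) {                 // left/right: replicate
 *       memset(buf + y*wrap - w, buf[y*wrap], w);
 *       memset(buf + y*wrap + width, buf[y*wrap + width - 1], w);
 *   }
 *   for (i = 0; i < w; i++) {                      // top/bottom: copy rows
 *       memcpy(buf - (i+1)*wrap - w, buf - w, width + 2*w);
 *       memcpy(last_line + (i+1)*wrap - w, last_line - w, width + 2*w);
 *   }
 */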
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
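
/* Illustrative sketch (not in the original source): the scalar PNG Paeth
 * predictor that the macro vectorizes, with a = left (previous output),
 * b = above, c = upper-left:
 *
 *   p  = a + b - c;
 *   pa = abs(p - a);  pb = abs(p - b);  pc = abs(p - c);
 *   pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 *   dst[x] = (dst[x] + pred) & 0xFF;   // the & 0xFF is the ff_pw_255 mask
 */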
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
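
/* Illustrative note (not in the original source): QPEL_V_LOW evaluates one
 * row of the symmetric 8-tap MPEG-4 quarter-pel lowpass filter,
 *
 *   out = clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)
 *
 * where x1..x4 are the symmetric pair sums around the half-pel position
 * (x1 innermost, x4 outermost), matching the scalar temp[] expressions in
 * the 3DNow! fallbacks below.
 */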
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
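
/* OP glue for the qpel code above and below: PUT_OP just stores the
 * filtered result, while the AVG variants average it with what is
 * already at the destination (pavgb on MMX2, pavgusb on 3DNow!; both
 * compute (a+b+1)>>1 per byte). "size" selects the mov width and
 * "temp" is a scratch register for the destination load. */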
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}

#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}
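
/* As best read from the call sites below, the _l3_ helpers (defined
 * earlier in this file) blend three pixels, roughly
 *   dst = avg(src[0], avg(src[S1], src[S2]))
 * i.e. a 1/2 + 1/4 + 1/4 weighting that approximates the bilinear
 * quarter-pel weights (exact on the axes, approximate on diagonals);
 * S0 only pre-offsets the source pointer. */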
#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
    OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
    OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,        1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,       -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,        stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,  -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,        stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,        stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,  -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)

#if 0
static void just_return(void) { return; }
#endif

static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd %0, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );
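
    /* The per-pixel loop below is a plain bilinear blend of a 2x2 source
     * neighbourhood: with s = 1<<shift and (dx,dy) the subpel fraction,
     *   dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
     *        + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift)
     * matching the per-term comments in the asm; r is the rounder. */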
    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq %0, %%mm4 \n\t"
                "movq %1, %%mm5 \n\t"
                "paddw %2, %%mm4 \n\t"
                "paddw %3, %%mm5 \n\t"
                "movq %%mm4, %0 \n\t"
                "movq %%mm5, %1 \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq %%mm6, %%mm2 \n\t"
                "movq %%mm6, %%mm1 \n\t"
                "psubw %%mm4, %%mm2 \n\t"
                "psubw %%mm5, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
                "movd %4, %%mm5 \n\t"
                "movd %3, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
                "movd %2, %%mm5 \n\t"
                "movd %1, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1 \n\t"
                "paddw %%mm3, %%mm2 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm2, %%mm0 \n\t"
                "psrlw %6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
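
/* Both generated functions touch one address per row, h rows apart by
 * stride; prefetcht0 (SSE) and prefetch (3DNow!) are only hints, so
 * reads past the end of the buffer cannot fault. */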
#include "h264_qpel_mmx.c"

void ff_put_h264_chroma_mc8_mmx_rnd   (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_mmx_nornd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc8_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_mmx2_rnd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
#endif

static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq %0, %%mm0 \n\t"
            "movq %1, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld $31, %%mm2 \n\t" // keep only the sign bit
            "pxor %%mm2, %%mm1 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "pand %%mm1, %%mm3 \n\t"
            "pandn %%mm1, %%mm4 \n\t"
            "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq %%mm3, %1 \n\t"
            "movq %%mm0, %0 \n\t"
            : "+m"(mag[i]), "+m"(ang[i])
            :: "memory"
        );
    }
    __asm__ volatile("femms");
}

static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps %0, %%xmm5 \n\t"
        :: "m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps %0, %%xmm0 \n\t"
            "movaps %1, %%xmm1 \n\t"
            "xorps %%xmm2, %%xmm2 \n\t"
            "xorps %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps %%xmm2, %%xmm1 \n\t"
            "movaps %%xmm3, %%xmm4 \n\t"
            "andps %%xmm1, %%xmm3 \n\t"
            "andnps %%xmm1, %%xmm4 \n\t"
            "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps %%xmm3, %1 \n\t"
            "movaps %%xmm0, %0 \n\t"
            : "+m"(mag[i]), "+m"(ang[i])
            :: "memory"
        );
    }
}
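
/* For reference, a scalar version of the coupling above (illustrative
 * sketch mirroring the C implementation in dsputil.c; not compiled):
 * the SIMD code performs the same sign-dependent reconstruction of
 * each magnitude/angle pair without branches. */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] += t;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] -= t;
            }
        }
    }
}
#endif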
#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss 0(%2), %%xmm5 \n"\
        "movss 8(%2), %%xmm6 \n"\
        "movss 24(%2), %%xmm7 \n"\
        "shufps $0, %%xmm5, %%xmm5 \n"\
        "shufps $0, %%xmm6, %%xmm6 \n"\
        "shufps $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps (%0,%1), %%xmm0 \n"\
        "movaps 0x400(%0,%1), %%xmm1 \n"\
        "movaps 0x800(%0,%1), %%xmm2 \n"\
        "movaps 0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps %%xmm5, %%xmm0 \n"\
        "mulps %%xmm6, %%xmm1 \n"\
        "mulps %%xmm5, %%xmm2 \n"\
        "mulps %%xmm7, %%xmm3 \n"\
        "mulps %%xmm7, %%xmm4 \n"\
        stereo("addps %%xmm1, %%xmm0 \n")\
        "addps %%xmm1, %%xmm2 \n"\
        "addps %%xmm3, %%xmm0 \n"\
        "addps %%xmm4, %%xmm2 \n"\
        mono("addps %%xmm2, %%xmm0 \n")\
        "movaps %%xmm0, (%0,%1) \n"\
        stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        : "+&r"(i)\
        : "r"(samples[0]+len), "r"(matrix)\
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
          "memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps (%3,%0), %%xmm0 \n"\
        stereo("movaps %%xmm0, %%xmm1 \n")\
        "mulps %%xmm4, %%xmm0 \n"\
        stereo("mulps %%xmm5, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps (%1), %%xmm2 \n"\
        stereo("movaps %%xmm2, %%xmm3 \n")\
        "mulps (%4,%2), %%xmm2 \n"\
        stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps %%xmm2, %%xmm0 \n"\
        stereo("addps %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps %%xmm0, (%3,%0) \n"\
        stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        : "+&r"(i), "=&r"(j), "=&r"(k)\
        : "r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        : "memory"\
    );
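
/* MIX5 covers the regular 5.0 downmix shapes checked for in
 * ac3_downmix_sse() below. Assuming the usual L, C, R, SL, SR channel
 * order and gains c0 = matrix[0][0], c1 = matrix[1][0],
 * c2 = matrix[3][0] (the movss offsets 0/8/24 above), it computes
 *   out_l = L*c0 + C*c1 + SL*c2
 *   out_r = R*c0 + C*c1 + SR*c2
 * per sample for stereo, or the sum of both for mono. MIX_MISC is the
 * generic matrix-times-channels fallback. */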
static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss (%2,%0), %%xmm4 \n"
            "movss 4(%2,%0), %%xmm5 \n"
            "shufps $0, %%xmm4, %%xmm4 \n"
            "shufps $0, %%xmm5, %%xmm5 \n"
            "movaps %%xmm4, (%1,%0,4) \n"
            "movaps %%xmm5, 16(%1,%0,4) \n"
            "jg 1b \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq (%1,%0), %%mm0 \n\t"
        "movq 8(%1,%0), %%mm1 \n\t"
        "pfmul (%2,%0), %%mm0 \n\t"
        "pfmul 8(%2,%0), %%mm1 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        "femms \n\t"
        : "+r"(i)
        : "r"(dst), "r"(src)
        : "memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps (%1,%0), %%xmm0 \n\t"
        "movaps 16(%1,%0), %%xmm1 \n\t"
        "mulps (%2,%0), %%xmm0 \n\t"
        "mulps 16(%2,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        : "+r"(i)
        : "r"(dst), "r"(src)
        : "memory"
    );
}
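
/* Both vector_fmul variants above are an in-place elementwise product,
 * dst[i] *= src[i], walking from the end of the arrays down to 0; len
 * is assumed to be a multiple of 4 (3DNow!) resp. 8 (SSE). Scalar
 * equivalent, for reference only: */
#if 0
static void vector_fmul_ref(float *dst, const float *src, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] *= src[i];
}
#endif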
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd 8(%1), %%mm0 \n\t"
        "pswapd (%1), %%mm1 \n\t"
        "pfmul (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq %%mm0, (%2,%0) \n\t"
        "movq %%mm1, 8(%2,%0) \n\t"
        "add $16, %1 \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        : "+r"(i), "+r"(src1)
        : "r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps 16(%1), %%xmm0 \n\t"
        "movaps (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps (%3,%0), %%xmm0 \n\t"
        "mulps 16(%3,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%2,%0) \n\t"
        "movaps %%xmm1, 16(%2,%0) \n\t"
        "add $32, %1 \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        : "+r"(i), "+r"(src1)
        : "r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq (%2,%0), %%mm0 \n\t"
        "movq 8(%2,%0), %%mm1 \n\t"
        "pfmul (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "pfadd (%4,%0), %%mm0 \n\t"
        "pfadd 8(%4,%0), %%mm1 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0 \n\t"
        "jge 1b \n\t"
        : "+r"(i)
        : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
        : "memory"
    );
    __asm__ volatile("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps (%3,%0), %%xmm0 \n\t"
        "mulps 16(%3,%0), %%xmm1 \n\t"
        "addps (%4,%0), %%xmm0 \n\t"
        "addps 16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub $32, %0 \n\t"
        "jge 1b \n\t"
        : "+r"(i)
        : "r"(dst), "r"(src0), "r"(src1), "r"(src2)
        : "memory"
    );
}

static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd (%5,%1), %%mm1 \n"
            "movq (%5,%0), %%mm0 \n"
            "pswapd (%4,%1), %%mm5 \n"
            "movq (%3,%0), %%mm4 \n"
            "movq %%mm0, %%mm2 \n"
            "movq %%mm1, %%mm3 \n"
            "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
            "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
            "pfadd %%mm3, %%mm2 \n"
            "pfsub %%mm0, %%mm1 \n"
            "pswapd %%mm2, %%mm2 \n"
            "movq %%mm1, (%2,%0) \n"
            "movq %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            : "+r"(i), "+r"(j)
            : "r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
    ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps (%5,%1), %%xmm1 \n"
            "movaps (%5,%0), %%xmm0 \n"
            "movaps (%4,%1), %%xmm5 \n"
            "movaps (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps %%xmm0, %%xmm2 \n"
            "movaps %%xmm1, %%xmm3 \n"
            "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
            "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
            "addps %%xmm3, %%xmm2 \n"
            "subps %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps %%xmm1, (%2,%0) \n"
            "movaps %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            : "+r"(i), "+r"(j)
            : "r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
    ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}
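
/* The window kernels above implement MDCT overlap-add windowing. With
 * dst, src0 and win offset by len and add_bias == 0 (the only case the
 * SIMD path handles), each step produces the symmetric pair
 *   dst[i] = src0[i]*win[j] - src1[j]*win[i]
 *   dst[j] = src0[i]*win[i] + src1[j]*win[j]
 * for i = -len..-1, j = len-1..0. Scalar sketch for reference
 * (vector_fmul_window_ref is a hypothetical name; not compiled): */
#if 0
static void vector_fmul_window_ref(float *dst, const float *src0,
                                   const float *src1, const float *win,
                                   float add_bias, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len-1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0*wj - s1*wi + add_bias;
        dst[j] = s0*wi + s1*wj + add_bias;
    }
}
#endif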
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps (%2,%0), %%xmm0 \n"
        "cvtpi2ps 8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps %%xmm1, %%xmm0 \n"
        "movlhps %%xmm3, %%xmm2 \n"
        "mulps %%xmm4, %%xmm0 \n"
        "mulps %%xmm4, %%xmm2 \n"
        "movaps %%xmm0, (%1,%0) \n"
        "movaps %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        : "+r"(i)
        : "r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps %%xmm4, %%xmm0 \n"
        "mulps %%xmm4, %%xmm1 \n"
        "movaps %%xmm0, (%1,%0) \n"
        "movaps %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        : "+r"(i)
        : "r"(dst+len), "r"(src+len), "m"(mul)
    );
}
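
/* Both versions above compute dst[i] = src[i] * mul while converting
 * int32 to float on the fly: plain SSE has to go through cvtpi2ps on
 * two MMX-sized halves plus movlhps, whereas SSE2 converts four values
 * at once with cvtdq2ps. len is assumed to be a multiple of 8. */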
static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss %3, %%xmm4 \n"
        "movss %4, %%xmm5 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "shufps $0, %%xmm5, %%xmm5 \n"
        "1: \n\t"
        "movaps (%2,%0), %%xmm0 \n\t" // 3/1 on intel
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "movaps 32(%2,%0), %%xmm2 \n\t"
        "movaps 48(%2,%0), %%xmm3 \n\t"
        "maxps %%xmm4, %%xmm0 \n\t"
        "maxps %%xmm4, %%xmm1 \n\t"
        "maxps %%xmm4, %%xmm2 \n\t"
        "maxps %%xmm4, %%xmm3 \n\t"
        "minps %%xmm5, %%xmm0 \n\t"
        "minps %%xmm5, %%xmm1 \n\t"
        "minps %%xmm5, %%xmm2 \n\t"
        "minps %%xmm5, %%xmm3 \n\t"
        "movaps %%xmm0, (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "movaps %%xmm2, 32(%1,%0) \n\t"
        "movaps %%xmm3, 48(%1,%0) \n\t"
        "sub $64, %0 \n\t"
        "jge 1b \n\t"
        : "+&r"(i)
        : "r"(dst), "r"(src), "m"(min), "m"(max)
        : "memory"
    );
}
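
/* Scalar equivalent of vector_clipf_sse() above, for reference only;
 * the SIMD loop clamps 16 floats per iteration, so len is assumed to
 * be a multiple of 16: */
#if 0
static void vector_clipf_ref(float *dst, const float *src,
                             float min, float max, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] < min ? min : src[i] > max ? max : src[i];
}
#endif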
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add %0, %0 \n\t"
        "lea (%2,%0,2), %2 \n\t"
        "add %0, %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "pf2id (%2,%0,2), %%mm0 \n\t"
        "pf2id 8(%2,%0,2), %%mm1 \n\t"
        "pf2id 16(%2,%0,2), %%mm2 \n\t"
        "pf2id 24(%2,%0,2), %%mm3 \n\t"
        "packssdw %%mm1, %%mm0 \n\t"
        "packssdw %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm2, 8(%1,%0) \n\t"
        "add $16, %0 \n\t"
        " js 1b \n\t"
        "femms \n\t"
        : "+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add %0, %0 \n\t"
        "lea (%2,%0,2), %2 \n\t"
        "add %0, %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "cvtps2pi (%2,%0,2), %%mm0 \n\t"
        "cvtps2pi 8(%2,%0,2), %%mm1 \n\t"
        "cvtps2pi 16(%2,%0,2), %%mm2 \n\t"
        "cvtps2pi 24(%2,%0,2), %%mm3 \n\t"
        "packssdw %%mm1, %%mm0 \n\t"
        "packssdw %%mm3, %%mm2 \n\t"
        "movq %%mm0, (%1,%0) \n\t"
        "movq %%mm2, 8(%1,%0) \n\t"
        "add $16, %0 \n\t"
        " js 1b \n\t"
        "emms \n\t"
        : "+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add %0, %0 \n\t"
        "lea (%2,%0,2), %2 \n\t"
        "add %0, %1 \n\t"
        "neg %0 \n\t"
        "1: \n\t"
        "cvtps2dq (%2,%0,2), %%xmm0 \n\t"
        "cvtps2dq 16(%2,%0,2), %%xmm1 \n\t"
        "packssdw %%xmm1, %%xmm0 \n\t"
        "movdqa %%xmm0, (%1,%0) \n\t"
        "add $16, %0 \n\t"
        " js 1b \n\t"
        : "+r"(reglen), "+r"(dst), "+r"(src)
    );
}
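
/* All three float_to_int16 variants convert len floats to int16 with
 * packssdw saturation to [-32768,32767]. The shared prologue doubles
 * the length (2 output bytes per sample), points dst/src past their
 * ends and counts a negative offset up to zero; cvtps2pi/cvtps2dq use
 * the current rounding mode (round-to-nearest by default), while pf2id
 * truncates, hence the bit-exactness note on the 3DNow! version. */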
void ff_vp3_idct_mmx(int16_t *input_data);
void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);

void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);

void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);

void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);

int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);

void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);

#if !HAVE_YASM
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse

#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED(16, int16_t, tmp)[len];\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            : "+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}
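
/* In the two-channel body of each instantiation below, four samples
 * per channel are converted and then interleaved with punpcklwd /
 * punpckhwd (movhlps + punpcklwd on SSE2), so dst receives
 * L0 R0 L1 R1 ... directly, with no separate interleave pass. */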
FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1: \n"
    "pf2id (%2,%0), %%mm0 \n"
    "pf2id 8(%2,%0), %%mm1 \n"
    "pf2id (%3,%0), %%mm2 \n"
    "pf2id 8(%3,%0), %%mm3 \n"
    "packssdw %%mm1, %%mm0 \n"
    "packssdw %%mm3, %%mm2 \n"
    "movq %%mm0, %%mm1 \n"
    "punpcklwd %%mm2, %%mm0 \n"
    "punpckhwd %%mm2, %%mm1 \n"
    "movq %%mm0, (%1,%0)\n"
    "movq %%mm1, 8(%1,%0)\n"
    "add $16, %0 \n"
    "js 1b \n"
    "femms \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1: \n"
    "cvtps2pi (%2,%0), %%mm0 \n"
    "cvtps2pi 8(%2,%0), %%mm1 \n"
    "cvtps2pi (%3,%0), %%mm2 \n"
    "cvtps2pi 8(%3,%0), %%mm3 \n"
    "packssdw %%mm1, %%mm0 \n"
    "packssdw %%mm3, %%mm2 \n"
    "movq %%mm0, %%mm1 \n"
    "punpcklwd %%mm2, %%mm0 \n"
    "punpckhwd %%mm2, %%mm1 \n"
    "movq %%mm0, (%1,%0)\n"
    "movq %%mm1, 8(%1,%0)\n"
    "add $16, %0 \n"
    "js 1b \n"
    "emms \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1: \n"
    "cvtps2dq (%2,%0), %%xmm0 \n"
    "cvtps2dq (%3,%0), %%xmm1 \n"
    "packssdw %%xmm1, %%xmm0 \n"
    "movhlps %%xmm0, %%xmm1 \n"
    "punpcklwd %%xmm1, %%xmm0 \n"
    "movdqa %%xmm0, (%1,%0) \n"
    "add $16, %0 \n"
    "js 1b \n"
)

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();
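
    /* avctx->dsp_mask lets callers override the detected CPU features:
     * with AV_CPU_FLAG_FORCE set the masked bits are forced on,
     * otherwise they are stripped from mm_flags. */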
  2398. if (avctx->dsp_mask) {
  2399. if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
  2400. mm_flags |= (avctx->dsp_mask & 0xffff);
  2401. else
  2402. mm_flags &= ~(avctx->dsp_mask & 0xffff);
  2403. }
  2404. #if 0
  2405. av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
  2406. if (mm_flags & AV_CPU_FLAG_MMX)
  2407. av_log(avctx, AV_LOG_INFO, " mmx");
  2408. if (mm_flags & AV_CPU_FLAG_MMX2)
  2409. av_log(avctx, AV_LOG_INFO, " mmx2");
  2410. if (mm_flags & AV_CPU_FLAG_3DNOW)
  2411. av_log(avctx, AV_LOG_INFO, " 3dnow");
  2412. if (mm_flags & AV_CPU_FLAG_SSE)
  2413. av_log(avctx, AV_LOG_INFO, " sse");
  2414. if (mm_flags & AV_CPU_FLAG_SSE2)
  2415. av_log(avctx, AV_LOG_INFO, " sse2");
  2416. av_log(avctx, AV_LOG_INFO, "\n");
  2417. #endif
  2418. if (mm_flags & AV_CPU_FLAG_MMX) {
  2419. const int idct_algo= avctx->idct_algo;
  2420. if(avctx->lowres==0){
  2421. if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
  2422. c->idct_put= ff_simple_idct_put_mmx;
  2423. c->idct_add= ff_simple_idct_add_mmx;
  2424. c->idct = ff_simple_idct_mmx;
  2425. c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
  2426. #if CONFIG_GPL
  2427. }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
  2428. if(mm_flags & AV_CPU_FLAG_MMX2){
  2429. c->idct_put= ff_libmpeg2mmx2_idct_put;
  2430. c->idct_add= ff_libmpeg2mmx2_idct_add;
  2431. c->idct = ff_mmxext_idct;
  2432. }else{
  2433. c->idct_put= ff_libmpeg2mmx_idct_put;
  2434. c->idct_add= ff_libmpeg2mmx_idct_add;
  2435. c->idct = ff_mmx_idct;
  2436. }
  2437. c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
  2438. #endif
  2439. }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
  2440. idct_algo==FF_IDCT_VP3 && HAVE_YASM){
  2441. if(mm_flags & AV_CPU_FLAG_SSE2){
  2442. c->idct_put= ff_vp3_idct_put_sse2;
  2443. c->idct_add= ff_vp3_idct_add_sse2;
  2444. c->idct = ff_vp3_idct_sse2;
  2445. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2446. }else{
  2447. c->idct_put= ff_vp3_idct_put_mmx;
  2448. c->idct_add= ff_vp3_idct_add_mmx;
  2449. c->idct = ff_vp3_idct_mmx;
  2450. c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
  2451. }
  2452. }else if(idct_algo==FF_IDCT_CAVS){
  2453. c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
  2454. }else if(idct_algo==FF_IDCT_XVIDMMX){
  2455. if(mm_flags & AV_CPU_FLAG_SSE2){
  2456. c->idct_put= ff_idct_xvid_sse2_put;
  2457. c->idct_add= ff_idct_xvid_sse2_add;
  2458. c->idct = ff_idct_xvid_sse2;
  2459. c->idct_permutation_type= FF_SSE2_IDCT_PERM;
  2460. }else if(mm_flags & AV_CPU_FLAG_MMX2){
  2461. c->idct_put= ff_idct_xvid_mmx2_put;
  2462. c->idct_add= ff_idct_xvid_mmx2_add;
  2463. c->idct = ff_idct_xvid_mmx2;
  2464. }else{
  2465. c->idct_put= ff_idct_xvid_mmx_put;
  2466. c->idct_add= ff_idct_xvid_mmx_add;
  2467. c->idct = ff_idct_xvid_mmx;
  2468. }
  2469. }
  2470. }
  2471. c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
  2472. c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
  2473. c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
  2474. c->clear_block = clear_block_mmx;
  2475. c->clear_blocks = clear_blocks_mmx;
  2476. if ((mm_flags & AV_CPU_FLAG_SSE) &&
  2477. !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
  2478. /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
  2479. c->clear_block = clear_block_sse;
  2480. c->clear_blocks = clear_blocks_sse;
  2481. }
  2482. #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
  2483. c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
  2484. c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
  2485. c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
  2486. c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }

#if HAVE_YASM
        c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif
        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;

                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER && HAVE_YASM) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }
            if (CONFIG_VP3_DECODER && HAVE_YASM) {
                c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
            }
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
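            /* Each invocation fills all 16 quarter-pel positions of one table;
             * the index of the mc(x)(y) function is x + 4*y, so e.g.
             * SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2) places put_qpel16_mc31_mmx2
             * (x = 3/4 pel, y = 1/4 pel) at c->put_qpel_pixels_tab[0][7]. */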
            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

#if HAVE_YASM
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;

            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
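            /* the cmov-based median predictor below is presumably faster than
             * the MMX2 version on the (AMD) CPUs that report 3DNow; the flag
             * test is effectively used as a vendor heuristic */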
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&AV_CPU_FLAG_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_VC1_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;

                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

#if HAVE_YASM
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;

            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
#endif
        }
#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
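        /* Unlike SET_QPEL_FUNCS, one H264_QPEL_FUNCS invocation overrides a
         * single quarter-pel position (x,y), but does so for put and avg at
         * both the 16x16 and 8x8 block sizes at once; e.g.
         * H264_QPEL_FUNCS(0, 0, sse2) replaces only the mc00 (plain copy)
         * entries. */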
        if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
            H264_QPEL_FUNCS(0, 0, sse2);
        }

        if(mm_flags & AV_CPU_FLAG_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#if HAVE_SSSE3
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
        }
#endif
        if(mm_flags & AV_CPU_FLAG_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & AV_CPU_FLAG_MMX2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add = vector_fmul_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->vector_clipf = vector_clipf_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
#if HAVE_YASM
            c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_3DNOW)
            c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
        if(mm_flags & AV_CPU_FLAG_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
        }
        if((mm_flags & AV_CPU_FLAG_SSSE3) && !(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW)) && HAVE_YASM)
            // skipped where SSE4.2 or 3DNow is present: the SSSE3 version is
            // presumably hurt by cacheline-split loads there ("cachesplit")
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    }
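    /* encoder-only DSP functions (SAD/SSD, DCT-domain helpers, etc.) are
     * initialized separately by dsputilenc_init_mmx(); in this tree that
     * lives in dsputilenc_mmx.c */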
    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}