- /*
- * Copyright (c) 2015 - 2017 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
- #include "libavutil/mips/generic_macros_msa.h"
- #include "libavcodec/mips/hevcdsp_mips.h"
- #include "libavcodec/mips/hevc_macros_msa.h"
- static const uint8_t ff_hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
- /* 8 width cases */
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
- 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
- };
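- /* Bi-weighting core: each 16-bit sample from 'in' is interleaved with the
-  * matching sample from 'vec', the pair is multiplied by the packed
-  * (weight0, weight1) word and accumulated onto 'offset' with dpadd, the sum
-  * is shift-rounded by 'rnd', then packed back to 16 bit and clipped to the
-  * 0..255 pixel range. */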
- #define HEVC_BIW_RND_CLIP2(in0, in1, vec0, vec1, wgt, rnd, offset, \
- out0, out1) \
- { \
- v4i32 out0_r, out1_r, out0_l, out1_l; \
- \
- ILVR_H2_SW(in0, vec0, in1, vec1, out0_r, out1_r); \
- ILVL_H2_SW(in0, vec0, in1, vec1, out0_l, out1_l); \
- \
- out0_r = __msa_dpadd_s_w(offset, (v8i16) out0_r, (v8i16) wgt); \
- out1_r = __msa_dpadd_s_w(offset, (v8i16) out1_r, (v8i16) wgt); \
- out0_l = __msa_dpadd_s_w(offset, (v8i16) out0_l, (v8i16) wgt); \
- out1_l = __msa_dpadd_s_w(offset, (v8i16) out1_l, (v8i16) wgt); \
- \
- SRAR_W4_SW(out0_r, out1_r, out0_l, out1_l, rnd); \
- PCKEV_H2_SH(out0_l, out0_r, out1_l, out1_r, out0, out1); \
- CLIP_SH2_0_255(out0, out1); \
- }
- #define HEVC_BIW_RND_CLIP4(in0, in1, in2, in3, vec0, vec1, vec2, vec3, \
- wgt, rnd, offset, out0, out1, out2, out3) \
- { \
- HEVC_BIW_RND_CLIP2(in0, in1, vec0, vec1, wgt, rnd, offset, out0, out1); \
- HEVC_BIW_RND_CLIP2(in2, in3, vec2, vec3, wgt, rnd, offset, out2, out3); \
- }
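- /* Same computation as HEVC_BIW_RND_CLIP2, but clipping with the saturating
-  * CLIP_SH2_0_255_MAX_SATU variant. */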
- #define HEVC_BIW_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, wgt, rnd, \
- offset, out0, out1) \
- { \
- v4i32 out0_r, out1_r, out0_l, out1_l; \
- \
- ILVR_H2_SW(in0, vec0, in1, vec1, out0_r, out1_r); \
- ILVL_H2_SW(in0, vec0, in1, vec1, out0_l, out1_l); \
- out0_r = __msa_dpadd_s_w(offset, (v8i16) out0_r, (v8i16) wgt); \
- out1_r = __msa_dpadd_s_w(offset, (v8i16) out1_r, (v8i16) wgt); \
- out0_l = __msa_dpadd_s_w(offset, (v8i16) out0_l, (v8i16) wgt); \
- out1_l = __msa_dpadd_s_w(offset, (v8i16) out1_l, (v8i16) wgt); \
- SRAR_W4_SW(out0_r, out1_r, out0_l, out1_l, rnd); \
- PCKEV_H2_SH(out0_l, out0_r, out1_l, out1_r, out0, out1); \
- CLIP_SH2_0_255_MAX_SATU(out0, out1); \
- }
- #define HEVC_BIW_RND_CLIP4_MAX_SATU(in0, in1, in2, in3, vec0, vec1, vec2, \
- vec3, wgt, rnd, offset, out0, out1, \
- out2, out3) \
- { \
- HEVC_BIW_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, wgt, rnd, offset, \
- out0, out1); \
- HEVC_BIW_RND_CLIP2_MAX_SATU(in2, in3, vec2, vec3, wgt, rnd, offset, \
- out2, out3); \
- }
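- /* Bi-weighted "copy" (no interpolation) for 4-pixel-wide blocks: the 8-bit
-  * source samples are promoted to 16 bit and shifted left by 6 to match the
-  * precision of the intermediate in src1_ptr, then both streams are weighted,
-  * offset, rounded and clipped.  Heights of 2, 4 and multiples of 8 are
-  * handled separately. */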
- static void hevc_biwgt_copy_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt, tp0, tp1, tp2, tp3;
- uint64_t tpd0, tpd1, tpd2, tpd3;
- int32_t offset, weight;
- v16u8 out0, out1;
- v16i8 zero = { 0 };
- v16i8 src0 = { 0 }, src1 = { 0 };
- v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
- v8i16 dst0, dst1, dst2, dst3, weight_vec;
- v4i32 dst0_r, dst0_l, offset_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- if (2 == height) {
- LW2(src0_ptr, src_stride, tp0, tp1);
- INSERT_W2_SB(tp0, tp1, src0);
- LD2(src1_ptr, src2_stride, tpd0, tpd1);
- INSERT_D2_SH(tpd0, tpd1, in0);
- dst0 = (v8i16) __msa_ilvr_b(zero, src0);
- dst0 <<= 6;
- ILVRL_H2_SW(dst0, in0, dst0_r, dst0_l);
- dst0_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_r, weight_vec);
- dst0_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_l, weight_vec);
- SRAR_W2_SW(dst0_r, dst0_l, rnd_vec);
- dst0 = (v8i16) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
- dst0 = CLIP_SH_0_255_MAX_SATU(dst0);
- out0 = (v16u8) __msa_pckev_b((v16i8) dst0, (v16i8) dst0);
- ST_W2(out0, 0, 1, dst, dst_stride);
- } else if (4 == height) {
- LW4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- INSERT_W4_SB(tp0, tp1, tp2, tp3, src0);
- LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3);
- INSERT_D2_SH(tpd0, tpd1, in0);
- INSERT_D2_SH(tpd2, tpd3, in1);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- SLLI_2V(dst0, dst1, 6);
- HEVC_BIW_RND_CLIP2_MAX_SATU(dst0, dst1, in0, in1, weight_vec, rnd_vec,
- offset_vec, dst0, dst1);
- out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_W4(out0, 0, 1, 2, 3, dst, dst_stride);
- } else if (0 == height % 8) {
- for (loop_cnt = (height >> 3); loop_cnt--;) {
- LW4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- src0_ptr += 4 * src_stride;
- INSERT_W4_SB(tp0, tp1, tp2, tp3, src0);
- LW4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- src0_ptr += 4 * src_stride;
- INSERT_W4_SB(tp0, tp1, tp2, tp3, src1);
- LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3);
- src1_ptr += (4 * src2_stride);
- INSERT_D2_SH(tpd0, tpd1, in0);
- INSERT_D2_SH(tpd2, tpd3, in1);
- LD4(src1_ptr, src2_stride, tpd0, tpd1, tpd2, tpd3);
- src1_ptr += (4 * src2_stride);
- INSERT_D2_SH(tpd0, tpd1, in2);
- INSERT_D2_SH(tpd2, tpd3, in3);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2,
- in3, weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_UB(dst1, dst0, dst3, dst2, out0, out1);
- ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- }
- }
- }
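- /* 6-pixel-wide variant: four rows per iteration, each row stored as a
-  * 4-byte word plus a 2-byte halfword. */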
- static void hevc_biwgt_copy_6w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- uint64_t tp0, tp1, tp2, tp3;
- v16u8 out0, out1;
- v16i8 zero = { 0 };
- v16i8 src0 = { 0 }, src1 = { 0 };
- v8i16 in0, in1, in2, in3;
- v8i16 dst0, dst1, dst2, dst3;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- weight_vec = __msa_fill_w(weight);
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- src0_ptr += (4 * src_stride);
- INSERT_D2_SB(tp0, tp1, src0);
- INSERT_D2_SB(tp2, tp3, src1);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_UB(dst1, dst0, dst3, dst2, out0, out1);
- ST_W2(out0, 0, 2, dst, dst_stride);
- ST_H2(out0, 2, 6, dst + 4, dst_stride);
- ST_W2(out1, 0, 2, dst + 2 * dst_stride, dst_stride);
- ST_H2(out1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_biwgt_copy_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint64_t tp0, tp1, tp2, tp3;
- int32_t offset, weight;
- v16u8 out0, out1, out2;
- v16i8 zero = { 0 };
- v16i8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 };
- v8i16 in0, in1, in2, in3, in4, in5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- if (2 == height) {
- LD2(src0_ptr, src_stride, tp0, tp1);
- INSERT_D2_SB(tp0, tp1, src0);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- SLLI_2V(dst0, dst1, 6);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1);
- out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_D2(out0, 0, 1, dst, dst_stride);
- } else if (6 == height) {
- LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- src0_ptr += 4 * src_stride;
- INSERT_D2_SB(tp0, tp1, src0);
- INSERT_D2_SB(tp2, tp3, src1);
- LD2(src0_ptr, src_stride, tp0, tp1);
- INSERT_D2_SB(tp0, tp1, src2);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- ILVRL_B2_SH(zero, src2, dst4, dst5);
- LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- SLLI_2V(dst4, dst5, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, dst0, dst1,
- dst2, dst3);
- HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec,
- offset_vec, dst4, dst5);
- PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
- } else if (0 == height % 4) {
- uint32_t loop_cnt;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD4(src0_ptr, src_stride, tp0, tp1, tp2, tp3);
- src0_ptr += (4 * src_stride);
- INSERT_D2_SB(tp0, tp1, src0);
- INSERT_D2_SB(tp2, tp3, src1);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2,
- in3, weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_UB(dst1, dst0, dst3, dst2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- }
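- /* 12-pixel-wide variant: the left 8 columns and the right 4 columns are
-  * weighted separately; the loop count is hard-coded to 16 / 4 iterations,
-  * so this path apparently expects height == 16. */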
- static void hevc_biwgt_copy_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16i8 zero = { 0 };
- v16u8 out0, out1, out2;
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = (16 >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in5, in4, in7, in6, in4, in5);
- ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
- dst0, dst1, dst2, dst3);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- ILVL_W2_SB(src1, src0, src3, src2, src0, src1);
- ILVR_B2_SH(zero, src0, zero, src1, dst4, dst5);
- dst4 <<= 6;
- dst5 <<= 6;
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, dst0, dst1,
- dst2, dst3);
- HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec,
- offset_vec, dst4, dst5);
- PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_biwgt_copy_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16u8 out0, out1, out2, out3;
- v16i8 zero = { 0 };
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);
- src1_ptr += (4 * src2_stride);
- ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp0, tmp1,
- tmp2, tmp3);
- ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp4, tmp5,
- tmp6, tmp7);
- SLLI_4V(tmp0, tmp1, tmp2, tmp3, 6);
- SLLI_4V(tmp4, tmp5, tmp6, tmp7, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp0, tmp1, tmp4, tmp5, in0, in1, in4, in5,
- weight_vec, rnd_vec, offset_vec, tmp0, tmp1,
- tmp4, tmp5);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp2, tmp3, tmp6, tmp7, in2, in3, in6, in7,
- weight_vec, rnd_vec, offset_vec, tmp2, tmp3,
- tmp6, tmp7);
- PCKEV_B2_UB(tmp4, tmp0, tmp5, tmp1, out0, out1);
- PCKEV_B2_UB(tmp6, tmp2, tmp7, tmp3, out2, out3);
- ST_UB4(out0, out1, out2, out3, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
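- /* 24-pixel-wide variant: each row is written as a 16-byte store plus an
-  * 8-byte store; the loop handles four rows per iteration with a fixed
-  * count of 8 (i.e. 32 rows). */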
- static void hevc_biwgt_copy_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16u8 out0, out1, out2, out3, out4, out5;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, zero = { 0 };
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, dst9, dst10;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, dst11;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = 8; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src4, src5);
- LD_SB4(src0_ptr + 16, src_stride, src2, src3, src6, src7);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);
- LD_SH4(src1_ptr + 16, src2_stride, in8, in9, in10, in11);
- src1_ptr += (4 * src2_stride);
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- ILVR_B2_SH(zero, src2, zero, src3, dst4, dst5);
- ILVRL_B2_SH(zero, src4, dst6, dst7);
- ILVRL_B2_SH(zero, src5, dst8, dst9);
- ILVR_B2_SH(zero, src6, zero, src7, dst10, dst11);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- SLLI_4V(dst4, dst5, dst6, dst7, 6);
- SLLI_4V(dst8, dst9, dst10, dst11, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in4, in1, in5,
- weight_vec, rnd_vec, offset_vec, dst0, dst1,
- dst2, dst3);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst4, dst5, dst6, dst7, in8, in9, in2, in6,
- weight_vec, rnd_vec, offset_vec, dst4, dst5,
- dst6, dst7);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst8, dst9, dst10, dst11, in3, in7, in10,
- in11, weight_vec, rnd_vec, offset_vec,
- dst8, dst9, dst10, dst11);
- PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
- PCKEV_B3_UB(dst7, dst6, dst9, dst8, dst11, dst10, out3, out4, out5);
- ST_UB4(out0, out1, out3, out4, dst, dst_stride);
- ST_D4(out2, out5, 0, 1, 0, 1, dst + 16, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_biwgt_copy_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16u8 out0, out1, out2, out3;
- v16i8 zero = { 0 };
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = (height >> 1); loop_cnt--;) {
- LD_SB2(src0_ptr, 16, src0, src1);
- src0_ptr += src_stride;
- LD_SB2(src0_ptr, 16, src2, src3);
- src0_ptr += src_stride;
- LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
- src1_ptr += src2_stride;
- LD_SH4(src1_ptr, 8, in4, in5, in6, in7);
- src1_ptr += src2_stride;
- ILVRL_B2_SH(zero, src0, tmp0, tmp4);
- ILVRL_B2_SH(zero, src1, tmp1, tmp5);
- ILVRL_B2_SH(zero, src2, tmp2, tmp6);
- ILVRL_B2_SH(zero, src3, tmp3, tmp7);
- SLLI_4V(tmp0, tmp1, tmp2, tmp3, 6);
- SLLI_4V(tmp4, tmp5, tmp6, tmp7, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp0, tmp4, tmp1, tmp5, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, tmp0, tmp4,
- tmp1, tmp5);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp2, tmp6, tmp3, tmp7, in4, in5, in6, in7,
- weight_vec, rnd_vec, offset_vec, tmp2, tmp6,
- tmp3, tmp7);
- PCKEV_B2_UB(tmp4, tmp0, tmp5, tmp1, out0, out1);
- PCKEV_B2_UB(tmp6, tmp2, tmp7, tmp3, out2, out3);
- ST_UB2(out0, out1, dst, 16);
- dst += dst_stride;
- ST_UB2(out2, out3, dst, 16);
- dst += dst_stride;
- }
- }
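- /* 48-pixel-wide variant: one row per iteration (three 16-byte stores),
-  * with a fixed loop count of 64 rows. */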
- static void hevc_biwgt_copy_48w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16u8 out0, out1, out2;
- v16i8 src0, src1, src2;
- v16i8 zero = { 0 };
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, in0, in1, in2, in3, in4, in5;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = 64; loop_cnt--;) {
- LD_SB3(src0_ptr, 16, src0, src1, src2);
- src0_ptr += src_stride;
- LD_SH6(src1_ptr, 8, in0, in1, in2, in3, in4, in5);
- src1_ptr += src2_stride;
- ILVRL_B2_SH(zero, src0, dst0, dst1);
- ILVRL_B2_SH(zero, src1, dst2, dst3);
- ILVRL_B2_SH(zero, src2, dst4, dst5);
- SLLI_4V(dst0, dst1, dst2, dst3, 6);
- SLLI_2V(dst4, dst5, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(dst0, dst1, dst2, dst3, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, dst0, dst1,
- dst2, dst3);
- HEVC_BIW_RND_CLIP2_MAX_SATU(dst4, dst5, in4, in5, weight_vec, rnd_vec,
- offset_vec, dst4, dst5);
- PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
- ST_UB2(out0, out1, dst, 16);
- ST_UB(out2, dst + 32);
- dst += dst_stride;
- }
- }
- static void hevc_biwgt_copy_64w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16u8 out0, out1, out2, out3;
- v16i8 zero = { 0 };
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v4i32 offset_vec, weight_vec, rnd_vec;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = height; loop_cnt--;) {
- LD_SB4(src0_ptr, 16, src0, src1, src2, src3);
- src0_ptr += src_stride;
- LD_SH8(src1_ptr, 8, in0, in1, in2, in3, in4, in5, in6, in7);
- src1_ptr += src2_stride;
- ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp0, tmp1,
- tmp2, tmp3);
- ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, tmp4, tmp5,
- tmp6, tmp7);
- SLLI_4V(tmp0, tmp1, tmp2, tmp3, 6);
- SLLI_4V(tmp4, tmp5, tmp6, tmp7, 6);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp0, tmp4, tmp1, tmp5, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, tmp0, tmp4,
- tmp1, tmp5);
- HEVC_BIW_RND_CLIP4_MAX_SATU(tmp2, tmp6, tmp3, tmp7, in4, in5, in6, in7,
- weight_vec, rnd_vec, offset_vec, tmp2, tmp6,
- tmp3, tmp7);
- PCKEV_B2_UB(tmp4, tmp0, tmp5, tmp1, out0, out1);
- PCKEV_B2_UB(tmp6, tmp2, tmp7, tmp3, out2, out3);
- ST_UB4(out0, out1, out2, out3, dst, 16);
- dst += dst_stride;
- }
- }
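- /* Horizontal 8-tap filtering combined with bi-weighting, 4-pixel-wide
-  * blocks (two rows per source register via the second mask set).  The
-  * source bytes are XORed with 128 so signed multiplies can be used; since
-  * the filter taps sum to 64, this biases the filtered path by 128 << 6,
-  * which is pre-compensated by folding 'constant' (128 * weight1 << 6)
-  * into the offset. */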
- static void hevc_hz_biwgt_8t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 src0, src1, src2, src3;
- v16i8 mask1, mask2, mask3;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1;
- v8i16 in0, in1, in2, in3;
- v8i16 filter_vec, out0, out1;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[16]);
- src0_ptr -= 3;
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- out0, out1);
- out0 = (v8i16) __msa_pckev_b((v16i8) out1, (v16i8) out0);
- ST_W4(out0, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_8t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 src0, src1, src2, src3;
- v16i8 mask1, mask2, mask3;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 in0, in1, in2, in3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- src0_ptr -= 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
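- /* Horizontal 8-tap + bi-weighting for 12-pixel-wide blocks: the left 8
-  * columns use the single-register masks, the right 4 columns the
-  * cross-register masks (mask4..mask7); the loop is fixed at 4 iterations
-  * of 4 rows, i.e. height 16. */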
- static void hevc_hz_biwgt_8t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, vec0, vec1, vec2, vec3;
- v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v8i16 filt0, filt1, filt2, filt3, out0, out1, out2, out3;
- v8i16 dst0, dst1, dst2, dst3, in0, in1, in2, in3, filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 3;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset = (offset0 + offset1) << rnd_val;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- mask4 = LD_SB(&ff_hevc_mask_arr[16]);
- mask5 = mask4 + 2;
- mask6 = mask4 + 4;
- mask7 = mask4 + 6;
- for (loop_cnt = 4; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec, out0, out1, out2,
- out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- LD_SB4(src0_ptr + 8, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr + 8, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
- vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
- vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1, weight_vec, rnd_vec,
- offset_vec, out0, out1);
- out0 = (v8i16) __msa_pckev_b((v16i8) out1, (v16i8) out0);
- ST_W4(out0, 0, 1, 2, 3, dst + 8, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_8t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask1, mask2, mask3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2, dst3;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- src0_ptr -= 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- for (loop_cnt = (height >> 1); loop_cnt--;) {
- LD_SB2(src0_ptr, 8, src0, src1);
- src0_ptr += src_stride;
- LD_SB2(src0_ptr, 8, src2, src3);
- src0_ptr += src_stride;
- LD_SH2(src1_ptr, 8, in0, in1);
- src1_ptr += src2_stride;
- LD_SH2(src1_ptr, 8, in2, in3);
- src1_ptr += src2_stride;
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_SH2(out0, out1, dst, dst_stride);
- dst += (2 * dst_stride);
- }
- }
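- /* Horizontal 8-tap + bi-weighting for 24-pixel-wide blocks: one row per
-  * iteration; the first row is loaded ahead of the loop and the last one is
-  * finished after it, so the 31-iteration loop covers 32 rows in total. */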
- static void hevc_hz_biwgt_8t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- uint64_t dst_val0;
- int32_t offset, weight, constant;
- v16i8 src0, src1;
- v8i16 in0, in1, in2;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2;
- v4i32 dst2_r, dst2_l;
- v8i16 filter_vec, out0, out1, out2;
- v4i32 weight_vec, offset_vec, rnd_vec;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- src0_ptr = src0_ptr - 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- mask4 = mask0 + 8;
- mask5 = mask0 + 10;
- mask6 = mask0 + 12;
- mask7 = mask0 + 14;
- LD_SB2(src0_ptr, 16, src0, src1);
- src0_ptr += src_stride;
- LD_SH2(src1_ptr, 8, in0, in1);
- in2 = LD_SH(src1_ptr + 16);
- src1_ptr += src2_stride;
- XORI_B2_128_SB(src0, src1);
- for (loop_cnt = 31; loop_cnt--;) {
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- out0, out1);
- ILVRL_H2_SW(dst2, in2, dst2_r, dst2_l);
- dst2_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_r,
- (v8i16) weight_vec);
- dst2_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_l,
- (v8i16) weight_vec);
- SRAR_W2_SW(dst2_r, dst2_l, rnd_vec);
- dst2_r = (v4i32) __msa_pckev_h((v8i16) dst2_l, (v8i16) dst2_r);
- out2 = CLIP_SH_0_255(dst2_r);
- LD_SB2(src0_ptr, 16, src0, src1);
- src0_ptr += src_stride;
- LD_SH2(src1_ptr, 8, in0, in1);
- in2 = LD_SH(src1_ptr + 16);
- src1_ptr += src2_stride;
- XORI_B2_128_SB(src0, src1);
- PCKEV_B2_SH(out1, out0, out2, out2, out0, out2);
- dst_val0 = __msa_copy_u_d((v2i64) out2, 0);
- ST_SH(out0, dst);
- SD(dst_val0, dst + 16);
- dst += dst_stride;
- }
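- /* handle the final row loaded in the last loop iteration */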
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1, weight_vec, rnd_vec, offset_vec,
- out0, out1);
- ILVRL_H2_SW(dst2, in2, dst2_r, dst2_l);
- dst2_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_r, (v8i16) weight_vec);
- dst2_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_l, (v8i16) weight_vec);
- SRAR_W2_SW(dst2_r, dst2_l, rnd_vec);
- dst2_r = (v4i32) __msa_pckev_h((v8i16) dst2_l, (v8i16) dst2_r);
- out2 = CLIP_SH_0_255(dst2_r);
- PCKEV_B2_SH(out1, out0, out2, out2, out0, out2);
- dst_val0 = __msa_copy_u_d((v2i64) out2, 0);
- ST_SH(out0, dst);
- SD(dst_val0, dst + 16);
- dst += dst_stride;
- }
- static void hevc_hz_biwgt_8t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2;
- v8i16 in0, in1, in2, in3;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- mask4 = mask0 + 8;
- mask5 = mask0 + 10;
- mask6 = mask0 + 12;
- mask7 = mask0 + 14;
- for (loop_cnt = height; loop_cnt--;) {
- LD_SB2(src0_ptr, 16, src0, src1);
- src2 = LD_SB(src0_ptr + 24);
- src0_ptr += src_stride;
- LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
- src1_ptr += src2_stride;
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_SH2(out0, out1, dst, 16);
- dst += dst_stride;
- }
- }
- static void hevc_hz_biwgt_8t_48w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 in0, in1, in2, in3;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- mask4 = mask0 + 8;
- mask5 = mask0 + 10;
- mask6 = mask0 + 12;
- mask7 = mask0 + 14;
- for (loop_cnt = 64; loop_cnt--;) {
- LD_SB2(src0_ptr, 16, src0, src1);
- src2 = LD_SB(src0_ptr + 24);
- LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
- XORI_B3_128_SB(src0, src1, src2);
- LD_SB2(src0_ptr + 32, 8, src3, src4);
- src0_ptr += src_stride;
- XORI_B2_128_SB(src3, src4);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3, in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_SH2(out0, out1, dst, 16);
- LD_SH2(src1_ptr + 32, 8, in2, in3);
- src1_ptr += src2_stride;
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1);
- out0 = (v8i16) __msa_pckev_b((v16i8) out1, (v16i8) out0);
- ST_SH(out0, dst + 32);
- dst += dst_stride;
- }
- }
- static void hevc_hz_biwgt_8t_64w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint8_t *src0_ptr_tmp;
- uint8_t *dst_tmp;
- int16_t *src1_ptr_tmp;
- uint32_t loop_cnt, cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2;
- v8i16 in0, in1, in2, in3;
- v8i16 filt0, filt1, filt2, filt3;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v16i8 vec0, vec1, vec2, vec3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 3;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- mask4 = mask0 + 8;
- mask5 = mask0 + 10;
- mask6 = mask0 + 12;
- mask7 = mask0 + 14;
- for (loop_cnt = height; loop_cnt--;) {
- src0_ptr_tmp = src0_ptr;
- dst_tmp = dst;
- src1_ptr_tmp = src1_ptr;
- for (cnt = 2; cnt--;) {
- LD_SB2(src0_ptr_tmp, 16, src0, src1);
- src2 = LD_SB(src0_ptr_tmp + 24);
- src0_ptr_tmp += 32;
- LD_SH4(src1_ptr_tmp, 8, in0, in1, in2, in3);
- src1_ptr_tmp += 32;
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
- vec0, vec1, vec2, vec3);
- dst1 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst2 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst3 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_SH2(out0, out1, dst_tmp, 16);
- dst_tmp += 32;
- }
- src0_ptr += src_stride;
- src1_ptr += src2_stride;
- dst += dst_stride;
- }
- }
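- /* vertical 8-tap bi-weighted interpolation */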
- static void hevc_vt_biwgt_8t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v16i8 src11, src12, src13, src14;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v16i8 src1110_r, src1211_r, src1312_r, src1413_r;
- v16i8 src2110, src4332, src6554, src8776, src10998;
- v16i8 src12111110, src14131312;
- v8i16 dst10, dst32, dst54, dst76;
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, weight1_vec, offset_vec, rnd_vec, const_vec;
- src0_ptr -= (3 * src_stride);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_w(128);
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- weight1_vec = __msa_fill_w(weight1);
- offset_vec += const_vec * weight1_vec;
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += (7 * src_stride);
- ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_r, src32_r, src54_r, src21_r);
- ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
- ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
- src2110, src4332, src6554);
- XORI_B3_128_SB(src2110, src4332, src6554);
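- /* two 4-wide rows share one vector, so each iteration of the loop below
-  * filters and stores 8 output rows */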
- for (loop_cnt = (height >> 3); loop_cnt--;) {
- LD_SB8(src0_ptr, src_stride,
- src7, src8, src9, src10, src11, src12, src13, src14);
- src0_ptr += (8 * src_stride);
- LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7);
- src1_ptr += (8 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
- ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
- src76_r, src87_r, src98_r, src109_r);
- ILVR_B4_SB(src11, src10, src12, src11, src13, src12, src14, src13,
- src1110_r, src1211_r, src1312_r, src1413_r);
- ILVR_D4_SB(src87_r, src76_r, src109_r, src98_r, src1211_r, src1110_r,
- src1413_r, src1312_r,
- src8776, src10998, src12111110, src14131312);
- XORI_B4_128_SB(src8776, src10998, src12111110, src14131312);
- DOTP_SB4_SH(src2110, src4332, src6554, src8776, filt0, filt0, filt0,
- filt0, dst10, dst32, dst54, dst76);
- DPADD_SB4_SH(src4332, src6554, src8776, src10998, filt1, filt1, filt1,
- filt1, dst10, dst32, dst54, dst76);
- DPADD_SB4_SH(src6554, src8776, src10998, src12111110, filt2, filt2,
- filt2, filt2, dst10, dst32, dst54, dst76);
- DPADD_SB4_SH(src8776, src10998, src12111110, src14131312, filt3, filt3,
- filt3, filt3, dst10, dst32, dst54, dst76);
- HEVC_BIW_RND_CLIP4(dst10, dst32, dst54, dst76,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- src2110 = src10998;
- src4332 = src12111110;
- src6554 = src14131312;
- src6 = src14;
- }
- }
- static void hevc_vt_biwgt_8t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16i8 src0, src1, src2, src3, src4, src5;
- v16i8 src6, src7, src8, src9, src10;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 filter_vec, out0, out1, out2, out3;
- v4i32 weight_vec, weight1_vec, offset_vec, rnd_vec, const_vec;
- src0_ptr -= (3 * src_stride);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_w(128);
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- weight1_vec = __msa_fill_w(weight1);
- offset_vec += const_vec * weight1_vec;
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_r, src32_r, src54_r, src21_r);
- ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
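- /* four new rows per iteration; the interleaved row pairs are shifted down
-  * at the end of the loop so the older rows are reused */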
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src7, src8, src9, src10);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B4_128_SB(src7, src8, src9, src10);
- ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
- src76_r, src87_r, src98_r, src109_r);
- DOTP_SB4_SH(src10_r, src21_r, src32_r, src43_r, filt0, filt0, filt0,
- filt0, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src32_r, src43_r, src54_r, src65_r, filt1, filt1, filt1,
- filt1, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src54_r, src65_r, src76_r, src87_r, filt2, filt2, filt2,
- filt2, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src76_r, src87_r, src98_r, src109_r, filt3, filt3, filt3,
- filt3, tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out1, out0, out3, out2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- dst += (4 * dst_stride);
- src10_r = src54_r;
- src32_r = src76_r;
- src54_r = src98_r;
- src21_r = src65_r;
- src43_r = src87_r;
- src65_r = src109_r;
- src6 = src10;
- }
- }
- static void hevc_vt_biwgt_8t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src54_r, src76_r;
- v16i8 src21_r, src43_r, src65_r, src87_r;
- v8i16 tmp0, tmp1, tmp2;
- v16i8 src10_l, src32_l, src54_l, src76_l;
- v16i8 src21_l, src43_l, src65_l, src87_l;
- v16i8 src2110, src4332, src6554, src8776;
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 out0, out1, out2, filter_vec;
- v4i32 dst2_r, dst2_l;
- v4i32 weight_vec, weight1_vec, offset_vec, rnd_vec, const_vec;
- src0_ptr -= (3 * src_stride);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_w(128);
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- weight1_vec = __msa_fill_w(weight1);
- offset_vec += const_vec * weight1_vec;
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_r, src32_r, src54_r, src21_r);
- ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
- ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_l, src32_l, src54_l, src21_l);
- ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
- ILVR_D3_SB(src21_l, src10_l, src43_l, src32_l, src65_l, src54_l,
- src2110, src4332, src6554);
- for (loop_cnt = 8; loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src7, src8);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2((src1_ptr + 8), src2_stride, in2, in3);
- src1_ptr += (2 * src2_stride);
- in2 = (v8i16) __msa_ilvr_d((v2i64) in3, (v2i64) in2);
- XORI_B2_128_SB(src7, src8);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- ILVL_B2_SB(src7, src6, src8, src7, src76_l, src87_l);
- src8776 = (v16i8) __msa_ilvr_d((v2i64) src87_l, (v2i64) src76_l);
- DOTP_SB3_SH(src10_r, src21_r, src2110, filt0, filt0, filt0,
- tmp0, tmp1, tmp2);
- DPADD_SB2_SH(src32_r, src43_r, filt1, filt1, tmp0, tmp1);
- tmp2 = __msa_dpadd_s_h(tmp2, src4332, (v16i8) filt1);
- DPADD_SB2_SH(src54_r, src65_r, filt2, filt2, tmp0, tmp1);
- tmp2 = __msa_dpadd_s_h(tmp2, src6554, (v16i8) filt2);
- DPADD_SB2_SH(src76_r, src87_r, filt3, filt3, tmp0, tmp1);
- tmp2 = __msa_dpadd_s_h(tmp2, src8776, (v16i8) filt3);
- HEVC_BIW_RND_CLIP2(tmp0, tmp1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- out0, out1);
- ILVRL_H2_SW(tmp2, in2, dst2_r, dst2_l);
- dst2_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_r,
- (v8i16) weight_vec);
- dst2_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst2_l,
- (v8i16) weight_vec);
- SRAR_W2_SW(dst2_r, dst2_l, rnd_vec);
- dst2_r = (v4i32) __msa_pckev_h((v8i16) dst2_l, (v8i16) dst2_r);
- out2 = CLIP_SH_0_255(dst2_r);
- PCKEV_B2_SH(out1, out0, out2, out2, out0, out2);
- ST_D2(out0, 0, 1, dst, dst_stride);
- ST_W2(out2, 0, 1, dst + 8, dst_stride);
- dst += (2 * dst_stride);
- src10_r = src32_r;
- src32_r = src54_r;
- src54_r = src76_r;
- src21_r = src43_r;
- src43_r = src65_r;
- src65_r = src87_r;
- src2110 = src4332;
- src4332 = src6554;
- src6554 = src8776;
- src6 = src8;
- }
- }
- static void hevc_vt_biwgt_8t_16multx2mult_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val,
- int32_t width)
- {
- uint8_t *src0_ptr_tmp;
- int16_t *src1_ptr_tmp;
- uint8_t *dst_tmp;
- uint32_t loop_cnt, cnt;
- int32_t offset, weight;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src54_r, src76_r;
- v16i8 src21_r, src43_r, src65_r, src87_r;
- v16i8 src10_l, src32_l, src54_l, src76_l;
- v16i8 src21_l, src43_l, src65_l, src87_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 filter_vec;
- v8i16 out0, out1, out2, out3;
- v4i32 weight_vec, weight1_vec, offset_vec, rnd_vec, const_vec;
- src0_ptr -= (3 * src_stride);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_w(128);
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- weight1_vec = __msa_fill_w(weight1);
- offset_vec += const_vec * weight1_vec;
- filter_vec = LD_SH(filter);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
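- /* process the block in 16-pixel-wide columns, two rows per iteration */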
- for (cnt = (width >> 4); cnt--;) {
- src0_ptr_tmp = src0_ptr;
- src1_ptr_tmp = src1_ptr;
- dst_tmp = dst;
- LD_SB7(src0_ptr_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
- src0_ptr_tmp += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_r, src32_r, src54_r, src21_r);
- ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
- ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
- src10_l, src32_l, src54_l, src21_l);
- ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
- for (loop_cnt = (height >> 1); loop_cnt--;) {
- LD_SB2(src0_ptr_tmp, src_stride, src7, src8);
- src0_ptr_tmp += (2 * src_stride);
- LD_SH2(src1_ptr_tmp, src2_stride, in0, in1);
- LD_SH2((src1_ptr_tmp + 8), src2_stride, in2, in3);
- src1_ptr_tmp += (2 * src2_stride);
- XORI_B2_128_SB(src7, src8);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- ILVL_B2_SB(src7, src6, src8, src7, src76_l, src87_l);
- DOTP_SB4_SH(src10_r, src21_r, src10_l, src21_l, filt0, filt0,
- filt0, filt0, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src32_r, src43_r, src32_l, src43_l, filt1, filt1,
- filt1, filt1, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src54_r, src65_r, src54_l, src65_l, filt2, filt2,
- filt2, filt2, tmp0, tmp1, tmp2, tmp3);
- DPADD_SB4_SH(src76_r, src87_r, src76_l, src87_l, filt3, filt3,
- filt3, filt3, tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- out0, out1, out2, out3);
- PCKEV_B2_SH(out2, out0, out3, out1, out0, out1);
- ST_SH2(out0, out1, dst_tmp, dst_stride);
- dst_tmp += (2 * dst_stride);
- src10_r = src32_r;
- src32_r = src54_r;
- src54_r = src76_r;
- src21_r = src43_r;
- src43_r = src65_r;
- src65_r = src87_r;
- src10_l = src32_l;
- src32_l = src54_l;
- src54_l = src76_l;
- src21_l = src43_l;
- src43_l = src65_l;
- src65_l = src87_l;
- src6 = src8;
- }
- src0_ptr += 16;
- src1_ptr += 16;
- dst += 16;
- }
- }
- static void hevc_vt_biwgt_8t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_vt_biwgt_8t_16multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val, 16);
- }
- static void hevc_vt_biwgt_8t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_vt_biwgt_8t_16multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val, 16);
- hevc_vt_biwgt_8t_8w_msa(src0_ptr + 16, src_stride,
- src1_ptr + 16, src2_stride,
- dst + 16, dst_stride, filter, height,
- weight0, weight1, offset0, offset1, rnd_val);
- }
- static void hevc_vt_biwgt_8t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_vt_biwgt_8t_16multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val, 32);
- }
- static void hevc_vt_biwgt_8t_48w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_vt_biwgt_8t_16multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val, 48);
- }
- static void hevc_vt_biwgt_8t_64w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_vt_biwgt_8t_16multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val, 64);
- }
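- /* 2-D (horizontal then vertical) 8-tap bi-weighted interpolation */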
- static void hevc_hv_biwgt_8t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- uint64_t tp0, tp1;
- int32_t offset, weight;
- v16u8 out;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v8i16 in0 = { 0 }, in1 = { 0 };
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
- v16i8 mask1, mask2, mask3;
- v8i16 filter_vec, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v8i16 dst30, dst41, dst52, dst63, dst66, dst87;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 dst10, dst32, dst54, dst76;
- v8i16 dst21, dst43, dst65, dst97, dst108, dst109, dst98;
- v4i32 offset_vec, rnd_vec, const_vec, dst0, dst1, dst2, dst3;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
- src0_ptr -= ((3 * src_stride) + 3);
- filter_vec = LD_SH(filter_x);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- weight_vec = (v8i16) __msa_fill_w(weight);
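- /* horizontal 8-tap pass over the 7 prologue rows; two 4-wide rows are
-  * shuffled into one vector (dst30 pairs rows 3 and 0, and so on) */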
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- VSHF_B4_SB(src0, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src1, src4, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src2, src5, mask0, mask1, mask2, mask3,
- vec8, vec9, vec10, vec11);
- VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
- vec12, vec13, vec14, vec15);
- dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
- dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
- filt3);
- ILVRL_H2_SH(dst41, dst30, dst10, dst43);
- ILVRL_H2_SH(dst52, dst41, dst21, dst54);
- ILVRL_H2_SH(dst63, dst52, dst32, dst65);
- dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
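- /* per iteration: 4 new input rows, vertical 8-tap over the 16-bit columns,
-  * then bi-weighting, rounding and clipping of 4 output rows */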
- for (loop_cnt = height >> 2; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src7, src8, src9, src10);
- src0_ptr += (4 * src_stride);
- XORI_B4_128_SB(src7, src8, src9, src10);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in0);
- src1_ptr += (2 * src2_stride);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in1);
- src1_ptr += (2 * src2_stride);
- VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
- vec4, vec5, vec6, vec7);
- dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst76 = __msa_ilvr_h(dst97, dst66);
- ILVRL_H2_SH(dst108, dst97, dst87, dst109);
- dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
- dst98 = __msa_ilvr_h(dst66, dst108);
- dst0 = HEVC_FILT_8TAP(dst10, dst32, dst54, dst76, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst1 = HEVC_FILT_8TAP(dst21, dst43, dst65, dst87, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst2 = HEVC_FILT_8TAP(dst32, dst54, dst76, dst98, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst3 = HEVC_FILT_8TAP(dst43, dst65, dst87, dst109, filt_h0, filt_h1,
- filt_h2, filt_h3);
- SRA_4V(dst0, dst1, dst2, dst3, 6);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp1, tmp3);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- CLIP_SW4_0_255_MAX_SATU(dst0, dst1, dst2, dst3);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
- dst10 = dst54;
- dst32 = dst76;
- dst54 = dst98;
- dst21 = dst65;
- dst43 = dst87;
- dst65 = dst109;
- dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
- }
- }
- static void hevc_hv_biwgt_8t_8multx2mult_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val,
- int32_t width8mult)
- {
- uint32_t loop_cnt, cnt;
- int32_t offset, weight;
- uint8_t *src0_ptr_tmp;
- int16_t *src1_ptr_tmp;
- uint8_t *dst_tmp;
- v16u8 out;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1;
- v8i16 filt0, filt1, filt2, filt3;
- v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1, mask2, mask3;
- v8i16 filter_vec, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
- v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
- v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= ((3 * src_stride) + 3);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- weight_vec = (v8i16) __msa_fill_w(weight);
- filter_vec = LD_SH(filter_x);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- for (cnt = width8mult; cnt--;) {
- src0_ptr_tmp = src0_ptr;
- src1_ptr_tmp = src1_ptr;
- dst_tmp = dst;
- LD_SB7(src0_ptr_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
- src0_ptr_tmp += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- /* row 0 row 1 row 2 row 3 */
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
- vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
- vec8, vec9, vec10, vec11);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
- vec12, vec13, vec14, vec15);
- dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
- dst3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
- filt2, filt3);
- /* row 4 row 5 row 6 */
- VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
- vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
- vec8, vec9, vec10, vec11);
- dst4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
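- /* two output rows per iteration: vertical 8-tap on the 16-bit horizontal
-  * results, then the same dpadd-based bi-weighting as the 1-D paths */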
- for (loop_cnt = height >> 1; loop_cnt--;) {
- LD_SB2(src0_ptr_tmp, src_stride, src7, src8);
- XORI_B2_128_SB(src7, src8);
- src0_ptr_tmp += 2 * src_stride;
- LD_SH2(src1_ptr_tmp, src2_stride, in0, in1);
- src1_ptr_tmp += (2 * src2_stride);
- ILVR_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1, dst10_r,
- dst32_r, dst54_r, dst21_r);
- ILVL_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1, dst10_l,
- dst32_l, dst54_l, dst21_l);
- ILVR_H2_SH(dst4, dst3, dst6, dst5, dst43_r, dst65_r);
- ILVL_H2_SH(dst4, dst3, dst6, dst5, dst43_l, dst65_l);
- VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
- dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst0_r >>= 6;
- dst0_l >>= 6;
- /* row 8 */
- VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3,
- vec0, vec1, vec2, vec3);
- dst8 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1,
- filt2, filt3);
- ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
- dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst1_l = HEVC_FILT_8TAP(dst21_l, dst43_l, dst65_l, dst87_l,
- filt_h0, filt_h1, filt_h2, filt_h3);
- dst1_r >>= 6;
- dst1_l >>= 6;
- PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp1, tmp3);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0_r = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst0_l = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst1_r = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst1_l = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0_l, dst0_r, dst1_l, dst1_r, rnd_vec);
- CLIP_SW4_0_255_MAX_SATU(dst0_l, dst0_r, dst1_l, dst1_r);
- PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_D2(out, 0, 1, dst_tmp, dst_stride);
- dst_tmp += (2 * dst_stride);
- dst0 = dst2;
- dst1 = dst3;
- dst2 = dst4;
- dst3 = dst5;
- dst4 = dst6;
- dst5 = dst7;
- dst6 = dst8;
- }
- src0_ptr += 8;
- src1_ptr += 8;
- dst += 8;
- }
- }
- static void hevc_hv_biwgt_8t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 1);
- }
- static void hevc_hv_biwgt_8t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- uint8_t *src0_ptr_tmp, *dst_tmp;
- int16_t *src1_ptr_tmp;
- int32_t offset, weight;
- uint64_t tp0, tp1;
- v16u8 out;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
- v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
- v8i16 in0 = { 0 }, in1 = { 0 };
- v8i16 filter_vec, weight_vec, tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst21_r, dst43_r, dst65_r;
- v8i16 dst10_l, dst32_l, dst54_l, dst76_l, dst21_l, dst43_l, dst65_l;
- v8i16 dst30, dst41, dst52, dst63, dst66, dst87, dst10, dst32, dst54, dst76;
- v8i16 dst21, dst43, dst65, dst97, dst108, dst109, dst98, dst87_r, dst87_l;
- v4i32 offset_vec, rnd_vec, const_vec, dst0, dst1, dst2, dst3;
- src0_ptr -= ((3 * src_stride) + 3);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- weight_vec = (v8i16) __msa_fill_w(weight);
- filter_vec = LD_SH(filter_x);
- SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
- mask0 = LD_SB(ff_hevc_mask_arr);
- mask1 = mask0 + 2;
- mask2 = mask0 + 4;
- mask3 = mask0 + 6;
- src0_ptr_tmp = src0_ptr;
- src1_ptr_tmp = src1_ptr;
- dst_tmp = dst;
- LD_SB7(src0_ptr_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr_tmp += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
- vec11);
- VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
- vec15);
- dsth0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dsth1 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dsth2 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
- dsth3 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1,
- filt2, filt3);
- VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
- vec11);
- dsth4 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dsth5 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dsth6 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
- for (loop_cnt = 8; loop_cnt--;) {
- LD_SB2(src0_ptr_tmp, src_stride, src7, src8);
- src0_ptr_tmp += (2 * src_stride);
- XORI_B2_128_SB(src7, src8);
- LD_SH2(src1_ptr_tmp, src2_stride, in0, in1);
- src1_ptr_tmp += (2 * src2_stride);
- ILVR_H4_SH(dsth1, dsth0, dsth3, dsth2, dsth5, dsth4, dsth2, dsth1,
- dst10_r, dst32_r, dst54_r, dst21_r);
- ILVL_H4_SH(dsth1, dsth0, dsth3, dsth2, dsth5, dsth4, dsth2, dsth1,
- dst10_l, dst32_l, dst54_l, dst21_l);
- ILVR_H2_SH(dsth4, dsth3, dsth6, dsth5, dst43_r, dst65_r);
- ILVL_H2_SH(dsth4, dsth3, dsth6, dsth5, dst43_l, dst65_l);
- VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dsth7 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
- dst0 = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
- filt_h1, filt_h2, filt_h3);
- dst1 = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l, filt_h0,
- filt_h1, filt_h2, filt_h3);
- dst0 >>= 6;
- dst1 >>= 6;
- VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
- vec3);
- dsth8 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
- dst2 = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
- filt_h1, filt_h2, filt_h3);
- dst3 = HEVC_FILT_8TAP(dst21_l, dst43_l, dst65_l, dst87_l, filt_h0,
- filt_h1, filt_h2, filt_h3);
- dst2 >>= 6;
- dst3 >>= 6;
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp1, tmp3);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst1, dst0, dst3, dst2, rnd_vec);
- CLIP_SW4_0_255_MAX_SATU(dst1, dst0, dst3, dst2);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_D2(out, 0, 1, dst_tmp, dst_stride);
- dst_tmp += (2 * dst_stride);
- dsth0 = dsth2;
- dsth1 = dsth3;
- dsth2 = dsth4;
- dsth3 = dsth5;
- dsth4 = dsth6;
- dsth5 = dsth7;
- dsth6 = dsth8;
- }
- src0_ptr += 8;
- src1_ptr += 8;
- dst += 8;
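- /* the left 8 columns are done; handle the remaining 4-wide column with the
-  * paired-row scheme used by hevc_hv_biwgt_8t_4w_msa */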
- mask4 = LD_SB(ff_hevc_mask_arr + 16);
- mask5 = mask4 + 2;
- mask6 = mask4 + 4;
- mask7 = mask4 + 6;
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += (7 * src_stride);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
- VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
- VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7, vec8, vec9, vec10,
- vec11);
- VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7, vec12, vec13, vec14,
- vec15);
- dst30 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst41 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst52 = HEVC_FILT_8TAP_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2,
- filt3);
- dst63 = HEVC_FILT_8TAP_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2,
- filt3);
- ILVRL_H2_SH(dst41, dst30, dst10, dst43);
- ILVRL_H2_SH(dst52, dst41, dst21, dst54);
- ILVRL_H2_SH(dst63, dst52, dst32, dst65);
- dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
- for (loop_cnt = 4; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src7, src8, src9, src10);
- src0_ptr += (4 * src_stride);
- XORI_B4_128_SB(src7, src8, src9, src10);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in0);
- src1_ptr += (2 * src2_stride);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in1);
- src1_ptr += (2 * src2_stride);
- VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
- vec3);
- VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
- vec7);
- dst97 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,
- filt3);
- dst108 = HEVC_FILT_8TAP_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2,
- filt3);
- dst76 = __msa_ilvr_h(dst97, dst66);
- ILVRL_H2_SH(dst108, dst97, dst87, dst109);
- dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
- dst98 = __msa_ilvr_h(dst66, dst108);
- dst0 = HEVC_FILT_8TAP(dst10, dst32, dst54, dst76, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst1 = HEVC_FILT_8TAP(dst21, dst43, dst65, dst87, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst2 = HEVC_FILT_8TAP(dst32, dst54, dst76, dst98, filt_h0, filt_h1,
- filt_h2, filt_h3);
- dst3 = HEVC_FILT_8TAP(dst43, dst65, dst87, dst109, filt_h0, filt_h1,
- filt_h2, filt_h3);
- SRA_4V(dst0, dst1, dst2, dst3, 6);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp1, tmp3);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- CLIP_SW4_0_255_MAX_SATU(dst0, dst1, dst2, dst3);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
- dst10 = dst54;
- dst32 = dst76;
- dst54 = dst98;
- dst21 = dst65;
- dst43 = dst87;
- dst65 = dst109;
- dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
- }
- }
- static void hevc_hv_biwgt_8t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 2);
- }
- static void hevc_hv_biwgt_8t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 3);
- }
- static void hevc_hv_biwgt_8t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 4);
- }
- static void hevc_hv_biwgt_8t_48w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 6);
- }
- static void hevc_hv_biwgt_8t_64w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_8t_8multx2mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val, 8);
- }
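- /* 4-tap (chroma) bi-weighted interpolation, horizontal filtering */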
- static void hevc_hz_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1;
- v8i16 in0, in1;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[16]);
- v16i8 mask1, vec0, vec1;
- v8i16 dst0;
- v4i32 dst0_r, dst0_l;
- v8i16 out0, filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- LD_SB2(src0_ptr, src_stride, src0, src1);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- in0 = (v8i16) __msa_ilvr_d((v2i64) in1, (v2i64) in0);
- XORI_B2_128_SB(src0, src1);
- VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- ILVRL_H2_SW(dst0, in0, dst0_r, dst0_l);
- dst0_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_r, (v8i16) weight_vec);
- dst0_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_l, (v8i16) weight_vec);
- SRAR_W2_SW(dst0_r, dst0_l, rnd_vec);
- dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
- out0 = CLIP_SH_0_255(dst0_r);
- out0 = (v8i16) __msa_pckev_b((v16i8) out0, (v16i8) out0);
- ST_W2(out0, 0, 1, dst, dst_stride);
- }
- static void hevc_hz_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[16]);
- v16i8 mask1;
- v8i16 dst0, dst1;
- v16i8 vec0, vec1;
- v8i16 in0, in1, in2, in3;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- /* load the 4-tap filter and splat its coefficient pairs */
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- XORI_B4_128_SB(src0, src1, src2, src3);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1);
- dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_W4(dst0, 0, 1, 2, 3, dst, dst_stride);
- }
- static void hevc_hz_biwgt_4t_4x8multiple_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t weight, offset, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[16]);
- v16i8 mask1;
- v16i8 vec0, vec1;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
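- /* two 4-wide rows per vector, 8 output rows per iteration */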
- for (loop_cnt = (height >> 3); loop_cnt--;) {
- LD_SB8(src0_ptr, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
- src0_ptr += (8 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- LD_SH4(src1_ptr, src2_stride, in4, in5, in6, in7);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
- XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
- VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src4, src5, src4, src5, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src6, src7, src6, src7, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_W8(dst0, dst1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_hz_biwgt_4t_4x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (4 == height) {
- hevc_hz_biwgt_4t_4x4_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (0 == (height % 8)) {
- hevc_hz_biwgt_4t_4x8multiple_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val);
- }
- }
- static void hevc_hz_biwgt_4t_6w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1;
- v16i8 vec0, vec1;
- v8i16 in0, in1, in2, in3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- for (loop_cnt = 2; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
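- /* 6-wide store: a 4-byte word plus a 2-byte halfword per row */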
- ST_W2(dst0, 0, 2, dst, dst_stride);
- ST_H2(dst0, 2, 6, dst + 4, dst_stride);
- ST_W2(dst1, 0, 2, dst + 2 * dst_stride, dst_stride);
- ST_H2(dst1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1;
- v8i16 in0, in1;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, vec0, vec1;
- v8i16 dst0, dst1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- LD_SB2(src0_ptr, src_stride, src0, src1);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- XORI_B2_128_SB(src0, src1);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1);
- dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_D2(dst0, 0, 1, dst, dst_stride);
- }
- static void hevc_hz_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t weight, offset, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3, src4, src5;
- v8i16 in0, in1, in2, in3, in4, in5;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1;
- v16i8 vec0, vec1;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- LD_SB6(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- LD_SH2(src1_ptr, src2_stride, in4, in5);
- XORI_B6_128_SB(src0, src1, src2, src3, src4, src5);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- HEVC_BIW_RND_CLIP2(dst4, dst5, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- dst4, dst5);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- dst3 = (v8i16) __msa_pckev_b((v16i8) dst5, (v16i8) dst4);
- ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride);
- ST_D2(dst3, 0, 1, dst + 4 * dst_stride, dst_stride);
- }
- static void hevc_hz_biwgt_4t_8x4multiple_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1;
- v16i8 vec0, vec1;
- v8i16 in0, in1, in2, in3;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_hz_biwgt_4t_8x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (6 == height) {
- hevc_hz_biwgt_4t_8x6_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (0 == (height % 4)) {
- hevc_hz_biwgt_4t_8x4multiple_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val);
- }
- }
- static void hevc_hz_biwgt_4t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v8i16 filt0, filt1;
- v16i8 src0, src1, src2, src3;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
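- /* mask2/mask3 gather the rightmost 4-sample strip of two rows in one shuffle,
-  * so a single 4-tap call covers columns 8..11 of both rows. */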
- v16i8 mask2 = {
- 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
- };
- v16i8 mask1, mask3;
- v16i8 vec0, vec1;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- mask3 = mask2 + 2;
- for (loop_cnt = 4; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src1, src2, src3);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in5, in4, in7, in6, in4, in5);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
- dst4 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src3, src2, src3, mask2, mask3, vec0, vec1);
- dst5 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- HEVC_BIW_RND_CLIP2(dst4, dst5, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- dst4, dst5);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- dst3 = (v8i16) __msa_pckev_b((v16i8) dst5, (v16i8) dst4);
- ST_D4(dst0, dst1, 0, 1, 0, 1, dst, dst_stride);
- ST_W4(dst3, 0, 1, 2, 3, dst + 8, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v8i16 filt0, filt1;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v16i8 vec0, vec1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src0, src2, src4, src6);
- LD_SB4(src0_ptr + 8, src_stride, src1, src3, src5, src7);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in2, in4, in6);
- LD_SH4(src1_ptr + 8, src2_stride, in1, in3, in5, in7);
- src1_ptr += (4 * src2_stride);
- XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst6 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- dst7 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_SH2(dst0, dst1, dst, dst_stride);
- dst += (2 * dst_stride);
- HEVC_BIW_RND_CLIP4(dst4, dst5, dst6, dst7,
- in4, in5, in6, in7,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_SH2(dst0, dst1, dst, dst_stride);
- dst += (2 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3;
- v8i16 filt0, filt1;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, mask2, mask3;
- v16i8 vec0, vec1;
- v8i16 dst0, dst1, dst2, dst3;
- v8i16 in0, in1, in2, in3, in4, in5;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- mask2 = mask0 + 8;
- mask3 = mask0 + 10;
- for (loop_cnt = 16; loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src0, src2);
- LD_SB2(src0_ptr + 16, src_stride, src1, src3);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in2);
- LD_SH2(src1_ptr + 8, src2_stride, in1, in3);
- LD_SH2(src1_ptr + 16, src2_stride, in4, in5);
- src1_ptr += (2 * src2_stride);
- XORI_B4_128_SB(src0, src1, src2, src3);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src3, src2, src3, mask2, mask3, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_SH2(dst0, dst1, dst, dst_stride);
- /* 8 width */
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP2(dst0, dst1, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1);
- dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
- ST_D2(dst0, 0, 1, (dst + 16), dst_stride);
- dst += (2 * dst_stride);
- }
- }
- static void hevc_hz_biwgt_4t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2;
- v8i16 filt0, filt1;
- v16i8 mask0 = LD_SB(&ff_hevc_mask_arr[0]);
- v16i8 mask1, mask2, mask3;
- v8i16 dst0, dst1, dst2, dst3;
- v16i8 vec0, vec1;
- v8i16 in0, in1, in2, in3;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= 1;
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- mask1 = mask0 + 2;
- mask2 = mask0 + 8;
- mask3 = mask0 + 10;
- for (loop_cnt = height; loop_cnt--;) {
- LD_SB2(src0_ptr, 16, src0, src1);
- src2 = LD_SB(src0_ptr + 24);
- src0_ptr += src_stride;
- LD_SH4(src1_ptr, 8, in0, in1, in2, in3);
- src1_ptr += src2_stride;
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
- dst1 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
- dst2 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
- dst3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst0, dst1, dst2, dst3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst0, dst1, dst2, dst3);
- PCKEV_B2_SH(dst1, dst0, dst3, dst2, dst0, dst1);
- ST_SH2(dst0, dst1, dst, 16);
- dst += dst_stride;
- }
- }
- static void hevc_vt_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t weight, offset, constant;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 in0, in1, dst10;
- v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
- v4i32 dst10_r, dst10_l;
- v8i16 filt0, filt1;
- v8i16 filter_vec, out;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
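- /* Interleave consecutive rows so the vertical 4-tap becomes the same
-  * byte-pair dot product as the horizontal path; two 4-pixel rows share one
-  * 128-bit vector. */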
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
- src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- src1_ptr += (2 * src2_stride);
- in0 = (v8i16) __msa_ilvr_d((v2i64) in1, (v2i64) in0);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_r, (v2i64) src32_r);
- src4332 = (v16i8) __msa_xori_b((v16u8) src4332, 128);
- dst10 = HEVC_FILT_4TAP_SH(src2110, src4332, filt0, filt1);
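- /* Bi-weight inlined: in0 * weight0 + dst10 * weight1 + offset, rounded by
-  * (rnd_val + 1) with SRAR and clipped to 8 bit. */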
- ILVRL_H2_SW(dst10, in0, dst10_r, dst10_l);
- dst10_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst10_r, (v8i16) weight_vec);
- dst10_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst10_l, (v8i16) weight_vec);
- SRAR_W2_SW(dst10_r, dst10_l, rnd_vec);
- dst10_r = (v4i32) __msa_pckev_h((v8i16) dst10_l, (v8i16) dst10_r);
- out = CLIP_SH_0_255(dst10_r);
- out = (v8i16) __msa_pckev_b((v16i8) out, (v16i8) out);
- ST_W2(out, 0, 1, dst, dst_stride);
- }
- static void hevc_vt_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t weight, offset, constant;
- v16i8 src0, src1, src2, src3, src4, src5, src6;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
- v16i8 src2110, src4332, src6554;
- v8i16 dst10, dst32;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
- src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
- LD_SB4(src0_ptr, src_stride, src3, src4, src5, src6);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
- src32_r, src43_r, src54_r, src65_r);
- ILVR_D2_SB(src43_r, src32_r, src65_r, src54_r, src4332, src6554);
- XORI_B2_128_SB(src4332, src6554);
- dst10 = HEVC_FILT_4TAP_SH(src2110, src4332, filt0, filt1);
- dst32 = HEVC_FILT_4TAP_SH(src4332, src6554, filt0, filt1);
- HEVC_BIW_RND_CLIP2(dst10, dst32, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- dst10, dst32);
- dst10 = (v8i16) __msa_pckev_b((v16i8) dst32, (v16i8) dst10);
- ST_W4(dst10, 0, 1, 2, 3, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- static void hevc_vt_biwgt_4t_4x8multiple_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t weight, offset, constant;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v16i8 src2110, src4332, src6554, src8776;
- v8i16 dst10, dst32, dst54, dst76;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
- src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
- for (loop_cnt = (height >> 3); loop_cnt--;) {
- LD_SB6(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8);
- src0_ptr += (6 * src_stride);
- LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7);
- src1_ptr += (8 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
- ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
- src32_r, src43_r, src54_r, src65_r);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- ILVR_D3_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r,
- src4332, src6554, src8776);
- XORI_B3_128_SB(src4332, src6554, src8776);
- dst10 = HEVC_FILT_4TAP_SH(src2110, src4332, filt0, filt1);
- dst32 = HEVC_FILT_4TAP_SH(src4332, src6554, filt0, filt1);
- dst54 = HEVC_FILT_4TAP_SH(src6554, src8776, filt0, filt1);
- LD_SB2(src0_ptr, src_stride, src9, src2);
- src0_ptr += (2 * src_stride);
- ILVR_B2_SB(src9, src8, src2, src9, src98_r, src109_r);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src109_r, (v2i64) src98_r);
- src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
- dst76 = HEVC_FILT_4TAP_SH(src8776, src2110, filt0, filt1);
- HEVC_BIW_RND_CLIP4(dst10, dst32, dst54, dst76,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- dst10, dst32, dst54, dst76);
- PCKEV_B2_SH(dst32, dst10, dst76, dst54, dst10, dst32);
- ST_W8(dst10, dst32, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_vt_biwgt_4t_4x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (4 == height) {
- hevc_vt_biwgt_4t_4x4_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (0 == (height % 8)) {
- hevc_vt_biwgt_4t_4x8multiple_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val);
- }
- }
- static void hevc_vt_biwgt_4t_6w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src21_r, src43_r;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- LD_SB2(src0_ptr, src_stride, src1, src2);
- src0_ptr += (2 * src_stride);
- XORI_B2_128_SB(src1, src2);
- ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);
- tmp2 = HEVC_FILT_4TAP_SH(src32_r, src10_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src43_r, src21_r, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST_W2(tmp0, 0, 2, dst, dst_stride);
- ST_H2(tmp0, 2, 6, dst + 4, dst_stride);
- ST_W2(tmp1, 0, 2, dst + 2 * dst_stride, dst_stride);
- ST_H2(tmp1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 in0, in1, tmp0, tmp1;
- v16i8 src10_r, src32_r, src21_r, src43_r;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- LD_SB2(src0_ptr, src_stride, src3, src4);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- HEVC_BIW_RND_CLIP2(tmp0, tmp1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1);
- tmp0 = (v8i16) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_D2(tmp0, 0, 1, dst, dst_stride);
- }
- static void hevc_vt_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 in0, in1, in2, in3, in4, in5;
- v16i8 src10_r, src32_r, src54_r, src76_r;
- v16i8 src21_r, src43_r, src65_r, src87_r;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- LD_SB6(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8);
- LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);
- XORI_B6_128_SB(src3, src4, src5, src6, src7, src8);
- ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
- src32_r, src43_r, src54_r, src65_r);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- tmp2 = HEVC_FILT_4TAP_SH(src32_r, src54_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src43_r, src65_r, filt0, filt1);
- tmp4 = HEVC_FILT_4TAP_SH(src54_r, src76_r, filt0, filt1);
- tmp5 = HEVC_FILT_4TAP_SH(src65_r, src87_r, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP2(tmp4, tmp5, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- tmp4, tmp5);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- tmp3 = (v8i16) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
- ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride);
- ST_D2(tmp3, 0, 1, dst + 4 * dst_stride, dst_stride);
- }
- static void hevc_vt_biwgt_4t_8x4multiple_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src21_r, src43_r;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- LD_SB2(src0_ptr, src_stride, src1, src2);
- src0_ptr += (2 * src_stride);
- XORI_B2_128_SB(src1, src2);
- ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);
- tmp2 = HEVC_FILT_4TAP_SH(src32_r, src10_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src43_r, src21_r, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_vt_biwgt_4t_8x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (6 == height) {
- hevc_vt_biwgt_4t_8x6_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter,
- weight0, weight1, offset0, offset1, rnd_val);
- } else {
- hevc_vt_biwgt_4t_8x4multiple_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter, height,
- weight0, weight1, offset0, offset1,
- rnd_val);
- }
- }
- static void hevc_vt_biwgt_4t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src5;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v16i8 src10_r, src32_r, src21_r, src43_r;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
- v16i8 src10_l, src32_l, src54_l, src21_l, src43_l, src65_l;
- v16i8 src2110, src4332;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_l, (v2i64) src10_l);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- LD_SH4(src1_ptr + 8, src2_stride, in4, in5, in6, in7);
- src1_ptr += (4 * src2_stride);
- ILVR_D2_SH(in5, in4, in7, in6, in4, in5);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
- src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_l, (v2i64) src32_l);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- tmp4 = HEVC_FILT_4TAP_SH(src2110, src4332, filt0, filt1);
- LD_SB2(src0_ptr, src_stride, src5, src2);
- src0_ptr += (2 * src_stride);
- XORI_B2_128_SB(src5, src2);
- ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
- ILVL_B2_SB(src5, src4, src2, src5, src54_l, src65_l);
- src2110 = (v16i8) __msa_ilvr_d((v2i64) src65_l, (v2i64) src54_l);
- tmp2 = HEVC_FILT_4TAP_SH(src32_r, src10_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src43_r, src21_r, filt0, filt1);
- tmp5 = HEVC_FILT_4TAP_SH(src4332, src2110, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP2(tmp4, tmp5, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- tmp4, tmp5);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- tmp2 = (v8i16) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
- ST_D4(tmp0, tmp1, 0, 1, 0, 1, dst, dst_stride);
- ST_W4(tmp2, 0, 1, 2, 3, dst + 8, dst_stride);
- dst += (4 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src5;
- v8i16 in0, in1, in2, in3;
- v16i8 src10_r, src32_r, src21_r, src43_r;
- v16i8 src10_l, src32_l, src21_l, src43_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
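- /* Each 16-wide row is split into low/high 8-column halves (ILVR/ILVL) and
-  * the vertical taps are applied to both halves. */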
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2(src1_ptr + 8, src2_stride, in2, in3);
- src1_ptr += (2 * src2_stride);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- tmp2 = HEVC_FILT_4TAP_SH(src10_l, src32_l, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src21_l, src43_l, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_SH(tmp2, tmp0, tmp3, tmp1, tmp0, tmp1);
- ST_SH2(tmp0, tmp1, dst, dst_stride);
- dst += (2 * dst_stride);
- LD_SB2(src0_ptr, src_stride, src5, src2);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2(src1_ptr + 8, src2_stride, in2, in3);
- src1_ptr += (2 * src2_stride);
- XORI_B2_128_SB(src5, src2);
- ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
- ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
- tmp0 = HEVC_FILT_4TAP_SH(src32_r, src10_r, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src43_r, src21_r, filt0, filt1);
- tmp2 = HEVC_FILT_4TAP_SH(src32_l, src10_l, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src43_l, src21_l, filt0, filt1);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_SH(tmp2, tmp0, tmp3, tmp1, tmp0, tmp1);
- ST_SH2(tmp0, tmp1, dst, dst_stride);
- dst += (2 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src5;
- v16i8 src6, src7, src8, src9, src10, src11;
- v8i16 in0, in1, in2, in3, in4, in5;
- v16i8 src10_r, src32_r, src76_r, src98_r;
- v16i8 src10_l, src32_l, src21_l, src43_l;
- v16i8 src21_r, src43_r, src87_r, src109_r;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- /* 16width */
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
- /* 8width */
- LD_SB3(src0_ptr + 16, src_stride, src6, src7, src8);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src6, src7, src8);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- /* 16width */
- LD_SB2(src0_ptr, src_stride, src3, src4);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2(src1_ptr + 8, src2_stride, in2, in3);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
- /* 8width */
- LD_SB2(src0_ptr + 16, src_stride, src9, src10);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr + 16, src2_stride, in4, in5);
- src1_ptr += (2 * src2_stride);
- XORI_B2_128_SB(src9, src10);
- ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
- /* 16width */
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp4 = HEVC_FILT_4TAP_SH(src10_l, src32_l, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- tmp5 = HEVC_FILT_4TAP_SH(src21_l, src43_l, filt0, filt1);
- /* 8width */
- tmp2 = HEVC_FILT_4TAP_SH(src76_r, src98_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src87_r, src109_r, filt0, filt1);
- /* 16width */
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp4, tmp5,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp4, tmp5);
- /* 8width */
- HEVC_BIW_RND_CLIP2(tmp2, tmp3, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- tmp2, tmp3);
- /* 16width */
- PCKEV_B2_SH(tmp4, tmp0, tmp5, tmp1, tmp0, tmp1);
- /* 8width */
- tmp2 = (v8i16) __msa_pckev_b((v16i8) tmp3, (v16i8) tmp2);
- ST_SH2(tmp0, tmp1, dst, dst_stride);
- ST_D2(tmp2, 0, 1, dst + 16, dst_stride);
- dst += (2 * dst_stride);
- /* 16width */
- LD_SB2(src0_ptr, src_stride, src5, src2);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2(src1_ptr + 8, src2_stride, in2, in3);
- XORI_B2_128_SB(src5, src2);
- ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
- ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
- /* 8width */
- LD_SB2(src0_ptr + 16, src_stride, src11, src8);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr + 16, src2_stride, in4, in5);
- src1_ptr += (2 * src2_stride);
- XORI_B2_128_SB(src11, src8);
- ILVR_B2_SB(src11, src10, src8, src11, src76_r, src87_r);
- /* 16width */
- tmp0 = HEVC_FILT_4TAP_SH(src32_r, src10_r, filt0, filt1);
- tmp4 = HEVC_FILT_4TAP_SH(src32_l, src10_l, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src43_r, src21_r, filt0, filt1);
- tmp5 = HEVC_FILT_4TAP_SH(src43_l, src21_l, filt0, filt1);
- /* 8width */
- tmp2 = HEVC_FILT_4TAP_SH(src98_r, src76_r, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src109_r, src87_r, filt0, filt1);
- /* 16width */
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp4, tmp5,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp4, tmp5);
- /* 8width */
- HEVC_BIW_RND_CLIP2(tmp2, tmp3, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- tmp2, tmp3);
- /* 16width */
- PCKEV_B2_SH(tmp4, tmp0, tmp5, tmp1, tmp0, tmp1);
- /* 8width */
- tmp2 = (v8i16) __msa_pckev_b((v16i8) tmp3, (v16i8) tmp2);
- ST_SH2(tmp0, tmp1, dst, dst_stride);
- ST_D2(tmp2, 0, 1, dst + 16, dst_stride);
- dst += (2 * dst_stride);
- }
- }
- static void hevc_vt_biwgt_4t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
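- /* The 32-wide block is handled as two independent 16-column halves;
-  * dst_tmp addresses the right half. */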
- uint8_t *dst_tmp = dst + 16;
- int32_t offset, weight, constant;
- v16i8 src0, src1, src2, src3, src4, src6, src7, src8, src9, src10;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- v16i8 src10_r, src32_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src87_r, src109_r;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v16i8 src10_l, src32_l, src76_l, src98_l;
- v16i8 src21_l, src43_l, src87_l, src109_l;
- v8i16 filt0, filt1;
- v8i16 filter_vec;
- v4i32 weight_vec, offset_vec, rnd_vec;
- src0_ptr -= src_stride;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- constant = 128 * weight1;
- constant <<= 6;
- offset += constant;
- offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- filter_vec = LD_SH(filter);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- /* 16width */
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- XORI_B3_128_SB(src0, src1, src2);
- ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
- ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
- /* next 16width */
- LD_SB3(src0_ptr + 16, src_stride, src6, src7, src8);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src6, src7, src8);
- ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
- ILVL_B2_SB(src7, src6, src8, src7, src76_l, src87_l);
- for (loop_cnt = (height >> 1); loop_cnt--;) {
- /* 16width */
- LD_SB2(src0_ptr, src_stride, src3, src4);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- LD_SH2(src1_ptr + 8, src2_stride, in2, in3);
- XORI_B2_128_SB(src3, src4);
- ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
- ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
- /* 16width */
- tmp0 = HEVC_FILT_4TAP_SH(src10_r, src32_r, filt0, filt1);
- tmp4 = HEVC_FILT_4TAP_SH(src10_l, src32_l, filt0, filt1);
- tmp1 = HEVC_FILT_4TAP_SH(src21_r, src43_r, filt0, filt1);
- tmp5 = HEVC_FILT_4TAP_SH(src21_l, src43_l, filt0, filt1);
- /* 16width */
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp4, tmp5,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp4, tmp5);
- /* 16width */
- PCKEV_B2_SH(tmp4, tmp0, tmp5, tmp1, tmp0, tmp1);
- ST_SH2(tmp0, tmp1, dst, dst_stride);
- dst += (2 * dst_stride);
- src10_r = src32_r;
- src21_r = src43_r;
- src10_l = src32_l;
- src21_l = src43_l;
- src2 = src4;
- /* next 16width */
- LD_SB2(src0_ptr + 16, src_stride, src9, src10);
- src0_ptr += (2 * src_stride);
- LD_SH2(src1_ptr + 16, src2_stride, in4, in5);
- LD_SH2(src1_ptr + 24, src2_stride, in6, in7);
- src1_ptr += (2 * src2_stride);
- XORI_B2_128_SB(src9, src10);
- ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
- ILVL_B2_SB(src9, src8, src10, src9, src98_l, src109_l);
- /* next 16width */
- tmp2 = HEVC_FILT_4TAP_SH(src76_r, src98_r, filt0, filt1);
- tmp6 = HEVC_FILT_4TAP_SH(src76_l, src98_l, filt0, filt1);
- tmp3 = HEVC_FILT_4TAP_SH(src87_r, src109_r, filt0, filt1);
- tmp7 = HEVC_FILT_4TAP_SH(src87_l, src109_l, filt0, filt1);
- /* next 16width */
- HEVC_BIW_RND_CLIP4(tmp2, tmp3, tmp6, tmp7,
- in4, in5, in6, in7,
- weight_vec, rnd_vec, offset_vec,
- tmp2, tmp3, tmp6, tmp7);
- /* next 16width */
- PCKEV_B2_SH(tmp6, tmp2, tmp7, tmp3, tmp2, tmp3);
- ST_SH2(tmp2, tmp3, dst_tmp, dst_stride);
- dst_tmp += (2 * dst_stride);
- src76_r = src98_r;
- src87_r = src109_r;
- src76_l = src98_l;
- src87_l = src109_l;
- src8 = src10;
- }
- }
- static void hevc_hv_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint64_t tp0, tp1;
- int32_t offset, weight;
- v8i16 in0 = { 0 };
- v16u8 out;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
- v16i8 mask1;
- v8i16 filter_vec, tmp, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43, tmp0, tmp1;
- v4i32 dst0, dst1, offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
- XORI_B5_128_SB(src0, src1, src2, src3, src4);
- VSHF_B2_SB(src0, src2, src0, src2, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src3, src1, src3, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src4, src2, src4, mask0, mask1, vec4, vec5);
- dst20 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst31 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dst42 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dst31, dst20, dst10, dst32);
- ILVRL_H2_SH(dst42, dst31, dst21, dst43);
- dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
- dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
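- /* Drop the 6 fractional bits of the vertical stage so the result is back in
-  * the same intermediate range as src1_ptr before bi-weighting. */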
- dst0 >>= 6;
- dst1 >>= 6;
- dst0 = (v4i32) __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in0);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- SRAR_W2_SW(dst0, dst1, rnd_vec);
- tmp = __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
- tmp = CLIP_SH_0_255_MAX_SATU(tmp);
- out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
- ST_W2(out, 0, 1, dst, dst_stride);
- }
- static void hevc_hv_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint64_t tp0, tp1;
- int32_t offset, weight;
- v16u8 out;
- v8i16 in0 = { 0 }, in1 = { 0 };
- v16i8 src0, src1, src2, src3, src4, src5, src6;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
- v16i8 mask1;
- v8i16 filter_vec, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 dst30, dst41, dst52, dst63;
- v8i16 dst10, dst32, dst54, dst21, dst43, dst65;
- v4i32 offset_vec, rnd_vec, const_vec;
- v4i32 dst0, dst1, dst2, dst3;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- VSHF_B2_SB(src0, src3, src0, src3, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src4, src1, src4, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src5, src2, src5, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);
- dst30 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst41 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dst52 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dst63 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- ILVRL_H2_SH(dst41, dst30, dst10, dst43);
- ILVRL_H2_SH(dst52, dst41, dst21, dst54);
- ILVRL_H2_SH(dst63, dst52, dst32, dst65);
- dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
- dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
- dst2 = HEVC_FILT_4TAP(dst32, dst54, filt_h0, filt_h1);
- dst3 = HEVC_FILT_4TAP(dst43, dst65, filt_h0, filt_h1);
- SRA_4V(dst0, dst1, dst2, dst3, 6);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp1, tmp3);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in0);
- src1_ptr += (2 * src2_stride);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in1);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
- CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
- }
- static void hevc_hv_biwgt_4t_4multx8mult_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- uint64_t tp0, tp1;
- int32_t offset, weight;
- v16u8 out0, out1;
- v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
- v16i8 mask1;
- v8i16 filter_vec, weight_vec;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
- v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v8i16 dst98_r, dst109_r;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src2, src1, src2, mask0, mask1, vec2, vec3);
- dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
- dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
- for (loop_cnt = height >> 3; loop_cnt--;) {
- LD_SB8(src0_ptr, src_stride,
- src3, src4, src5, src6, src7, src8, src9, src10);
- src0_ptr += (8 * src_stride);
- XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);
- dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- dst32_r = __msa_ilvr_h(dst73, dst22);
- ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
- ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
- ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
- dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
- dst76_r = __msa_ilvr_h(dst22, dst106);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in0);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in1);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in2);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in3);
- dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
- dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
- SRA_4V(dst0, dst1, dst2, dst3, 6);
- SRA_4V(dst4, dst5, dst6, dst7, 6);
- PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1,
- dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
- tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- dst10_r = dst98_r;
- dst21_r = dst109_r;
- dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
- }
- }
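- /* Dispatcher for 4-wide hv bi-weighted epel blocks: heights 2 and 4 use the
-  * dedicated kernels above, heights that are multiples of 8 use the
-  * 4multx8mult kernel. */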
- static void hevc_hv_biwgt_4t_4w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_hv_biwgt_4t_4x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (4 == height) {
- hevc_hv_biwgt_4t_4x4_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (0 == (height % 8)) {
- hevc_hv_biwgt_4t_4multx8mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1,
- offset0, offset1, rnd_val);
- }
- }
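- /* 6-wide hv bi-weighted epel block, 8 rows: the left 4 columns come from the
-  * right-interleaved (ILVR) filter terms and are stored with ST_W8, the two
-  * remaining columns come from the packed left (ILVL) halves and are stored
-  * with ST_H8 at dst + 4. */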
- static void hevc_hv_biwgt_4t_6w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t tpw0, tpw1, tpw2, tpw3;
- uint64_t tp0, tp1;
- int32_t offset, weight;
- v16u8 out0, out1, out2;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
- v8i16 in4 = { 0 }, in5 = { 0 };
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1, filter_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
- v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, weight_vec;
- v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
- v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
- v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
- v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v4i32 dst4_r, dst5_r, dst6_r, dst7_r;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
- ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- LD_SB8(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8, src9,
- src10);
- XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);
- dsth7 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth8 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth9 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth10 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
- ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
- ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
- ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
- ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
- ILVRL_H2_SH(dsth9, dsth8, dst98_r, dst98_l);
- ILVRL_H2_SH(dsth10, dsth9, dst109_r, dst109_l);
- PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
- PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
- dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst6_r = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
- dst7_r = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst1021_l, dst3243_l, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst3243_l, dst5465_l, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst5465_l, dst7687_l, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
- SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
- SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
- PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0, dst1);
- PCKEV_H2_SW(dst5_r, dst4_r, dst7_r, dst6_r, dst2, dst3);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in0);
- LD2(src1_ptr + 2 * src2_stride, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in1);
- LD2(src1_ptr + 4 * src2_stride, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in2);
- LD2(src1_ptr + 6 * src2_stride, src2_stride, tp0, tp1);
- INSERT_D2_SH(tp0, tp1, in3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
- tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- PCKEV_H2_SW(dst1_l, dst0_l, dst3_l, dst2_l, dst4, dst5);
- LW4(src1_ptr + 4, src2_stride, tpw0, tpw1, tpw2, tpw3);
- src1_ptr += (4 * src2_stride);
- INSERT_W4_SH(tpw0, tpw1, tpw2, tpw3, in4);
- LW4(src1_ptr + 4, src2_stride, tpw0, tpw1, tpw2, tpw3);
- INSERT_W4_SH(tpw0, tpw1, tpw2, tpw3, in5);
- ILVRL_H2_SH(dst4, in4, tmp0, tmp1);
- ILVRL_H2_SH(dst5, in5, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp4, tmp5);
- CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
- out2 = (v16u8) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
- ST_H8(out2, 0, 1, 2, 3, 4, 5, 6, 7, dst + 4, dst_stride);
- }
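- /* 8x2 hv bi-weighted epel block: 5 source rows yield two vertically filtered
-  * rows, which are weighted against two 8-wide rows of the 16-bit prediction,
-  * rounded, clipped and stored as two 8-byte rows. */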
- static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- int32_t weight, offset;
- v16u8 out;
- v16i8 src0, src1, src2, src3, src4;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1;
- v8i16 filter_vec, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
- v8i16 dst0, dst1, dst2, dst3, dst4;
- v8i16 in0, in1;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
- XORI_B5_128_SB(src0, src1, src2, src3, src4);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
- dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dst3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- dst4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
- ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
- ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
- PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp1, tmp3);
- ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
- ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
- dst0_r = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst0_l = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst1_r = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst1_l = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
- PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp0, tmp1);
- CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
- out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST_D2(out, 0, 1, dst, dst_stride);
- }
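- /* hv bi-weighted epel kernel for height 4 and width 8 * width8mult: each
-  * 8-wide column is filtered from 7 source rows and combined with four 8-wide
-  * rows of the 16-bit prediction. */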
- static void hevc_hv_biwgt_4t_8multx4_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val,
- int32_t width8mult)
- {
- int32_t weight, offset;
- uint32_t cnt;
- v16u8 out0, out1;
- v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, weight_vec;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, in0, in1, in2, in3;
- v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
- v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask0 = LD_SB(ff_hevc_mask_arr);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- weight_vec = (v8i16) __msa_fill_w(weight);
- for (cnt = width8mult; cnt--;) {
- LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
- src0_ptr += 8;
- XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += 8;
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
- ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
- ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
- ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
- ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
- SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
- PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
- dst3_r, dst0, dst1, dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- tmp0, tmp1, tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- dst += 8;
- }
- }
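- /* 8x6 hv bi-weighted epel block: 9 source rows produce 6 filtered rows,
-  * combined with 6 rows of the 16-bit prediction and stored as 8-byte rows. */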
- static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t offset, weight;
- v16u8 out0, out1, out2;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1;
- v8i16 filter_vec, weight_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
- v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v4i32 dst4_r, dst4_l, dst5_r, dst5_l;
- v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
- v8i16 dst21_r, dst43_r, dst21_l, dst43_l;
- v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
- v8i16 dst76_r, dst76_l, dst87_r, dst87_l;
- v8i16 in0, in1, in2, in3, in4, in5;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
- src0_ptr += (5 * src_stride);
- LD_SB4(src0_ptr, src_stride, src5, src6, src7, src8);
- XORI_B5_128_SB(src0, src1, src2, src3, src4);
- XORI_B4_128_SB(src5, src6, src7, src8);
- LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);
- dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- dsth4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
- dsth5 = HEVC_FILT_4TAP_SH(vec10, vec11, filt0, filt1);
- dsth6 = HEVC_FILT_4TAP_SH(vec12, vec13, filt0, filt1);
- dsth7 = HEVC_FILT_4TAP_SH(vec14, vec15, filt0, filt1);
- dsth8 = HEVC_FILT_4TAP_SH(vec16, vec17, filt0, filt1);
- ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
- ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
- ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
- ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
- ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
- ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst4_l = HEVC_FILT_4TAP(dst54_l, dst76_l, filt_h0, filt_h1);
- dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst5_l = HEVC_FILT_4TAP(dst65_l, dst87_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
- SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
- SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);
- PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l, dst3_r,
- dst0, dst1, dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- tmp0, tmp1, tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- PCKEV_H2_SW(dst4_l, dst4_r, dst5_l, dst5_r, dst0, dst1);
- ILVRL_H2_SH(dst0, in4, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in5, tmp2, tmp3);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp4, tmp5);
- CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
- out2 = (v16u8) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
- ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
- ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
- }
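- /* Generic hv bi-weighted epel kernel for widths that are multiples of 8 and
-  * heights that are multiples of 4: iterates over 8-wide columns and keeps the
-  * last three horizontally filtered rows live across each 4-row vertical
-  * pass. */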
- static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val,
- int32_t width)
- {
- uint32_t loop_cnt;
- uint32_t cnt;
- int32_t offset, weight;
- uint8_t *src0_ptr_tmp;
- int16_t *src1_ptr_tmp;
- uint8_t *dst_tmp;
- v16u8 out0, out1;
- v16i8 src0, src1, src2, src3, src4, src5, src6;
- v8i16 in0, in1, in2, in3;
- v8i16 filt0, filt1;
- v8i16 filt_h0, filt_h1;
- v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
- v16i8 mask1;
- v8i16 filter_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
- v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l, weight_vec;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- weight_vec = (v8i16) __msa_fill_w(weight);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- for (cnt = width >> 3; cnt--;) {
- src0_ptr_tmp = src0_ptr;
- src1_ptr_tmp = src1_ptr;
- dst_tmp = dst;
- LD_SB3(src0_ptr_tmp, src_stride, src0, src1, src2);
- src0_ptr_tmp += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
- ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- for (loop_cnt = height >> 2; loop_cnt--;) {
- LD_SB4(src0_ptr_tmp, src_stride, src3, src4, src5, src6);
- src0_ptr_tmp += (4 * src_stride);
- LD_SH4(src1_ptr_tmp, src2_stride, in0, in1, in2, in3);
- src1_ptr_tmp += (4 * src2_stride);
- XORI_B4_128_SB(src3, src4, src5, src6);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
- ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
- ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
- ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
- SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
- PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
- dst3_r, dst0, dst1, dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- tmp0, tmp1, tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst_tmp, dst_stride);
- dst_tmp += (4 * dst_stride);
- dst10_r = dst54_r;
- dst10_l = dst54_l;
- dst21_r = dst65_r;
- dst21_l = dst65_l;
- dsth2 = dsth6;
- }
- src0_ptr += 8;
- dst += 8;
- src1_ptr += 8;
- }
- }
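- /* Dispatcher for 8-wide hv bi-weighted epel blocks: heights 2, 4 and 6 use
-  * the dedicated kernels above, other multiples of 4 use 8multx4mult with
-  * width 8. */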
- static void hevc_hv_biwgt_4t_8w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (2 == height) {
- hevc_hv_biwgt_4t_8x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (4 == height) {
- hevc_hv_biwgt_4t_8multx4_msa(src0_ptr, src_stride, src1_ptr,
- src2_stride, dst, dst_stride, filter_x,
- filter_y, weight0, weight1, offset0,
- offset1, rnd_val, 1);
- } else if (6 == height) {
- hevc_hv_biwgt_4t_8x6_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- weight0, weight1, offset0, offset1, rnd_val);
- } else if (0 == (height % 4)) {
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride, filter_x, filter_y,
- height, weight0,
- weight1, offset0, offset1, rnd_val, 8);
- }
- }
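- /* 12-wide hv bi-weighted epel block: the left 8 columns are processed like
-  * the 8multx4mult kernel (4 passes of 4 rows) and the right 4 columns like
-  * the 4multx8mult kernel (2 passes of 8 rows). */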
- static void hevc_hv_biwgt_4t_12w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- uint32_t loop_cnt;
- uint64_t tp0, tp1;
- int32_t offset, weight;
- uint8_t *src0_ptr_tmp, *dst_tmp;
- int16_t *src1_ptr_tmp;
- v16u8 out0, out1;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v16i8 mask0, mask1, mask2, mask3;
- v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, weight_vec;
- v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
- v8i16 dst76_r, dst98_r, dst87_r, dst109_r;
- v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
- v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
- v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
- v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
- v4i32 offset_vec, rnd_vec, const_vec;
- src0_ptr -= (src_stride + 1);
- filter_vec = LD_SH(filter_x);
- SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- filter_vec = LD_SH(filter_y);
- UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- mask0 = LD_SB(ff_hevc_mask_arr);
- mask1 = mask0 + 2;
- offset = (offset0 + offset1) << rnd_val;
- weight0 = weight0 & 0x0000FFFF;
- weight = weight0 | (weight1 << 16);
- const_vec = __msa_fill_w((128 * weight1));
- const_vec <<= 6;
- offset_vec = __msa_fill_w(offset);
- rnd_vec = __msa_fill_w(rnd_val + 1);
- offset_vec += const_vec;
- weight_vec = (v8i16) __msa_fill_w(weight);
- src0_ptr_tmp = src0_ptr;
- dst_tmp = dst;
- src1_ptr_tmp = src1_ptr;
- LD_SB3(src0_ptr_tmp, src_stride, src0, src1, src2);
- src0_ptr_tmp += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
- ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- for (loop_cnt = 4; loop_cnt--;) {
- LD_SB4(src0_ptr_tmp, src_stride, src3, src4, src5, src6);
- src0_ptr_tmp += (4 * src_stride);
- XORI_B4_128_SB(src3, src4, src5, src6);
- LD_SH4(src1_ptr_tmp, src2_stride, in0, in1, in2, in3);
- src1_ptr_tmp += (4 * src2_stride);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
- ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
- ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
- ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
- SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
- PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
- dst3_r, dst0, dst1, dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- tmp0, tmp1, tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_D4(out0, out1, 0, 1, 0, 1, dst_tmp, dst_stride);
- dst_tmp += (4 * dst_stride);
- dst10_r = dst54_r;
- dst10_l = dst54_l;
- dst21_r = dst65_r;
- dst21_l = dst65_l;
- dsth2 = dsth6;
- }
- src0_ptr += 8;
- dst += 8;
- src1_ptr += 8;
- mask2 = LD_SB(ff_hevc_mask_arr + 16);
- mask3 = mask2 + 2;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
- VSHF_B2_SB(src1, src2, src1, src2, mask2, mask3, vec2, vec3);
- dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
- dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
- for (loop_cnt = 2; loop_cnt--;) {
- LD_SB8(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8, src9,
- src10);
- src0_ptr += (8 * src_stride);
- XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);
- VSHF_B2_SB(src4, src8, src4, src8, mask2, mask3, vec2, vec3);
- VSHF_B2_SB(src5, src9, src5, src9, mask2, mask3, vec4, vec5);
- VSHF_B2_SB(src6, src10, src6, src10, mask2, mask3, vec6, vec7);
- dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
- dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
- dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- dst32_r = __msa_ilvr_h(dst73, dst22);
- ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
- ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
- ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
- dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
- dst76_r = __msa_ilvr_h(dst22, dst106);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in0);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in1);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in2);
- LD2(src1_ptr, src2_stride, tp0, tp1);
- src1_ptr += 2 * src2_stride;
- INSERT_D2_SH(tp0, tp1, in3);
- dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
- dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
- SRA_4V(dst0, dst1, dst2, dst3, 6);
- SRA_4V(dst4, dst5, dst6, dst7, 6);
- PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- dst0, dst1, dst2, dst3);
- ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
- ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
- ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
- ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
- dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
- dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
- dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
- dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
- dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
- dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
- dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
- dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
- SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
- SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
- PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
- tmp0, tmp1, tmp2, tmp3);
- CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
- dst += (8 * dst_stride);
- dst10_r = dst98_r;
- dst21_r = dst109_r;
- dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
- }
- }
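- /* 16-wide dispatcher: height 4 uses the 8multx4 kernel with width8mult = 2,
-  * all other heights use the generic 8multx4mult kernel with width 16. */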
- static void hevc_hv_biwgt_4t_16w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- if (4 == height) {
- hevc_hv_biwgt_4t_8multx4_msa(src0_ptr, src_stride, src1_ptr,
- src2_stride, dst, dst_stride, filter_x,
- filter_y, weight0, weight1, offset0,
- offset1, rnd_val, 2);
- } else {
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride, src1_ptr,
- src2_stride, dst, dst_stride,
- filter_x, filter_y, height, weight0,
- weight1, offset0, offset1, rnd_val, 16);
- }
- }
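- /* 24-wide blocks reuse the generic 8multx4mult kernel with width 24. */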
- static void hevc_hv_biwgt_4t_24w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride,
- filter_x, filter_y, height, weight0,
- weight1, offset0, offset1, rnd_val, 24);
- }
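- /* 32-wide blocks reuse the generic 8multx4mult kernel with width 32. */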
- static void hevc_hv_biwgt_4t_32w_msa(uint8_t *src0_ptr,
- int32_t src_stride,
- int16_t *src1_ptr,
- int32_t src2_stride,
- uint8_t *dst,
- int32_t dst_stride,
- const int8_t *filter_x,
- const int8_t *filter_y,
- int32_t height,
- int32_t weight0,
- int32_t weight1,
- int32_t offset0,
- int32_t offset1,
- int32_t rnd_val)
- {
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride,
- filter_x, filter_y, height, weight0,
- weight1, offset0, offset1, rnd_val, 32);
- }
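- /* Public wrappers (ff_hevc_put_hevc_bi_w_*) exposed to the HEVC DSP layer.
-  * BI_W_MC_COPY expands the bi-weighted copy (no interpolation) wrapper for
-  * each supported width; for 8-bit content shift is 14 + 1 - 8 = 7, so
-  * log2Wd = denom + 6. */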
- #define BI_W_MC_COPY(WIDTH) \
- void ff_hevc_put_hevc_bi_w_pel_pixels##WIDTH##_8_msa(uint8_t *dst, \
- ptrdiff_t dst_stride, \
- uint8_t *src, \
- ptrdiff_t src_stride, \
- int16_t *src_16bit, \
- int height, \
- int denom, \
- int weight0, \
- int weight1, \
- int offset0, \
- int offset1, \
- intptr_t mx, \
- intptr_t my, \
- int width) \
- { \
- int shift = 14 + 1 - 8; \
- int log2Wd = denom + shift - 1; \
- \
- hevc_biwgt_copy_##WIDTH##w_msa(src, src_stride, src_16bit, MAX_PB_SIZE, \
- dst, dst_stride, height, \
- weight0, weight1, offset0, \
- offset1, log2Wd); \
- }
- BI_W_MC_COPY(4);
- BI_W_MC_COPY(6);
- BI_W_MC_COPY(8);
- BI_W_MC_COPY(12);
- BI_W_MC_COPY(16);
- BI_W_MC_COPY(24);
- BI_W_MC_COPY(32);
- BI_W_MC_COPY(48);
- BI_W_MC_COPY(64);
- #undef BI_W_MC_COPY
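- /* BI_W_MC expands the bi-weighted wrappers for one-dimensional (horizontal
-  * or vertical) qpel/epel interpolation: FILT_DIR selects the mx or my filter
-  * index and DIR1/TAP select the MSA kernel, which is called with
-  * log2Wd = denom + 14 - 8. */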
- #define BI_W_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
- void ff_hevc_put_hevc_bi_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
- ptrdiff_t \
- dst_stride, \
- uint8_t *src, \
- ptrdiff_t \
- src_stride, \
- int16_t *src_16bit, \
- int height, \
- int denom, \
- int weight0, \
- int weight1, \
- int offset0, \
- int offset1, \
- intptr_t mx, \
- intptr_t my, \
- int width) \
- { \
- const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1]; \
- int log2Wd = denom + 14 - 8; \
- \
- hevc_##DIR1##_biwgt_##TAP##t_##WIDTH##w_msa(src, src_stride, src_16bit, \
- MAX_PB_SIZE, dst, dst_stride, \
- filter, height, weight0, \
- weight1, offset0, offset1, \
- log2Wd); \
- }
- BI_W_MC(qpel, h, 4, 8, hz, mx);
- BI_W_MC(qpel, h, 8, 8, hz, mx);
- BI_W_MC(qpel, h, 12, 8, hz, mx);
- BI_W_MC(qpel, h, 16, 8, hz, mx);
- BI_W_MC(qpel, h, 24, 8, hz, mx);
- BI_W_MC(qpel, h, 32, 8, hz, mx);
- BI_W_MC(qpel, h, 48, 8, hz, mx);
- BI_W_MC(qpel, h, 64, 8, hz, mx);
- BI_W_MC(qpel, v, 4, 8, vt, my);
- BI_W_MC(qpel, v, 8, 8, vt, my);
- BI_W_MC(qpel, v, 12, 8, vt, my);
- BI_W_MC(qpel, v, 16, 8, vt, my);
- BI_W_MC(qpel, v, 24, 8, vt, my);
- BI_W_MC(qpel, v, 32, 8, vt, my);
- BI_W_MC(qpel, v, 48, 8, vt, my);
- BI_W_MC(qpel, v, 64, 8, vt, my);
- BI_W_MC(epel, h, 4, 4, hz, mx);
- BI_W_MC(epel, h, 8, 4, hz, mx);
- BI_W_MC(epel, h, 6, 4, hz, mx);
- BI_W_MC(epel, h, 12, 4, hz, mx);
- BI_W_MC(epel, h, 16, 4, hz, mx);
- BI_W_MC(epel, h, 24, 4, hz, mx);
- BI_W_MC(epel, h, 32, 4, hz, mx);
- BI_W_MC(epel, v, 4, 4, vt, my);
- BI_W_MC(epel, v, 8, 4, vt, my);
- BI_W_MC(epel, v, 6, 4, vt, my);
- BI_W_MC(epel, v, 12, 4, vt, my);
- BI_W_MC(epel, v, 16, 4, vt, my);
- BI_W_MC(epel, v, 24, 4, vt, my);
- BI_W_MC(epel, v, 32, 4, vt, my);
- #undef BI_W_MC
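- /* BI_W_MC_HV expands the bi-weighted wrappers for two-dimensional (hv)
-  * qpel/epel interpolation; the horizontal and vertical filter phases are
-  * selected by mx and my respectively. */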
- #define BI_W_MC_HV(PEL, WIDTH, TAP) \
- void ff_hevc_put_hevc_bi_w_##PEL##_hv##WIDTH##_8_msa(uint8_t *dst, \
- ptrdiff_t dst_stride, \
- uint8_t *src, \
- ptrdiff_t src_stride, \
- int16_t *src_16bit, \
- int height, \
- int denom, \
- int weight0, \
- int weight1, \
- int offset0, \
- int offset1, \
- intptr_t mx, \
- intptr_t my, \
- int width) \
- { \
- const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1]; \
- const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1]; \
- int log2Wd = denom + 14 - 8; \
- \
- hevc_hv_biwgt_##TAP##t_##WIDTH##w_msa(src, src_stride, src_16bit, \
- MAX_PB_SIZE, dst, dst_stride, \
- filter_x, filter_y, height, \
- weight0, weight1, offset0, \
- offset1, log2Wd); \
- }
- BI_W_MC_HV(qpel, 4, 8);
- BI_W_MC_HV(qpel, 8, 8);
- BI_W_MC_HV(qpel, 12, 8);
- BI_W_MC_HV(qpel, 16, 8);
- BI_W_MC_HV(qpel, 24, 8);
- BI_W_MC_HV(qpel, 32, 8);
- BI_W_MC_HV(qpel, 48, 8);
- BI_W_MC_HV(qpel, 64, 8);
- BI_W_MC_HV(epel, 4, 4);
- BI_W_MC_HV(epel, 8, 4);
- BI_W_MC_HV(epel, 6, 4);
- BI_W_MC_HV(epel, 12, 4);
- BI_W_MC_HV(epel, 16, 4);
- BI_W_MC_HV(epel, 24, 4);
- BI_W_MC_HV(epel, 32, 4);
- #undef BI_W_MC_HV