- //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- #include "MCTargetDesc/X86BaseInfo.h"
- #include "MCTargetDesc/X86IntelInstPrinter.h"
- #include "MCTargetDesc/X86MCExpr.h"
- #include "MCTargetDesc/X86TargetStreamer.h"
- #include "TargetInfo/X86TargetInfo.h"
- #include "X86AsmParserCommon.h"
- #include "X86Operand.h"
- #include "llvm/ADT/STLExtras.h"
- #include "llvm/ADT/SmallString.h"
- #include "llvm/ADT/SmallVector.h"
- #include "llvm/ADT/StringSwitch.h"
- #include "llvm/ADT/Twine.h"
- #include "llvm/MC/MCContext.h"
- #include "llvm/MC/MCExpr.h"
- #include "llvm/MC/MCInst.h"
- #include "llvm/MC/MCInstrInfo.h"
- #include "llvm/MC/MCParser/MCAsmLexer.h"
- #include "llvm/MC/MCParser/MCAsmParser.h"
- #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
- #include "llvm/MC/MCParser/MCTargetAsmParser.h"
- #include "llvm/MC/MCRegisterInfo.h"
- #include "llvm/MC/MCSection.h"
- #include "llvm/MC/MCStreamer.h"
- #include "llvm/MC/MCSubtargetInfo.h"
- #include "llvm/MC/MCSymbol.h"
- #include "llvm/MC/TargetRegistry.h"
- #include "llvm/Support/CommandLine.h"
- #include "llvm/Support/Compiler.h"
- #include "llvm/Support/SourceMgr.h"
- #include "llvm/Support/raw_ostream.h"
- #include <algorithm>
- #include <memory>
- using namespace llvm;
- static cl::opt<bool> LVIInlineAsmHardening(
- "x86-experimental-lvi-inline-asm-hardening",
- cl::desc("Harden inline assembly code that may be vulnerable to Load Value"
- " Injection (LVI). This feature is experimental."), cl::Hidden);
- static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
- if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
- ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
- return true;
- }
- return false;
- }
- namespace {
- static const char OpPrecedence[] = {
- 0, // IC_OR
- 1, // IC_XOR
- 2, // IC_AND
- 4, // IC_LSHIFT
- 4, // IC_RSHIFT
- 5, // IC_PLUS
- 5, // IC_MINUS
- 6, // IC_MULTIPLY
- 6, // IC_DIVIDE
- 6, // IC_MOD
- 7, // IC_NOT
- 8, // IC_NEG
- 9, // IC_RPAREN
- 10, // IC_LPAREN
- 0, // IC_IMM
- 0, // IC_REGISTER
- 3, // IC_EQ
- 3, // IC_NE
- 3, // IC_LT
- 3, // IC_LE
- 3, // IC_GT
- 3 // IC_GE
- };
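- // For example, with these precedences "1 SHL 2 + 3" folds as 1 SHL (2 + 3)
- // because '+' (IC_PLUS, 5) binds tighter than SHL (IC_LSHIFT, 4), and
- // "6 XOR 2 AND 3" folds as 6 XOR (2 AND 3).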
- class X86AsmParser : public MCTargetAsmParser {
- ParseInstructionInfo *InstInfo;
- bool Code16GCC;
- unsigned ForcedDataPrefix = 0;
- enum VEXEncoding {
- VEXEncoding_Default,
- VEXEncoding_VEX,
- VEXEncoding_VEX2,
- VEXEncoding_VEX3,
- VEXEncoding_EVEX,
- };
- VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
- enum DispEncoding {
- DispEncoding_Default,
- DispEncoding_Disp8,
- DispEncoding_Disp32,
- };
- DispEncoding ForcedDispEncoding = DispEncoding_Default;
- private:
- SMLoc consumeToken() {
- MCAsmParser &Parser = getParser();
- SMLoc Result = Parser.getTok().getLoc();
- Parser.Lex();
- return Result;
- }
- X86TargetStreamer &getTargetStreamer() {
- assert(getParser().getStreamer().getTargetStreamer() &&
- "do not have a target streamer");
- MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
- return static_cast<X86TargetStreamer &>(TS);
- }
- unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
- uint64_t &ErrorInfo, FeatureBitset &MissingFeatures,
- bool matchingInlineAsm, unsigned VariantID = 0) {
- // In Code16GCC mode, match as 32-bit.
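- // (.code16gcc marks 32-bit-style assembly intended to run in 16-bit mode, as
- // emitted by "gcc -m16", so instructions are matched against their 32-bit
- // forms here and the mode is switched back afterwards.)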
- if (Code16GCC)
- SwitchMode(X86::Mode32Bit);
- unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MissingFeatures, matchingInlineAsm,
- VariantID);
- if (Code16GCC)
- SwitchMode(X86::Mode16Bit);
- return rv;
- }
- enum InfixCalculatorTok {
- IC_OR = 0,
- IC_XOR,
- IC_AND,
- IC_LSHIFT,
- IC_RSHIFT,
- IC_PLUS,
- IC_MINUS,
- IC_MULTIPLY,
- IC_DIVIDE,
- IC_MOD,
- IC_NOT,
- IC_NEG,
- IC_RPAREN,
- IC_LPAREN,
- IC_IMM,
- IC_REGISTER,
- IC_EQ,
- IC_NE,
- IC_LT,
- IC_LE,
- IC_GT,
- IC_GE
- };
- enum IntelOperatorKind {
- IOK_INVALID = 0,
- IOK_LENGTH,
- IOK_SIZE,
- IOK_TYPE,
- };
- enum MasmOperatorKind {
- MOK_INVALID = 0,
- MOK_LENGTHOF,
- MOK_SIZEOF,
- MOK_TYPE,
- };
- class InfixCalculator {
- typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
- SmallVector<InfixCalculatorTok, 4> InfixOperatorStack;
- SmallVector<ICToken, 4> PostfixStack;
- bool isUnaryOperator(InfixCalculatorTok Op) const {
- return Op == IC_NEG || Op == IC_NOT;
- }
- public:
- int64_t popOperand() {
- assert (!PostfixStack.empty() && "Popped an empty stack!");
- ICToken Op = PostfixStack.pop_back_val();
- if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
- return -1; // The invalid Scale value will be caught later by checkScale
- return Op.second;
- }
- void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
- assert ((Op == IC_IMM || Op == IC_REGISTER) &&
- "Unexpected operand!");
- PostfixStack.push_back(std::make_pair(Op, Val));
- }
- void popOperator() { InfixOperatorStack.pop_back(); }
- void pushOperator(InfixCalculatorTok Op) {
- // Push the new operator if the stack is empty.
- if (InfixOperatorStack.empty()) {
- InfixOperatorStack.push_back(Op);
- return;
- }
- // Push the new operator if it has a higher precedence than the operator
- // on the top of the stack or the operator on the top of the stack is a
- // left parenthesis.
- unsigned Idx = InfixOperatorStack.size() - 1;
- InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
- if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
- InfixOperatorStack.push_back(Op);
- return;
- }
- // The operator on the top of the stack has higher precedence than the
- // new operator.
- unsigned ParenCount = 0;
- while (true) {
- // Nothing to process.
- if (InfixOperatorStack.empty())
- break;
- Idx = InfixOperatorStack.size() - 1;
- StackOp = InfixOperatorStack[Idx];
- if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
- break;
- // If the parenthesis count is zero and we see a left parenthesis,
- // then stop processing.
- if (!ParenCount && StackOp == IC_LPAREN)
- break;
- if (StackOp == IC_RPAREN) {
- ++ParenCount;
- InfixOperatorStack.pop_back();
- } else if (StackOp == IC_LPAREN) {
- --ParenCount;
- InfixOperatorStack.pop_back();
- } else {
- InfixOperatorStack.pop_back();
- PostfixStack.push_back(std::make_pair(StackOp, 0));
- }
- }
- // Push the new operator.
- InfixOperatorStack.push_back(Op);
- }
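- // For example, feeding the infix sequence 2 + 3 * 4 through pushOperand /
- // pushOperator leaves the postfix stack as [2, 3, 4] with '+' and '*' still
- // on the operator stack; execute() flushes them to 2 3 4 * + and yields 14.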
- int64_t execute() {
- // Push any remaining operators onto the postfix stack.
- while (!InfixOperatorStack.empty()) {
- InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
- if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
- PostfixStack.push_back(std::make_pair(StackOp, 0));
- }
- if (PostfixStack.empty())
- return 0;
- SmallVector<ICToken, 16> OperandStack;
- for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
- ICToken Op = PostfixStack[i];
- if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
- OperandStack.push_back(Op);
- } else if (isUnaryOperator(Op.first)) {
- assert (OperandStack.size() > 0 && "Too few operands.");
- ICToken Operand = OperandStack.pop_back_val();
- assert (Operand.first == IC_IMM &&
- "Unary operation with a register!");
- switch (Op.first) {
- default:
- report_fatal_error("Unexpected operator!");
- break;
- case IC_NEG:
- OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
- break;
- case IC_NOT:
- OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
- break;
- }
- } else {
- assert (OperandStack.size() > 1 && "Too few operands.");
- int64_t Val;
- ICToken Op2 = OperandStack.pop_back_val();
- ICToken Op1 = OperandStack.pop_back_val();
- switch (Op.first) {
- default:
- report_fatal_error("Unexpected operator!");
- break;
- case IC_PLUS:
- Val = Op1.second + Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_MINUS:
- Val = Op1.second - Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_MULTIPLY:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Multiply operation with an immediate and a register!");
- Val = Op1.second * Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_DIVIDE:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Divide operation with an immediate and a register!");
- assert (Op2.second != 0 && "Division by zero!");
- Val = Op1.second / Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_MOD:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Modulo operation with an immediate and a register!");
- Val = Op1.second % Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_OR:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Or operation with an immediate and a register!");
- Val = Op1.second | Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_XOR:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Xor operation with an immediate and a register!");
- Val = Op1.second ^ Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_AND:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "And operation with an immediate and a register!");
- Val = Op1.second & Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_LSHIFT:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Left shift operation with an immediate and a register!");
- Val = Op1.second << Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_RSHIFT:
- assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Right shift operation with an immediate and a register!");
- Val = Op1.second >> Op2.second;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_EQ:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Equals operation with an immediate and a register!");
- Val = (Op1.second == Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_NE:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Not-equals operation with an immediate and a register!");
- Val = (Op1.second != Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_LT:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Less-than operation with an immediate and a register!");
- Val = (Op1.second < Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_LE:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Less-than-or-equal operation with an immediate and a "
- "register!");
- Val = (Op1.second <= Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_GT:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Greater-than operation with an immediate and a register!");
- Val = (Op1.second > Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- case IC_GE:
- assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
- "Greater-than-or-equal operation with an immediate and a "
- "register!");
- Val = (Op1.second >= Op2.second) ? -1 : 0;
- OperandStack.push_back(std::make_pair(IC_IMM, Val));
- break;
- }
- }
- }
- assert (OperandStack.size() == 1 && "Expected a single result.");
- return OperandStack.pop_back_val().second;
- }
- };
- enum IntelExprState {
- IES_INIT,
- IES_OR,
- IES_XOR,
- IES_AND,
- IES_EQ,
- IES_NE,
- IES_LT,
- IES_LE,
- IES_GT,
- IES_GE,
- IES_LSHIFT,
- IES_RSHIFT,
- IES_PLUS,
- IES_MINUS,
- IES_OFFSET,
- IES_CAST,
- IES_NOT,
- IES_MULTIPLY,
- IES_DIVIDE,
- IES_MOD,
- IES_LBRAC,
- IES_RBRAC,
- IES_LPAREN,
- IES_RPAREN,
- IES_REGISTER,
- IES_INTEGER,
- IES_IDENTIFIER,
- IES_ERROR
- };
- class IntelExprStateMachine {
- IntelExprState State, PrevState;
- unsigned BaseReg, IndexReg, TmpReg, Scale;
- int64_t Imm;
- const MCExpr *Sym;
- StringRef SymName;
- InfixCalculator IC;
- InlineAsmIdentifierInfo Info;
- short BracCount;
- bool MemExpr;
- bool OffsetOperator;
- SMLoc OffsetOperatorLoc;
- AsmTypeInfo CurType;
- bool setSymRef(const MCExpr *Val, StringRef ID, StringRef &ErrMsg) {
- if (Sym) {
- ErrMsg = "cannot use more than one symbol in memory operand";
- return true;
- }
- Sym = Val;
- SymName = ID;
- return false;
- }
- public:
- IntelExprStateMachine()
- : State(IES_INIT), PrevState(IES_ERROR), BaseReg(0), IndexReg(0),
- TmpReg(0), Scale(0), Imm(0), Sym(nullptr), BracCount(0),
- MemExpr(false), OffsetOperator(false) {}
- void addImm(int64_t imm) { Imm += imm; }
- short getBracCount() const { return BracCount; }
- bool isMemExpr() const { return MemExpr; }
- bool isOffsetOperator() const { return OffsetOperator; }
- SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
- unsigned getBaseReg() const { return BaseReg; }
- unsigned getIndexReg() const { return IndexReg; }
- unsigned getScale() const { return Scale; }
- const MCExpr *getSym() const { return Sym; }
- StringRef getSymName() const { return SymName; }
- StringRef getType() const { return CurType.Name; }
- unsigned getSize() const { return CurType.Size; }
- unsigned getElementSize() const { return CurType.ElementSize; }
- unsigned getLength() const { return CurType.Length; }
- int64_t getImm() { return Imm + IC.execute(); }
- bool isValidEndState() const {
- return State == IES_RBRAC || State == IES_INTEGER;
- }
- bool hadError() const { return State == IES_ERROR; }
- const InlineAsmIdentifierInfo &getIdentifierInfo() const { return Info; }
- void onOr() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_OR;
- IC.pushOperator(IC_OR);
- break;
- }
- PrevState = CurrState;
- }
- void onXor() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_XOR;
- IC.pushOperator(IC_XOR);
- break;
- }
- PrevState = CurrState;
- }
- void onAnd() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_AND;
- IC.pushOperator(IC_AND);
- break;
- }
- PrevState = CurrState;
- }
- void onEq() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_EQ;
- IC.pushOperator(IC_EQ);
- break;
- }
- PrevState = CurrState;
- }
- void onNE() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_NE;
- IC.pushOperator(IC_NE);
- break;
- }
- PrevState = CurrState;
- }
- void onLT() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_LT;
- IC.pushOperator(IC_LT);
- break;
- }
- PrevState = CurrState;
- }
- void onLE() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_LE;
- IC.pushOperator(IC_LE);
- break;
- }
- PrevState = CurrState;
- }
- void onGT() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_GT;
- IC.pushOperator(IC_GT);
- break;
- }
- PrevState = CurrState;
- }
- void onGE() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_GE;
- IC.pushOperator(IC_GE);
- break;
- }
- PrevState = CurrState;
- }
- void onLShift() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_LSHIFT;
- IC.pushOperator(IC_LSHIFT);
- break;
- }
- PrevState = CurrState;
- }
- void onRShift() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- State = IES_RSHIFT;
- IC.pushOperator(IC_RSHIFT);
- break;
- }
- PrevState = CurrState;
- }
- bool onPlus(StringRef &ErrMsg) {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- case IES_REGISTER:
- case IES_OFFSET:
- State = IES_PLUS;
- IC.pushOperator(IC_PLUS);
- if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
- // If we already have a BaseReg, then assume this is the IndexReg with
- // no explicit scale.
- if (!BaseReg) {
- BaseReg = TmpReg;
- } else {
- if (IndexReg) {
- ErrMsg = "BaseReg/IndexReg already set!";
- return true;
- }
- IndexReg = TmpReg;
- Scale = 0;
- }
- }
- break;
- }
- PrevState = CurrState;
- return false;
- }
- bool onMinus(StringRef &ErrMsg) {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_OR:
- case IES_XOR:
- case IES_AND:
- case IES_EQ:
- case IES_NE:
- case IES_LT:
- case IES_LE:
- case IES_GT:
- case IES_GE:
- case IES_LSHIFT:
- case IES_RSHIFT:
- case IES_PLUS:
- case IES_NOT:
- case IES_MULTIPLY:
- case IES_DIVIDE:
- case IES_MOD:
- case IES_LPAREN:
- case IES_RPAREN:
- case IES_LBRAC:
- case IES_RBRAC:
- case IES_INTEGER:
- case IES_REGISTER:
- case IES_INIT:
- case IES_OFFSET:
- State = IES_MINUS;
- // push minus operator if it is not a negate operator
- if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
- CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
- CurrState == IES_OFFSET)
- IC.pushOperator(IC_MINUS);
- else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
- // A '-' here would negate the scale, which is illegal.
- ErrMsg = "Scale can't be negative";
- return true;
- } else
- IC.pushOperator(IC_NEG);
- if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
- // If we already have a BaseReg, then assume this is the IndexReg with
- // no explicit scale.
- if (!BaseReg) {
- BaseReg = TmpReg;
- } else {
- if (IndexReg) {
- ErrMsg = "BaseReg/IndexReg already set!";
- return true;
- }
- IndexReg = TmpReg;
- Scale = 0;
- }
- }
- break;
- }
- PrevState = CurrState;
- return false;
- }
- void onNot() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_OR:
- case IES_XOR:
- case IES_AND:
- case IES_EQ:
- case IES_NE:
- case IES_LT:
- case IES_LE:
- case IES_GT:
- case IES_GE:
- case IES_LSHIFT:
- case IES_RSHIFT:
- case IES_PLUS:
- case IES_MINUS:
- case IES_NOT:
- case IES_MULTIPLY:
- case IES_DIVIDE:
- case IES_MOD:
- case IES_LPAREN:
- case IES_LBRAC:
- case IES_INIT:
- State = IES_NOT;
- IC.pushOperator(IC_NOT);
- break;
- }
- PrevState = CurrState;
- }
- bool onRegister(unsigned Reg, StringRef &ErrMsg) {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_PLUS:
- case IES_LPAREN:
- case IES_LBRAC:
- State = IES_REGISTER;
- TmpReg = Reg;
- IC.pushOperand(IC_REGISTER);
- break;
- case IES_MULTIPLY:
- // Index Register - Scale * Register
- if (PrevState == IES_INTEGER) {
- if (IndexReg) {
- ErrMsg = "BaseReg/IndexReg already set!";
- return true;
- }
- State = IES_REGISTER;
- IndexReg = Reg;
- // Get the scale and replace the 'Scale * Register' with '0'.
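- // E.g. for "[4*ebx]" this records IndexReg = ebx and Scale = 4, leaving a
- // 0 immediate in the expression so the folded displacement is unchanged.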
- Scale = IC.popOperand();
- if (checkScale(Scale, ErrMsg))
- return true;
- IC.pushOperand(IC_IMM);
- IC.popOperator();
- } else {
- State = IES_ERROR;
- }
- break;
- }
- PrevState = CurrState;
- return false;
- }
- bool onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName,
- const InlineAsmIdentifierInfo &IDInfo,
- const AsmTypeInfo &Type, bool ParsingMSInlineAsm,
- StringRef &ErrMsg) {
- // InlineAsm: Treat an enum value as an integer
- if (ParsingMSInlineAsm)
- if (IDInfo.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
- return onInteger(IDInfo.Enum.EnumVal, ErrMsg);
- // Treat a symbolic constant like an integer
- if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
- return onInteger(CE->getValue(), ErrMsg);
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_CAST:
- case IES_PLUS:
- case IES_MINUS:
- case IES_NOT:
- case IES_INIT:
- case IES_LBRAC:
- case IES_LPAREN:
- if (setSymRef(SymRef, SymRefName, ErrMsg))
- return true;
- MemExpr = true;
- State = IES_INTEGER;
- IC.pushOperand(IC_IMM);
- if (ParsingMSInlineAsm)
- Info = IDInfo;
- setTypeInfo(Type);
- break;
- }
- return false;
- }
- bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_PLUS:
- case IES_MINUS:
- case IES_NOT:
- case IES_OR:
- case IES_XOR:
- case IES_AND:
- case IES_EQ:
- case IES_NE:
- case IES_LT:
- case IES_LE:
- case IES_GT:
- case IES_GE:
- case IES_LSHIFT:
- case IES_RSHIFT:
- case IES_DIVIDE:
- case IES_MOD:
- case IES_MULTIPLY:
- case IES_LPAREN:
- case IES_INIT:
- case IES_LBRAC:
- State = IES_INTEGER;
- if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
- // Index Register - Register * Scale
- if (IndexReg) {
- ErrMsg = "BaseReg/IndexReg already set!";
- return true;
- }
- IndexReg = TmpReg;
- Scale = TmpInt;
- if (checkScale(Scale, ErrMsg))
- return true;
- // Get the scale and replace the 'Register * Scale' with '0'.
- IC.popOperator();
- } else {
- IC.pushOperand(IC_IMM, TmpInt);
- }
- break;
- }
- PrevState = CurrState;
- return false;
- }
- void onStar() {
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_REGISTER:
- case IES_RPAREN:
- State = IES_MULTIPLY;
- IC.pushOperator(IC_MULTIPLY);
- break;
- }
- }
- void onDivide() {
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- State = IES_DIVIDE;
- IC.pushOperator(IC_DIVIDE);
- break;
- }
- }
- void onMod() {
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_RPAREN:
- State = IES_MOD;
- IC.pushOperator(IC_MOD);
- break;
- }
- }
- bool onLBrac() {
- if (BracCount)
- return true;
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_RBRAC:
- case IES_INTEGER:
- case IES_RPAREN:
- State = IES_PLUS;
- IC.pushOperator(IC_PLUS);
- CurType.Length = 1;
- CurType.Size = CurType.ElementSize;
- break;
- case IES_INIT:
- case IES_CAST:
- assert(!BracCount && "BracCount should be zero on parsing's start");
- State = IES_LBRAC;
- break;
- }
- MemExpr = true;
- BracCount++;
- return false;
- }
- bool onRBrac() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_OFFSET:
- case IES_REGISTER:
- case IES_RPAREN:
- if (BracCount-- != 1)
- return true;
- State = IES_RBRAC;
- if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
- // If we already have a BaseReg, then assume this is the IndexReg with
- // no explicit scale.
- if (!BaseReg) {
- BaseReg = TmpReg;
- } else {
- assert (!IndexReg && "BaseReg/IndexReg already set!");
- IndexReg = TmpReg;
- Scale = 0;
- }
- }
- break;
- }
- PrevState = CurrState;
- return false;
- }
- void onLParen() {
- IntelExprState CurrState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_PLUS:
- case IES_MINUS:
- case IES_NOT:
- case IES_OR:
- case IES_XOR:
- case IES_AND:
- case IES_EQ:
- case IES_NE:
- case IES_LT:
- case IES_LE:
- case IES_GT:
- case IES_GE:
- case IES_LSHIFT:
- case IES_RSHIFT:
- case IES_MULTIPLY:
- case IES_DIVIDE:
- case IES_MOD:
- case IES_LPAREN:
- case IES_INIT:
- case IES_LBRAC:
- State = IES_LPAREN;
- IC.pushOperator(IC_LPAREN);
- break;
- }
- PrevState = CurrState;
- }
- void onRParen() {
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_INTEGER:
- case IES_OFFSET:
- case IES_REGISTER:
- case IES_RBRAC:
- case IES_RPAREN:
- State = IES_RPAREN;
- IC.pushOperator(IC_RPAREN);
- break;
- }
- }
- bool onOffset(const MCExpr *Val, SMLoc OffsetLoc, StringRef ID,
- const InlineAsmIdentifierInfo &IDInfo,
- bool ParsingMSInlineAsm, StringRef &ErrMsg) {
- PrevState = State;
- switch (State) {
- default:
- ErrMsg = "unexpected offset operator expression";
- return true;
- case IES_PLUS:
- case IES_INIT:
- case IES_LBRAC:
- if (setSymRef(Val, ID, ErrMsg))
- return true;
- OffsetOperator = true;
- OffsetOperatorLoc = OffsetLoc;
- State = IES_OFFSET;
- // As we cannot yet resolve the actual value (offset), we retain
- // the requested semantics by pushing a '0' to the operands stack
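- // (e.g. for "mov eax, offset foo" the address of foo is represented by the
- // symbol reference recorded above and resolved later).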
- IC.pushOperand(IC_IMM);
- if (ParsingMSInlineAsm) {
- Info = IDInfo;
- }
- break;
- }
- return false;
- }
- void onCast(AsmTypeInfo Info) {
- PrevState = State;
- switch (State) {
- default:
- State = IES_ERROR;
- break;
- case IES_LPAREN:
- setTypeInfo(Info);
- State = IES_CAST;
- break;
- }
- }
- void setTypeInfo(AsmTypeInfo Type) { CurType = Type; }
- };
- bool Error(SMLoc L, const Twine &Msg, SMRange Range = None,
- bool MatchingInlineAsm = false) {
- MCAsmParser &Parser = getParser();
- if (MatchingInlineAsm) {
- if (!getLexer().isAtStartOfStatement())
- Parser.eatToEndOfStatement();
- return false;
- }
- return Parser.Error(L, Msg, Range);
- }
- bool MatchRegisterByName(unsigned &RegNo, StringRef RegName, SMLoc StartLoc,
- SMLoc EndLoc);
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc,
- bool RestoreOnFailure);
- std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
- std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
- bool IsSIReg(unsigned Reg);
- unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
- void
- AddDefaultSrcDestOperands(OperandVector &Operands,
- std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
- std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
- bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
- OperandVector &FinalOperands);
- bool ParseOperand(OperandVector &Operands);
- bool ParseATTOperand(OperandVector &Operands);
- bool ParseIntelOperand(OperandVector &Operands);
- bool ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
- InlineAsmIdentifierInfo &Info, SMLoc &End);
- bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
- unsigned IdentifyIntelInlineAsmOperator(StringRef Name);
- unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
- unsigned IdentifyMasmOperator(StringRef Name);
- bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
- bool ParseRoundingModeOp(SMLoc Start, OperandVector &Operands);
- bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
- bool &ParseError, SMLoc &End);
- bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
- bool &ParseError, SMLoc &End);
- void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
- SMLoc End);
- bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
- bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
- InlineAsmIdentifierInfo &Info,
- bool IsUnevaluatedOperand, SMLoc &End,
- bool IsParsingOffsetOperator = false);
- bool ParseMemOperand(unsigned SegReg, const MCExpr *Disp, SMLoc StartLoc,
- SMLoc EndLoc, OperandVector &Operands);
- X86::CondCode ParseConditionCode(StringRef CCode);
- bool ParseIntelMemoryOperandSize(unsigned &Size);
- bool CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc Start, SMLoc End,
- unsigned Size, StringRef Identifier,
- const InlineAsmIdentifierInfo &Info,
- OperandVector &Operands);
- bool parseDirectiveArch();
- bool parseDirectiveNops(SMLoc L);
- bool parseDirectiveEven(SMLoc L);
- bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
- /// CodeView FPO data directives.
- bool parseDirectiveFPOProc(SMLoc L);
- bool parseDirectiveFPOSetFrame(SMLoc L);
- bool parseDirectiveFPOPushReg(SMLoc L);
- bool parseDirectiveFPOStackAlloc(SMLoc L);
- bool parseDirectiveFPOStackAlign(SMLoc L);
- bool parseDirectiveFPOEndPrologue(SMLoc L);
- bool parseDirectiveFPOEndProc(SMLoc L);
- /// SEH directives.
- bool parseSEHRegisterNumber(unsigned RegClassID, unsigned &RegNo);
- bool parseDirectiveSEHPushReg(SMLoc);
- bool parseDirectiveSEHSetFrame(SMLoc);
- bool parseDirectiveSEHSaveReg(SMLoc);
- bool parseDirectiveSEHSaveXMM(SMLoc);
- bool parseDirectiveSEHPushFrame(SMLoc);
- unsigned checkTargetMatchPredicate(MCInst &Inst) override;
- bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
- bool processInstruction(MCInst &Inst, const OperandVector &Ops);
- // Load Value Injection (LVI) Mitigations for machine code
- void emitWarningForSpecialLVIInstruction(SMLoc Loc);
- void applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out);
- void applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);
- /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
- /// instrumentation around Inst.
- void emitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);
- bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands, MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm) override;
- void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
- MCStreamer &Out, bool MatchingInlineAsm);
- bool ErrorMissingFeature(SMLoc IDLoc, const FeatureBitset &MissingFeatures,
- bool MatchingInlineAsm);
- bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands, MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm);
- bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands, MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm);
- bool OmitRegisterFromClobberLists(unsigned RegNo) override;
- /// Parses AVX512-specific operand primitives: masked registers ({%k<NUM>}, {z})
- /// and memory broadcasting ({1to<NUM>}), updating the Operands vector if required.
- /// Returns false if no parsing errors occurred, true otherwise.
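- /// For example, "{%k1}" and "{z}" in "vaddps %zmm0, %zmm1, %zmm2 {%k1} {z}" and
- /// "{1to16}" in "vaddps (%rax){1to16}, %zmm1, %zmm2" are the decorations parsed here.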
- bool HandleAVX512Operand(OperandVector &Operands);
- bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
- bool is64BitMode() const {
- // FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode64Bit];
- }
- bool is32BitMode() const {
- // FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode32Bit];
- }
- bool is16BitMode() const {
- // FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode16Bit];
- }
- void SwitchMode(unsigned mode) {
- MCSubtargetInfo &STI = copySTI();
- FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
- FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
- FeatureBitset FB = ComputeAvailableFeatures(
- STI.ToggleFeature(OldMode.flip(mode)));
- setAvailableFeatures(FB);
- assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
- }
- unsigned getPointerWidth() {
- if (is16BitMode()) return 16;
- if (is32BitMode()) return 32;
- if (is64BitMode()) return 64;
- llvm_unreachable("invalid mode");
- }
- bool isParsingIntelSyntax() {
- return getParser().getAssemblerDialect();
- }
- /// @name Auto-generated Matcher Functions
- /// {
- #define GET_ASSEMBLER_HEADER
- #include "X86GenAsmMatcher.inc"
- /// }
- public:
- enum X86MatchResultTy {
- Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
- #define GET_OPERAND_DIAGNOSTIC_TYPES
- #include "X86GenAsmMatcher.inc"
- };
- X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
- const MCInstrInfo &mii, const MCTargetOptions &Options)
- : MCTargetAsmParser(Options, sti, mii), InstInfo(nullptr),
- Code16GCC(false) {
- Parser.addAliasForDirective(".word", ".2byte");
- // Initialize the set of available features.
- setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
- }
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
- OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) override;
- bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
- bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc, OperandVector &Operands) override;
- bool ParseDirective(AsmToken DirectiveID) override;
- };
- } // end anonymous namespace
- /// @name Auto-generated Match Functions
- /// {
- static unsigned MatchRegisterName(StringRef Name);
- /// }
- static bool CheckBaseRegAndIndexRegAndScale(unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, bool Is64BitMode,
- StringRef &ErrMsg) {
- // If we have both a base register and an index register, make sure they are
- // both 64-bit or 32-bit registers.
- // To support VSIB, IndexReg can be a 128-bit, 256-bit, or 512-bit register.
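- // For example, "(%rax,%rcx,4)" and the VSIB form "(%rax,%zmm1,8)" pass these
- // checks, while a mixed-width pair such as "(%rax,%ebx)" is rejected below.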
- if (BaseReg != 0 &&
- !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
- X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
- X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
- ErrMsg = "invalid base+index expression";
- return true;
- }
- if (IndexReg != 0 &&
- !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
- X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
- ErrMsg = "invalid base+index expression";
- return true;
- }
- if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
- IndexReg == X86::EIP || IndexReg == X86::RIP ||
- IndexReg == X86::ESP || IndexReg == X86::RSP) {
- ErrMsg = "invalid base+index expression";
- return true;
- }
- // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
- // and then only in non-64-bit modes.
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
- (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
- BaseReg != X86::SI && BaseReg != X86::DI))) {
- ErrMsg = "invalid 16-bit base register";
- return true;
- }
- if (BaseReg == 0 &&
- X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
- ErrMsg = "16-bit memory operand may not include only index register";
- return true;
- }
- if (BaseReg != 0 && IndexReg != 0) {
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
- (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
- IndexReg == X86::EIZ)) {
- ErrMsg = "base register is 64-bit, but index register is not";
- return true;
- }
- if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
- (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
- IndexReg == X86::RIZ)) {
- ErrMsg = "base register is 32-bit, but index register is not";
- return true;
- }
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
- if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
- ErrMsg = "base register is 16-bit, but index register is not";
- return true;
- }
- if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
- (IndexReg != X86::SI && IndexReg != X86::DI)) {
- ErrMsg = "invalid 16-bit base/index register combination";
- return true;
- }
- }
- }
- // RIP/EIP-relative addressing is only supported in 64-bit mode.
- if (!Is64BitMode && BaseReg != 0 &&
- (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
- ErrMsg = "IP-relative addressing requires 64-bit mode";
- return true;
- }
- return checkScale(Scale, ErrMsg);
- }
- bool X86AsmParser::MatchRegisterByName(unsigned &RegNo, StringRef RegName,
- SMLoc StartLoc, SMLoc EndLoc) {
- // If we encounter a %, ignore it. This code handles registers with and
- // without the prefix; unprefixed registers can occur in CFI directives.
- RegName.consume_front("%");
- RegNo = MatchRegisterName(RegName);
- // If the match failed, try the register name as lowercase.
- if (RegNo == 0)
- RegNo = MatchRegisterName(RegName.lower());
- // The "flags" and "mxcsr" registers cannot be referenced directly.
- // Treat them as identifiers instead.
- if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
- (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
- RegNo = 0;
- if (!is64BitMode()) {
- // FIXME: This should be done using Requires<Not64BitMode> and
- // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can also be
- // checked.
- if (RegNo == X86::RIZ || RegNo == X86::RIP ||
- X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
- X86II::isX86_64NonExtLowByteReg(RegNo) ||
- X86II::isX86_64ExtendedReg(RegNo)) {
- return Error(StartLoc,
- "register %" + RegName + " is only available in 64-bit mode",
- SMRange(StartLoc, EndLoc));
- }
- }
- // If this is "db[0-15]", match it as an alias
- // for dr[0-15].
- if (RegNo == 0 && RegName.startswith("db")) {
- if (RegName.size() == 3) {
- switch (RegName[2]) {
- case '0':
- RegNo = X86::DR0;
- break;
- case '1':
- RegNo = X86::DR1;
- break;
- case '2':
- RegNo = X86::DR2;
- break;
- case '3':
- RegNo = X86::DR3;
- break;
- case '4':
- RegNo = X86::DR4;
- break;
- case '5':
- RegNo = X86::DR5;
- break;
- case '6':
- RegNo = X86::DR6;
- break;
- case '7':
- RegNo = X86::DR7;
- break;
- case '8':
- RegNo = X86::DR8;
- break;
- case '9':
- RegNo = X86::DR9;
- break;
- }
- } else if (RegName.size() == 4 && RegName[2] == '1') {
- switch (RegName[3]) {
- case '0':
- RegNo = X86::DR10;
- break;
- case '1':
- RegNo = X86::DR11;
- break;
- case '2':
- RegNo = X86::DR12;
- break;
- case '3':
- RegNo = X86::DR13;
- break;
- case '4':
- RegNo = X86::DR14;
- break;
- case '5':
- RegNo = X86::DR15;
- break;
- }
- }
- }
- if (RegNo == 0) {
- if (isParsingIntelSyntax())
- return true;
- return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));
- }
- return false;
- }
- bool X86AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc, bool RestoreOnFailure) {
- MCAsmParser &Parser = getParser();
- MCAsmLexer &Lexer = getLexer();
- RegNo = 0;
- SmallVector<AsmToken, 5> Tokens;
- auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
- if (RestoreOnFailure) {
- while (!Tokens.empty()) {
- Lexer.UnLex(Tokens.pop_back_val());
- }
- }
- };
- const AsmToken &PercentTok = Parser.getTok();
- StartLoc = PercentTok.getLoc();
- // If we encounter a %, ignore it. This code handles registers with and
- // without the prefix; unprefixed registers can occur in cfi directives.
- if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent)) {
- Tokens.push_back(PercentTok);
- Parser.Lex(); // Eat percent token.
- }
- const AsmToken &Tok = Parser.getTok();
- EndLoc = Tok.getEndLoc();
- if (Tok.isNot(AsmToken::Identifier)) {
- OnFailure();
- if (isParsingIntelSyntax()) return true;
- return Error(StartLoc, "invalid register name",
- SMRange(StartLoc, EndLoc));
- }
- if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {
- OnFailure();
- return true;
- }
- // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
- if (RegNo == X86::ST0) {
- Tokens.push_back(Tok);
- Parser.Lex(); // Eat 'st'
- // Check to see if we have '(4)' after %st.
- if (Lexer.isNot(AsmToken::LParen))
- return false;
- // Lex the paren.
- Tokens.push_back(Parser.getTok());
- Parser.Lex();
- const AsmToken &IntTok = Parser.getTok();
- if (IntTok.isNot(AsmToken::Integer)) {
- OnFailure();
- return Error(IntTok.getLoc(), "expected stack index");
- }
- switch (IntTok.getIntVal()) {
- case 0: RegNo = X86::ST0; break;
- case 1: RegNo = X86::ST1; break;
- case 2: RegNo = X86::ST2; break;
- case 3: RegNo = X86::ST3; break;
- case 4: RegNo = X86::ST4; break;
- case 5: RegNo = X86::ST5; break;
- case 6: RegNo = X86::ST6; break;
- case 7: RegNo = X86::ST7; break;
- default:
- OnFailure();
- return Error(IntTok.getLoc(), "invalid stack index");
- }
- // Lex IntTok
- Tokens.push_back(IntTok);
- Parser.Lex();
- if (Lexer.isNot(AsmToken::RParen)) {
- OnFailure();
- return Error(Parser.getTok().getLoc(), "expected ')'");
- }
- EndLoc = Parser.getTok().getEndLoc();
- Parser.Lex(); // Eat ')'
- return false;
- }
- EndLoc = Parser.getTok().getEndLoc();
- if (RegNo == 0) {
- OnFailure();
- if (isParsingIntelSyntax()) return true;
- return Error(StartLoc, "invalid register name",
- SMRange(StartLoc, EndLoc));
- }
- Parser.Lex(); // Eat identifier token.
- return false;
- }
- bool X86AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) {
- return ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/false);
- }
- OperandMatchResultTy X86AsmParser::tryParseRegister(unsigned &RegNo,
- SMLoc &StartLoc,
- SMLoc &EndLoc) {
- bool Result =
- ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/true);
- bool PendingErrors = getParser().hasPendingError();
- getParser().clearPendingErrors();
- if (PendingErrors)
- return MatchOperand_ParseFail;
- if (Result)
- return MatchOperand_NoMatch;
- return MatchOperand_Success;
- }
- std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
- bool Parse32 = is32BitMode() || Code16GCC;
- unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
- const MCExpr *Disp = MCConstantExpr::create(0, getContext());
- return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
- /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
- Loc, Loc, 0);
- }
- std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
- bool Parse32 = is32BitMode() || Code16GCC;
- unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);
- const MCExpr *Disp = MCConstantExpr::create(0, getContext());
- return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
- /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
- Loc, Loc, 0);
- }
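- // Illustrative note (assumption based on the code above): these helpers build
- // the implicit source/destination memory operands, e.g. "(%rsi)" and "(%rdi)"
- // in 64-bit mode, that string instructions such as movs/stos/lods use when
- // written without explicit operands.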
- bool X86AsmParser::IsSIReg(unsigned Reg) {
- switch (Reg) {
- default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
- case X86::RSI:
- case X86::ESI:
- case X86::SI:
- return true;
- case X86::RDI:
- case X86::EDI:
- case X86::DI:
- return false;
- }
- }
- unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
- bool IsSIReg) {
- switch (RegClassID) {
- default: llvm_unreachable("Unexpected register class");
- case X86::GR64RegClassID:
- return IsSIReg ? X86::RSI : X86::RDI;
- case X86::GR32RegClassID:
- return IsSIReg ? X86::ESI : X86::EDI;
- case X86::GR16RegClassID:
- return IsSIReg ? X86::SI : X86::DI;
- }
- }
- void X86AsmParser::AddDefaultSrcDestOperands(
- OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
- std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
- if (isParsingIntelSyntax()) {
- Operands.push_back(std::move(Dst));
- Operands.push_back(std::move(Src));
- }
- else {
- Operands.push_back(std::move(Src));
- Operands.push_back(std::move(Dst));
- }
- }
- bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
- OperandVector &FinalOperands) {
- if (OrigOperands.size() > 1) {
- // Check if sizes match, OrigOperands also contains the instruction name
- assert(OrigOperands.size() == FinalOperands.size() + 1 &&
- "Operand size mismatch");
- SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
- // Verify types match
- int RegClassID = -1;
- for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
- X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
- X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
- if (FinalOp.isReg() &&
- (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
- // Return false and let a normal complaint about bogus operands happen
- return false;
- if (FinalOp.isMem()) {
- if (!OrigOp.isMem())
- // Return false and let a normal complaint about bogus operands happen
- return false;
- unsigned OrigReg = OrigOp.Mem.BaseReg;
- unsigned FinalReg = FinalOp.Mem.BaseReg;
- // If we've already encounterd a register class, make sure all register
- // bases are of the same register class
- if (RegClassID != -1 &&
- !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
- return Error(OrigOp.getStartLoc(),
- "mismatching source and destination index registers");
- }
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
- RegClassID = X86::GR64RegClassID;
- else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
- RegClassID = X86::GR32RegClassID;
- else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
- RegClassID = X86::GR16RegClassID;
- else
- // Unexpected register class type
- // Return false and let a normal complaint about bogus operands happen
- return false;
- bool IsSI = IsSIReg(FinalReg);
- FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
- if (FinalReg != OrigReg) {
- std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
- Warnings.push_back(std::make_pair(
- OrigOp.getStartLoc(),
- "memory operand is only for determining the size, " + RegName +
- " will be used for the location"));
- }
- FinalOp.Mem.Size = OrigOp.Mem.Size;
- FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
- FinalOp.Mem.BaseReg = FinalReg;
- }
- }
- // Produce warnings only if all the operands passed the adjustment - prevent
- // legal cases like "movsd (%rax), %xmm0" mistakenly produce warnings
- for (auto &WarningMsg : Warnings) {
- Warning(WarningMsg.first, WarningMsg.second);
- }
- // Remove old operands
- for (unsigned int i = 0; i < FinalOperands.size(); ++i)
- OrigOperands.pop_back();
- }
- // OrigOperands.append(FinalOperands.begin(), FinalOperands.end());
- for (unsigned int i = 0; i < FinalOperands.size(); ++i)
- OrigOperands.push_back(std::move(FinalOperands[i]));
- return false;
- }
- bool X86AsmParser::ParseOperand(OperandVector &Operands) {
- if (isParsingIntelSyntax())
- return ParseIntelOperand(Operands);
- return ParseATTOperand(Operands);
- }
- bool X86AsmParser::CreateMemForMSInlineAsm(
- unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
- const InlineAsmIdentifierInfo &Info, OperandVector &Operands) {
- // If we found a decl other than a VarDecl, then assume it is a FuncDecl or
- // some other label reference.
- if (Info.isKind(InlineAsmIdentifierInfo::IK_Label)) {
- // Insert an explicit size if the user didn't have one.
- if (!Size) {
- Size = getPointerWidth();
- InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
- /*Len=*/0, Size);
- }
- // Create an absolute memory reference in order to match against
- // instructions taking a PC relative operand.
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
- End, Size, Identifier,
- Info.Label.Decl));
- return false;
- }
- // We either have a direct symbol reference, or an offset from a symbol. The
- // parser always puts the symbol on the LHS, so look there for size
- // calculation purposes.
- unsigned FrontendSize = 0;
- void *Decl = nullptr;
- bool IsGlobalLV = false;
- if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
- // Size is in terms of bits in this context.
- FrontendSize = Info.Var.Type * 8;
- Decl = Info.Var.Decl;
- IsGlobalLV = Info.Var.IsGlobalLV;
- }
- // It is common for MS InlineAsm to use a global variable together with one or
- // two registers in a memory expression, even though it is inaccessible via
- // rip/eip.
- if (IsGlobalLV && (BaseReg || IndexReg)) {
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
- End, Size, Identifier, Decl,
- FrontendSize));
- return false;
- }
- // Otherwise, we set the base register to a non-zero value
- // if we don't know the actual value at this time. This is necessary to
- // get the matching correct in some cases.
- BaseReg = BaseReg ? BaseReg : 1;
- Operands.push_back(X86Operand::CreateMem(
- getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
- Size,
- /*DefaultBaseReg=*/X86::RIP, Identifier, Decl, FrontendSize));
- return false;
- }
- // Some binary bitwise operators have named synonyms.
- // Query a candidate string for being such a named operator
- // and, if so, invoke the appropriate handler.
- bool X86AsmParser::ParseIntelNamedOperator(StringRef Name,
- IntelExprStateMachine &SM,
- bool &ParseError, SMLoc &End) {
- // A named operator should be either lower or upper case, but not a mix...
- // except in MASM, which uses full case-insensitivity.
- if (Name.compare(Name.lower()) && Name.compare(Name.upper()) &&
- !getParser().isParsingMasm())
- return false;
- if (Name.equals_insensitive("not")) {
- SM.onNot();
- } else if (Name.equals_insensitive("or")) {
- SM.onOr();
- } else if (Name.equals_insensitive("shl")) {
- SM.onLShift();
- } else if (Name.equals_insensitive("shr")) {
- SM.onRShift();
- } else if (Name.equals_insensitive("xor")) {
- SM.onXor();
- } else if (Name.equals_insensitive("and")) {
- SM.onAnd();
- } else if (Name.equals_insensitive("mod")) {
- SM.onMod();
- } else if (Name.equals_insensitive("offset")) {
- SMLoc OffsetLoc = getTok().getLoc();
- const MCExpr *Val = nullptr;
- StringRef ID;
- InlineAsmIdentifierInfo Info;
- ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
- if (ParseError)
- return true;
- StringRef ErrMsg;
- ParseError =
- SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
- if (ParseError)
- return Error(SMLoc::getFromPointer(Name.data()), ErrMsg);
- } else {
- return false;
- }
- if (!Name.equals_insensitive("offset"))
- End = consumeToken();
- return true;
- }
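- // Illustrative examples (Intel syntax, assumed): "mov eax, 6 and 3",
- // "mov eax, 1 shl 4", and "mov eax, offset myVar" (myVar being a placeholder
- // symbol) are all routed through the named-operator handling above rather than
- // being treated as ordinary identifiers.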
- bool X86AsmParser::ParseMasmNamedOperator(StringRef Name,
- IntelExprStateMachine &SM,
- bool &ParseError, SMLoc &End) {
- if (Name.equals_insensitive("eq")) {
- SM.onEq();
- } else if (Name.equals_insensitive("ne")) {
- SM.onNE();
- } else if (Name.equals_insensitive("lt")) {
- SM.onLT();
- } else if (Name.equals_insensitive("le")) {
- SM.onLE();
- } else if (Name.equals_insensitive("gt")) {
- SM.onGT();
- } else if (Name.equals_insensitive("ge")) {
- SM.onGE();
- } else {
- return false;
- }
- End = consumeToken();
- return true;
- }
- bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
- MCAsmParser &Parser = getParser();
- StringRef ErrMsg;
- AsmToken::TokenKind PrevTK = AsmToken::Error;
- bool Done = false;
- while (!Done) {
- // Get a fresh reference on each loop iteration in case the previous
- // iteration moved the token storage during UnLex().
- const AsmToken &Tok = Parser.getTok();
- bool UpdateLocLex = true;
- AsmToken::TokenKind TK = getLexer().getKind();
- switch (TK) {
- default:
- if ((Done = SM.isValidEndState()))
- break;
- return Error(Tok.getLoc(), "unknown token in expression");
- case AsmToken::Error:
- return Error(getLexer().getErrLoc(), getLexer().getErr());
- break;
- case AsmToken::EndOfStatement:
- Done = true;
- break;
- case AsmToken::Real:
- // DotOperator: [ebx].0
- UpdateLocLex = false;
- if (ParseIntelDotOperator(SM, End))
- return true;
- break;
- case AsmToken::Dot:
- if (!Parser.isParsingMasm()) {
- if ((Done = SM.isValidEndState()))
- break;
- return Error(Tok.getLoc(), "unknown token in expression");
- }
- // MASM allows spaces around the dot operator (e.g., "var . x")
- Lex();
- UpdateLocLex = false;
- if (ParseIntelDotOperator(SM, End))
- return true;
- break;
- case AsmToken::Dollar:
- if (!Parser.isParsingMasm()) {
- if ((Done = SM.isValidEndState()))
- break;
- return Error(Tok.getLoc(), "unknown token in expression");
- }
- LLVM_FALLTHROUGH;
- case AsmToken::String: {
- if (Parser.isParsingMasm()) {
- // MASM parsers handle strings in expressions as constants.
- SMLoc ValueLoc = Tok.getLoc();
- int64_t Res;
- const MCExpr *Val;
- if (Parser.parsePrimaryExpr(Val, End, nullptr))
- return true;
- UpdateLocLex = false;
- if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
- return Error(ValueLoc, "expected absolute value");
- if (SM.onInteger(Res, ErrMsg))
- return Error(ValueLoc, ErrMsg);
- break;
- }
- LLVM_FALLTHROUGH;
- }
- case AsmToken::At:
- case AsmToken::Identifier: {
- SMLoc IdentLoc = Tok.getLoc();
- StringRef Identifier = Tok.getString();
- UpdateLocLex = false;
- if (Parser.isParsingMasm()) {
- size_t DotOffset = Identifier.find_first_of('.');
- if (DotOffset != StringRef::npos) {
- consumeToken();
- StringRef LHS = Identifier.slice(0, DotOffset);
- StringRef Dot = Identifier.slice(DotOffset, DotOffset + 1);
- StringRef RHS = Identifier.slice(DotOffset + 1, StringRef::npos);
- if (!RHS.empty()) {
- getLexer().UnLex(AsmToken(AsmToken::Identifier, RHS));
- }
- getLexer().UnLex(AsmToken(AsmToken::Dot, Dot));
- if (!LHS.empty()) {
- getLexer().UnLex(AsmToken(AsmToken::Identifier, LHS));
- }
- break;
- }
- }
- // (MASM only) <TYPE> PTR operator
- if (Parser.isParsingMasm()) {
- const AsmToken &NextTok = getLexer().peekTok();
- if (NextTok.is(AsmToken::Identifier) &&
- NextTok.getIdentifier().equals_insensitive("ptr")) {
- AsmTypeInfo Info;
- if (Parser.lookUpType(Identifier, Info))
- return Error(Tok.getLoc(), "unknown type");
- SM.onCast(Info);
- // Eat type and PTR.
- consumeToken();
- End = consumeToken();
- break;
- }
- }
- // Register, or (MASM only) <register>.<field>
- unsigned Reg;
- if (Tok.is(AsmToken::Identifier)) {
- if (!ParseRegister(Reg, IdentLoc, End, /*RestoreOnFailure=*/true)) {
- if (SM.onRegister(Reg, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- break;
- }
- if (Parser.isParsingMasm()) {
- const std::pair<StringRef, StringRef> IDField =
- Tok.getString().split('.');
- const StringRef ID = IDField.first, Field = IDField.second;
- SMLoc IDEndLoc = SMLoc::getFromPointer(ID.data() + ID.size());
- if (!Field.empty() &&
- !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
- if (SM.onRegister(Reg, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- AsmFieldInfo Info;
- SMLoc FieldStartLoc = SMLoc::getFromPointer(Field.data());
- if (Parser.lookUpField(Field, Info))
- return Error(FieldStartLoc, "unknown offset");
- else if (SM.onPlus(ErrMsg))
- return Error(getTok().getLoc(), ErrMsg);
- else if (SM.onInteger(Info.Offset, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- SM.setTypeInfo(Info.Type);
- End = consumeToken();
- break;
- }
- }
- }
- // Operator synonyms ("not", "or", etc.)
- bool ParseError = false;
- if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
- if (ParseError)
- return true;
- break;
- }
- if (Parser.isParsingMasm() &&
- ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
- if (ParseError)
- return true;
- break;
- }
- // Symbol reference, when parsing assembly content
- InlineAsmIdentifierInfo Info;
- AsmFieldInfo FieldInfo;
- const MCExpr *Val;
- if (isParsingMSInlineAsm() || Parser.isParsingMasm()) {
- // MS Dot Operator expression
- if (Identifier.count('.') &&
- (PrevTK == AsmToken::RBrac || PrevTK == AsmToken::RParen)) {
- if (ParseIntelDotOperator(SM, End))
- return true;
- break;
- }
- }
- if (isParsingMSInlineAsm()) {
- // MS InlineAsm operators (TYPE/LENGTH/SIZE)
- if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
- if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
- if (SM.onInteger(Val, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- } else {
- return true;
- }
- break;
- }
- // MS InlineAsm identifier
- // Call parseIdentifier() to combine @ with the identifier behind it.
- if (TK == AsmToken::At && Parser.parseIdentifier(Identifier))
- return Error(IdentLoc, "expected identifier");
- if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
- return true;
- else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
- true, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- break;
- }
- if (Parser.isParsingMasm()) {
- if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
- int64_t Val;
- if (ParseMasmOperator(OpKind, Val))
- return true;
- if (SM.onInteger(Val, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- break;
- }
- if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
- // Field offset immediate; <TYPE>.<field specification>
- Lex(); // eat type
- bool EndDot = parseOptionalToken(AsmToken::Dot);
- while (EndDot || (getTok().is(AsmToken::Identifier) &&
- getTok().getString().startswith("."))) {
- getParser().parseIdentifier(Identifier);
- if (!EndDot)
- Identifier.consume_front(".");
- EndDot = Identifier.consume_back(".");
- if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
- FieldInfo)) {
- SMLoc IDEnd =
- SMLoc::getFromPointer(Identifier.data() + Identifier.size());
- return Error(IdentLoc, "Unable to lookup field reference!",
- SMRange(IdentLoc, IDEnd));
- }
- if (!EndDot)
- EndDot = parseOptionalToken(AsmToken::Dot);
- }
- if (SM.onInteger(FieldInfo.Offset, ErrMsg))
- return Error(IdentLoc, ErrMsg);
- break;
- }
- }
- if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
- return Error(Tok.getLoc(), "Unexpected identifier!");
- } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
- false, ErrMsg)) {
- return Error(IdentLoc, ErrMsg);
- }
- break;
- }
- case AsmToken::Integer: {
- // Look for 'b' or 'f' following an Integer as a directional label
- SMLoc Loc = getTok().getLoc();
- int64_t IntVal = getTok().getIntVal();
- End = consumeToken();
- UpdateLocLex = false;
- if (getLexer().getKind() == AsmToken::Identifier) {
- StringRef IDVal = getTok().getString();
- if (IDVal == "f" || IDVal == "b") {
- MCSymbol *Sym =
- getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
- MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- const MCExpr *Val =
- MCSymbolRefExpr::create(Sym, Variant, getContext());
- if (IDVal == "b" && Sym->isUndefined())
- return Error(Loc, "invalid reference to undefined symbol");
- StringRef Identifier = Sym->getName();
- InlineAsmIdentifierInfo Info;
- AsmTypeInfo Type;
- if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
- isParsingMSInlineAsm(), ErrMsg))
- return Error(Loc, ErrMsg);
- End = consumeToken();
- } else {
- if (SM.onInteger(IntVal, ErrMsg))
- return Error(Loc, ErrMsg);
- }
- } else {
- if (SM.onInteger(IntVal, ErrMsg))
- return Error(Loc, ErrMsg);
- }
- break;
- }
- case AsmToken::Plus:
- if (SM.onPlus(ErrMsg))
- return Error(getTok().getLoc(), ErrMsg);
- break;
- case AsmToken::Minus:
- if (SM.onMinus(ErrMsg))
- return Error(getTok().getLoc(), ErrMsg);
- break;
- case AsmToken::Tilde: SM.onNot(); break;
- case AsmToken::Star: SM.onStar(); break;
- case AsmToken::Slash: SM.onDivide(); break;
- case AsmToken::Percent: SM.onMod(); break;
- case AsmToken::Pipe: SM.onOr(); break;
- case AsmToken::Caret: SM.onXor(); break;
- case AsmToken::Amp: SM.onAnd(); break;
- case AsmToken::LessLess:
- SM.onLShift(); break;
- case AsmToken::GreaterGreater:
- SM.onRShift(); break;
- case AsmToken::LBrac:
- if (SM.onLBrac())
- return Error(Tok.getLoc(), "unexpected bracket encountered");
- break;
- case AsmToken::RBrac:
- if (SM.onRBrac())
- return Error(Tok.getLoc(), "unexpected bracket encountered");
- break;
- case AsmToken::LParen: SM.onLParen(); break;
- case AsmToken::RParen: SM.onRParen(); break;
- }
- if (SM.hadError())
- return Error(Tok.getLoc(), "unknown token in expression");
- if (!Done && UpdateLocLex)
- End = consumeToken();
- PrevTK = TK;
- }
- return false;
- }
- void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
- SMLoc Start, SMLoc End) {
- SMLoc Loc = Start;
- unsigned ExprLen = End.getPointer() - Start.getPointer();
- // Skip everything before a symbol displacement (if we have one)
- if (SM.getSym() && !SM.isOffsetOperator()) {
- StringRef SymName = SM.getSymName();
- if (unsigned Len = SymName.data() - Start.getPointer())
- InstInfo->AsmRewrites->emplace_back(AOK_Skip, Start, Len);
- Loc = SMLoc::getFromPointer(SymName.data() + SymName.size());
- ExprLen = End.getPointer() - (SymName.data() + SymName.size());
- // If we have only a symbol then there's no need for a complex rewrite;
- // simply skip everything after it.
- if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
- if (ExprLen)
- InstInfo->AsmRewrites->emplace_back(AOK_Skip, Loc, ExprLen);
- return;
- }
- }
- // Build an Intel Expression rewrite
- StringRef BaseRegStr;
- StringRef IndexRegStr;
- StringRef OffsetNameStr;
- if (SM.getBaseReg())
- BaseRegStr = X86IntelInstPrinter::getRegisterName(SM.getBaseReg());
- if (SM.getIndexReg())
- IndexRegStr = X86IntelInstPrinter::getRegisterName(SM.getIndexReg());
- if (SM.isOffsetOperator())
- OffsetNameStr = SM.getSymName();
- // Emit it
- IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
- SM.getImm(), SM.isMemExpr());
- InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);
- }
- // Inline assembly may use variable names with namespace alias qualifiers.
- bool X86AsmParser::ParseIntelInlineAsmIdentifier(
- const MCExpr *&Val, StringRef &Identifier, InlineAsmIdentifierInfo &Info,
- bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
- MCAsmParser &Parser = getParser();
- assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
- Val = nullptr;
- StringRef LineBuf(Identifier.data());
- SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
- const AsmToken &Tok = Parser.getTok();
- SMLoc Loc = Tok.getLoc();
- // Advance the token stream until the end of the current token is
- // after the end of what the frontend claimed.
- const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
- do {
- End = Tok.getEndLoc();
- getLexer().Lex();
- } while (End.getPointer() < EndPtr);
- Identifier = LineBuf;
- // The frontend should end parsing on an assembler token boundary, unless it
- // failed parsing.
- assert((End.getPointer() == EndPtr ||
- Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) &&
- "frontend claimed part of a token?");
- // If the identifier lookup was unsuccessful, assume that we are dealing with
- // a label.
- if (Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) {
- StringRef InternalName =
- SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
- Loc, false);
- assert(InternalName.size() && "We should have an internal name here.");
- // Push a rewrite for replacing the identifier name with the internal name,
- // unless we are parsing the operand of an offset operator
- if (!IsParsingOffsetOperator)
- InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
- InternalName);
- else
- Identifier = InternalName;
- } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
- return false;
- // Create the symbol reference.
- MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
- MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
- Val = MCSymbolRefExpr::create(Sym, Variant, getParser().getContext());
- return false;
- }
- // ParseRoundingModeOp - Parse AVX-512 rounding mode operand
- bool X86AsmParser::ParseRoundingModeOp(SMLoc Start, OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- const AsmToken &Tok = Parser.getTok();
- // Eat "{" and mark the current place.
- const SMLoc consumedToken = consumeToken();
- if (Tok.isNot(AsmToken::Identifier))
- return Error(Tok.getLoc(), "Expected an identifier after {");
- if (Tok.getIdentifier().startswith("r")){
- int rndMode = StringSwitch<int>(Tok.getIdentifier())
- .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
- .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
- .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
- .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
- .Default(-1);
- if (-1 == rndMode)
- return Error(Tok.getLoc(), "Invalid rounding mode.");
- Parser.Lex(); // Eat "r*" of r*-sae
- if (!getLexer().is(AsmToken::Minus))
- return Error(Tok.getLoc(), "Expected - at this point");
- Parser.Lex(); // Eat "-"
- Parser.Lex(); // Eat the sae
- if (!getLexer().is(AsmToken::RCurly))
- return Error(Tok.getLoc(), "Expected } at this point");
- SMLoc End = Tok.getEndLoc();
- Parser.Lex(); // Eat "}"
- const MCExpr *RndModeOp =
- MCConstantExpr::create(rndMode, Parser.getContext());
- Operands.push_back(X86Operand::CreateImm(RndModeOp, Start, End));
- return false;
- }
- if(Tok.getIdentifier().equals("sae")){
- Parser.Lex(); // Eat the sae
- if (!getLexer().is(AsmToken::RCurly))
- return Error(Tok.getLoc(), "Expected } at this point");
- Parser.Lex(); // Eat "}"
- Operands.push_back(X86Operand::CreateToken("{sae}", consumedToken));
- return false;
- }
- return Error(Tok.getLoc(), "unknown token in expression");
- }
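- // Illustrative examples of the syntax accepted above (Intel forms, assumed):
- // "vaddps zmm0, zmm1, zmm2, {rn-sae}" selects an embedded rounding mode, and
- // "vmaxps zmm0, zmm1, zmm2, {sae}" suppresses exceptions without changing the
- // rounding mode.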
- /// Parse the '.' operator.
- bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
- SMLoc &End) {
- const AsmToken &Tok = getTok();
- AsmFieldInfo Info;
- // Drop the optional '.'.
- StringRef DotDispStr = Tok.getString();
- if (DotDispStr.startswith("."))
- DotDispStr = DotDispStr.drop_front(1);
- StringRef TrailingDot;
- // .Imm gets lexed as a real.
- if (Tok.is(AsmToken::Real)) {
- APInt DotDisp;
- DotDispStr.getAsInteger(10, DotDisp);
- Info.Offset = DotDisp.getZExtValue();
- } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
- Tok.is(AsmToken::Identifier)) {
- if (DotDispStr.endswith(".")) {
- TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
- DotDispStr = DotDispStr.drop_back(1);
- }
- const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
- const StringRef Base = BaseMember.first, Member = BaseMember.second;
- if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
- getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
- getParser().lookUpField(DotDispStr, Info) &&
- (!SemaCallback ||
- SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
- return Error(Tok.getLoc(), "Unable to lookup field reference!");
- } else {
- return Error(Tok.getLoc(), "Unexpected token type!");
- }
- // Eat the DotExpression and update End
- End = SMLoc::getFromPointer(DotDispStr.data());
- const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
- while (Tok.getLoc().getPointer() < DotExprEndLoc)
- Lex();
- if (!TrailingDot.empty())
- getLexer().UnLex(AsmToken(AsmToken::Dot, TrailingDot));
- SM.addImm(Info.Offset);
- SM.setTypeInfo(Info.Type);
- return false;
- }
- /// Parse the 'offset' operator.
- /// This operator is used to specify the location of a given operand
- bool X86AsmParser::ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
- InlineAsmIdentifierInfo &Info,
- SMLoc &End) {
- // Eat offset, mark start of identifier.
- SMLoc Start = Lex().getLoc();
- ID = getTok().getString();
- if (!isParsingMSInlineAsm()) {
- if ((getTok().isNot(AsmToken::Identifier) &&
- getTok().isNot(AsmToken::String)) ||
- getParser().parsePrimaryExpr(Val, End, nullptr))
- return Error(Start, "unexpected token!");
- } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
- return Error(Start, "unable to lookup expression");
- } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal)) {
- return Error(Start, "offset operator cannot yet handle constants");
- }
- return false;
- }
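- // Illustrative example (MS inline asm, myVar is a placeholder): "mov eax,
- // offset myVar" yields the address of the variable rather than its value.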
- // Query a candidate string for being an Intel assembly operator.
- // Report back its kind, or IOK_INVALID if it does not evaluate to a known one.
- unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
- return StringSwitch<unsigned>(Name)
- .Cases("TYPE","type",IOK_TYPE)
- .Cases("SIZE","size",IOK_SIZE)
- .Cases("LENGTH","length",IOK_LENGTH)
- .Default(IOK_INVALID);
- }
- /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators. The LENGTH operator
- /// returns the number of elements in an array. It returns the value 1 for
- /// non-array variables. The SIZE operator returns the size of a C or C++
- /// variable. A variable's size is the product of its LENGTH and TYPE. The
- /// TYPE operator returns the size of a C or C++ type or variable. If the
- /// variable is an array, TYPE returns the size of a single element.
- unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
- MCAsmParser &Parser = getParser();
- const AsmToken &Tok = Parser.getTok();
- Parser.Lex(); // Eat operator.
- const MCExpr *Val = nullptr;
- InlineAsmIdentifierInfo Info;
- SMLoc Start = Tok.getLoc(), End;
- StringRef Identifier = Tok.getString();
- if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
- /*IsUnevaluatedOperand=*/true, End))
- return 0;
- if (!Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
- Error(Start, "unable to lookup expression");
- return 0;
- }
- unsigned CVal = 0;
- switch(OpKind) {
- default: llvm_unreachable("Unexpected operand kind!");
- case IOK_LENGTH: CVal = Info.Var.Length; break;
- case IOK_SIZE: CVal = Info.Var.Size; break;
- case IOK_TYPE: CVal = Info.Var.Type; break;
- }
- return CVal;
- }
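- // Illustrative example (assumes an MS inline-asm context with "int arr[4]"):
- // LENGTH arr evaluates to 4, TYPE arr to 4 (bytes per element), and SIZE arr
- // to 16, matching the doc comment above.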
- // Query a candidate string for being a MASM operator.
- // Report back its kind, or MOK_INVALID if it does not evaluate to a known one.
- unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
- return StringSwitch<unsigned>(Name.lower())
- .Case("type", MOK_TYPE)
- .Cases("size", "sizeof", MOK_SIZEOF)
- .Cases("length", "lengthof", MOK_LENGTHOF)
- .Default(MOK_INVALID);
- }
- /// Parse the 'LENGTHOF', 'SIZEOF', and 'TYPE' operators. The LENGTHOF operator
- /// returns the number of elements in an array. It returns the value 1 for
- /// non-array variables. The SIZEOF operator returns the size of a type or
- /// variable in bytes. A variable's size is the product of its LENGTH and TYPE.
- /// The TYPE operator returns the size of a variable. If the variable is an
- /// array, TYPE returns the size of a single element.
- bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
- MCAsmParser &Parser = getParser();
- SMLoc OpLoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat operator.
- Val = 0;
- if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
- // Check for SIZEOF(<type>) and TYPE(<type>).
- bool InParens = Parser.getTok().is(AsmToken::LParen);
- const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
- AsmTypeInfo Type;
- if (IDTok.is(AsmToken::Identifier) &&
- !Parser.lookUpType(IDTok.getIdentifier(), Type)) {
- Val = Type.Size;
- // Eat tokens.
- if (InParens)
- parseToken(AsmToken::LParen);
- parseToken(AsmToken::Identifier);
- if (InParens)
- parseToken(AsmToken::RParen);
- }
- }
- if (!Val) {
- IntelExprStateMachine SM;
- SMLoc End, Start = Parser.getTok().getLoc();
- if (ParseIntelExpression(SM, End))
- return true;
- switch (OpKind) {
- default:
- llvm_unreachable("Unexpected operand kind!");
- case MOK_SIZEOF:
- Val = SM.getSize();
- break;
- case MOK_LENGTHOF:
- Val = SM.getLength();
- break;
- case MOK_TYPE:
- Val = SM.getElementSize();
- break;
- }
- if (!Val)
- return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
- }
- return false;
- }
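- // Illustrative example (MASM, assuming "arr DWORD 8 DUP (0)"): LENGTHOF arr
- // evaluates to 8, TYPE arr to 4, and SIZEOF arr to 32, per the doc comment
- // above.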
- bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
- Size = StringSwitch<unsigned>(getTok().getString())
- .Cases("BYTE", "byte", 8)
- .Cases("WORD", "word", 16)
- .Cases("DWORD", "dword", 32)
- .Cases("FLOAT", "float", 32)
- .Cases("LONG", "long", 32)
- .Cases("FWORD", "fword", 48)
- .Cases("DOUBLE", "double", 64)
- .Cases("QWORD", "qword", 64)
- .Cases("MMWORD","mmword", 64)
- .Cases("XWORD", "xword", 80)
- .Cases("TBYTE", "tbyte", 80)
- .Cases("XMMWORD", "xmmword", 128)
- .Cases("YMMWORD", "ymmword", 256)
- .Cases("ZMMWORD", "zmmword", 512)
- .Default(0);
- if (Size) {
- const AsmToken &Tok = Lex(); // Eat operand size (e.g., byte, word).
- if (!(Tok.getString().equals("PTR") || Tok.getString().equals("ptr")))
- return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
- Lex(); // Eat ptr.
- }
- return false;
- }
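- // Illustrative examples (Intel syntax): "mov eax, dword ptr [rbx]" gives
- // Size = 32 and "fld tbyte ptr [rsp]" gives Size = 80; a size keyword that is
- // not followed by PTR/ptr triggers the error above.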
- bool X86AsmParser::ParseIntelOperand(OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- const AsmToken &Tok = Parser.getTok();
- SMLoc Start, End;
- // Parse optional Size directive.
- unsigned Size;
- if (ParseIntelMemoryOperandSize(Size))
- return true;
- bool PtrInOperand = bool(Size);
- Start = Tok.getLoc();
- // Rounding mode operand.
- if (getLexer().is(AsmToken::LCurly))
- return ParseRoundingModeOp(Start, Operands);
- // Register operand.
- unsigned RegNo = 0;
- if (Tok.is(AsmToken::Identifier) && !ParseRegister(RegNo, Start, End)) {
- if (RegNo == X86::RIP)
- return Error(Start, "rip can only be used as a base register");
- // A Register followed by ':' is considered a segment override
- if (Tok.isNot(AsmToken::Colon)) {
- if (PtrInOperand)
- return Error(Start, "expected memory operand after 'ptr', "
- "found register operand instead");
- Operands.push_back(X86Operand::CreateReg(RegNo, Start, End));
- return false;
- }
- // An alleged segment override. Check if we have a valid segment register.
- if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
- return Error(Start, "invalid segment register");
- // Eat ':' and update Start location
- Start = Lex().getLoc();
- }
- // Immediates and Memory
- IntelExprStateMachine SM;
- if (ParseIntelExpression(SM, End))
- return true;
- if (isParsingMSInlineAsm())
- RewriteIntelExpression(SM, Start, Tok.getLoc());
- int64_t Imm = SM.getImm();
- const MCExpr *Disp = SM.getSym();
- const MCExpr *ImmDisp = MCConstantExpr::create(Imm, getContext());
- if (Disp && Imm)
- Disp = MCBinaryExpr::createAdd(Disp, ImmDisp, getContext());
- if (!Disp)
- Disp = ImmDisp;
- // RegNo != 0 specifies a valid segment register,
- // and we are parsing a segment override
- if (!SM.isMemExpr() && !RegNo) {
- if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
- const InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
- if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
- // Disp includes the address of a variable; make sure this is recorded
- // for later handling.
- Operands.push_back(X86Operand::CreateImm(Disp, Start, End,
- SM.getSymName(), Info.Var.Decl,
- Info.Var.IsGlobalLV));
- return false;
- }
- }
- Operands.push_back(X86Operand::CreateImm(Disp, Start, End));
- return false;
- }
- StringRef ErrMsg;
- unsigned BaseReg = SM.getBaseReg();
- unsigned IndexReg = SM.getIndexReg();
- unsigned Scale = SM.getScale();
- if (!PtrInOperand)
- Size = SM.getElementSize() << 3;
- if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
- (IndexReg == X86::ESP || IndexReg == X86::RSP))
- std::swap(BaseReg, IndexReg);
- // If BaseReg is a vector register and IndexReg is not, swap them unless
- // Scale was specified in which case it would be an error.
- if (Scale == 0 &&
- !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
- X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
- (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
- X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
- X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
- std::swap(BaseReg, IndexReg);
- if (Scale != 0 &&
- X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
- return Error(Start, "16-bit addresses cannot have a scale");
- // If there was no explicit scale specified, change it to 1.
- if (Scale == 0)
- Scale = 1;
- // If this is a 16-bit addressing mode with the base and index in the wrong
- // order, swap them so CheckBaseRegAndIndexRegAndScale doesn't fail. It is
- // shared with AT&T syntax where order matters.
- if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
- (IndexReg == X86::BX || IndexReg == X86::BP))
- std::swap(BaseReg, IndexReg);
- if ((BaseReg || IndexReg) &&
- CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
- ErrMsg))
- return Error(Start, ErrMsg);
- if (isParsingMSInlineAsm())
- return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size, SM.getSymName(),
- SM.getIdentifierInfo(), Operands);
- // When parsing x64 MS-style assembly, all non-absolute references to a named
- // variable default to RIP-relative.
- if (Parser.isParsingMasm() && is64BitMode() && SM.getElementSize() > 0) {
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
- BaseReg, IndexReg, Scale, Start,
- End, Size,
- /*DefaultBaseReg=*/X86::RIP));
- return false;
- }
- if ((BaseReg || IndexReg || RegNo))
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
- BaseReg, IndexReg, Scale, Start,
- End, Size));
- else
- Operands.push_back(
- X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size));
- return false;
- }
- bool X86AsmParser::ParseATTOperand(OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- switch (getLexer().getKind()) {
- case AsmToken::Dollar: {
- // $42 or $ID -> immediate.
- SMLoc Start = Parser.getTok().getLoc(), End;
- Parser.Lex();
- const MCExpr *Val;
- // This is an immediate, so we should not parse a register. Do a precheck
- // for '%' to supersede intra-register parse errors.
- SMLoc L = Parser.getTok().getLoc();
- if (check(getLexer().is(AsmToken::Percent), L,
- "expected immediate expression") ||
- getParser().parseExpression(Val, End) ||
- check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
- return true;
- Operands.push_back(X86Operand::CreateImm(Val, Start, End));
- return false;
- }
- case AsmToken::LCurly: {
- SMLoc Start = Parser.getTok().getLoc();
- return ParseRoundingModeOp(Start, Operands);
- }
- default: {
- // This is a memory operand or a register. We have some parsing complications
- // as a '(' may be part of an immediate expression or the addressing mode
- // block. This is complicated by the fact that an assembler-level variable
- // may refer either to a register or an immediate expression.
- SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
- const MCExpr *Expr = nullptr;
- unsigned Reg = 0;
- if (getLexer().isNot(AsmToken::LParen)) {
- // No '(' so this is either a displacement expression or a register.
- if (Parser.parseExpression(Expr, EndLoc))
- return true;
- if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
- // Segment Register. Reset Expr and copy value to register.
- Expr = nullptr;
- Reg = RE->getRegNo();
- // Check the register.
- if (Reg == X86::EIZ || Reg == X86::RIZ)
- return Error(
- Loc, "%eiz and %riz can only be used as index registers",
- SMRange(Loc, EndLoc));
- if (Reg == X86::RIP)
- return Error(Loc, "%rip can only be used as a base register",
- SMRange(Loc, EndLoc));
- // Return registers that are not segment prefixes immediately.
- if (!Parser.parseOptionalToken(AsmToken::Colon)) {
- Operands.push_back(X86Operand::CreateReg(Reg, Loc, EndLoc));
- return false;
- }
- if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
- return Error(Loc, "invalid segment register");
- // Accept a '*' absolute memory reference after the segment. Place it
- // before the full memory operand.
- if (getLexer().is(AsmToken::Star))
- Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
- }
- }
- // This is a Memory operand.
- return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);
- }
- }
- }
- // Returns X86::COND_INVALID if not a recognized condition code or alternate
- // mnemonic, otherwise the EFLAGS Condition Code enumerator.
- X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) {
- return StringSwitch<X86::CondCode>(CC)
- .Case("o", X86::COND_O) // Overflow
- .Case("no", X86::COND_NO) // No Overflow
- .Cases("b", "nae", X86::COND_B) // Below/Neither Above nor Equal
- .Cases("ae", "nb", X86::COND_AE) // Above or Equal/Not Below
- .Cases("e", "z", X86::COND_E) // Equal/Zero
- .Cases("ne", "nz", X86::COND_NE) // Not Equal/Not Zero
- .Cases("be", "na", X86::COND_BE) // Below or Equal/Not Above
- .Cases("a", "nbe", X86::COND_A) // Above/Neither Below nor Equal
- .Case("s", X86::COND_S) // Sign
- .Case("ns", X86::COND_NS) // No Sign
- .Cases("p", "pe", X86::COND_P) // Parity/Parity Even
- .Cases("np", "po", X86::COND_NP) // No Parity/Parity Odd
- .Cases("l", "nge", X86::COND_L) // Less/Neither Greater nor Equal
- .Cases("ge", "nl", X86::COND_GE) // Greater or Equal/Not Less
- .Cases("le", "ng", X86::COND_LE) // Less or Equal/Not Greater
- .Cases("g", "nle", X86::COND_G) // Greater/Neither Less nor Equal
- .Default(X86::COND_INVALID);
- }
- // true on failure, false otherwise
- // If no {z} mark was found, the parser doesn't advance.
- bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
- const SMLoc &StartLoc) {
- MCAsmParser &Parser = getParser();
- // Assuming we have just passed the '{' mark, query the next token.
- // If no {z} is found, return false, as no parsing error was encountered.
- if (!(getLexer().is(AsmToken::Identifier) &&
- (getLexer().getTok().getIdentifier() == "z")))
- return false;
- Parser.Lex(); // Eat z
- // Query and eat the '}' mark
- if (!getLexer().is(AsmToken::RCurly))
- return Error(getLexer().getLoc(), "Expected } at this point");
- Parser.Lex(); // Eat '}'
- // Assign Z the {z} mark operand.
- Z = X86Operand::CreateToken("{z}", StartLoc);
- return false;
- }
- // true on failure, false otherwise
- bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- if (getLexer().is(AsmToken::LCurly)) {
- // Eat "{" and mark the current place.
- const SMLoc consumedToken = consumeToken();
- // Distinguish {1to<NUM>} from {%k<NUM>}.
- if(getLexer().is(AsmToken::Integer)) {
- // Parse memory broadcasting ({1to<NUM>}).
- if (getLexer().getTok().getIntVal() != 1)
- return TokError("Expected 1to<NUM> at this point");
- StringRef Prefix = getLexer().getTok().getString();
- Parser.Lex(); // Eat first token of 1to8
- if (!getLexer().is(AsmToken::Identifier))
- return TokError("Expected 1to<NUM> at this point");
- // Recognize only reasonable suffixes.
- SmallVector<char, 5> BroadcastVector;
- StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
- .toStringRef(BroadcastVector);
- if (!BroadcastString.startswith("1to"))
- return TokError("Expected 1to<NUM> at this point");
- const char *BroadcastPrimitive =
- StringSwitch<const char *>(BroadcastString)
- .Case("1to2", "{1to2}")
- .Case("1to4", "{1to4}")
- .Case("1to8", "{1to8}")
- .Case("1to16", "{1to16}")
- .Case("1to32", "{1to32}")
- .Default(nullptr);
- if (!BroadcastPrimitive)
- return TokError("Invalid memory broadcast primitive.");
- Parser.Lex(); // Eat trailing token of 1toN
- if (!getLexer().is(AsmToken::RCurly))
- return TokError("Expected } at this point");
- Parser.Lex(); // Eat "}"
- Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
- consumedToken));
- // No AVX512-specific primitives can follow
- // memory broadcasting, so return.
- return false;
- } else {
- // Parse either {k}{z}, {z}{k}, {k} or {z};
- // the last one has no meaning, but GCC accepts it.
- // At this point, we have just passed a '{' mark.
- std::unique_ptr<X86Operand> Z;
- if (ParseZ(Z, consumedToken))
- return true;
- // Reaching here means that parsing of the alleged '{z}' mark yielded
- // no errors.
- // Check whether a {%k<NUM>} mark still needs to be parsed.
- if (!Z || getLexer().is(AsmToken::LCurly)) {
- SMLoc StartLoc = Z ? consumeToken() : consumedToken;
- // Parse an op-mask register mark ({%k<NUM>}), which is now to be
- // expected
- unsigned RegNo;
- SMLoc RegLoc;
- if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
- X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
- if (RegNo == X86::K0)
- return Error(RegLoc, "Register k0 can't be used as write mask");
- if (!getLexer().is(AsmToken::RCurly))
- return Error(getLexer().getLoc(), "Expected } at this point");
- Operands.push_back(X86Operand::CreateToken("{", StartLoc));
- Operands.push_back(
- X86Operand::CreateReg(RegNo, StartLoc, StartLoc));
- Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
- } else
- return Error(getLexer().getLoc(),
- "Expected an op-mask register at this point");
- // {%k<NUM>} mark is found, inquire for {z}
- if (getLexer().is(AsmToken::LCurly) && !Z) {
- // If we hit a parsing error, or no (expected) {z} mark was found,
- // report an error.
- if (ParseZ(Z, consumeToken()) || !Z)
- return Error(getLexer().getLoc(),
- "Expected a {z} mark at this point");
- }
- // '{z}' on its own is meaningless, hence should be ignored;
- // however, when it is accompanied by a K register,
- // allow it.
- if (Z)
- Operands.push_back(std::move(Z));
- }
- }
- }
- return false;
- }
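- // Illustrative examples (AT&T syntax, assumed): "vaddps (%rax){1to16}, %zmm1,
- // %zmm2" uses the broadcast form parsed above, while
- // "vmovaps %zmm1, %zmm2 {%k1}{z}" exercises the write-mask and zeroing marks.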
- /// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'. The '%ds:' prefix
- /// has already been parsed if present. disp may be provided as well.
- bool X86AsmParser::ParseMemOperand(unsigned SegReg, const MCExpr *Disp,
- SMLoc StartLoc, SMLoc EndLoc,
- OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- SMLoc Loc;
- // Based on the values passed in, we are in one of these cases
- // (with current position (*)):
- // 1. seg : * disp (base-index-scale-expr)
- // 2. seg : *(disp) (base-index-scale-expr)
- // 3. seg : *(base-index-scale-expr)
- // 4. disp *(base-index-scale-expr)
- // 5. *(disp) (base-index-scale-expr)
- // 6. *(base-index-scale-expr)
- // 7. disp *
- // 8. *(disp)
- // If we do not have a displacement yet, check if we're in cases 4 or 6 by
- // checking if the first object after the parenthesis is a register (or an
- // identifier referring to a register) and parse the displacement or default
- // to 0 as appropriate.
- auto isAtMemOperand = [this]() {
- if (this->getLexer().isNot(AsmToken::LParen))
- return false;
- AsmToken Buf[2];
- StringRef Id;
- auto TokCount = this->getLexer().peekTokens(Buf, true);
- if (TokCount == 0)
- return false;
- switch (Buf[0].getKind()) {
- case AsmToken::Percent:
- case AsmToken::Comma:
- return true;
- // The cases below effectively do a peekIdentifier.
- case AsmToken::At:
- case AsmToken::Dollar:
- if ((TokCount > 1) &&
- (Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
- (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
- Id = StringRef(Buf[0].getLoc().getPointer(),
- Buf[1].getIdentifier().size() + 1);
- break;
- case AsmToken::Identifier:
- case AsmToken::String:
- Id = Buf[0].getIdentifier();
- break;
- default:
- return false;
- }
- // We have an ID. Check if it is bound to a register.
- if (!Id.empty()) {
- MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
- if (Sym->isVariable()) {
- auto V = Sym->getVariableValue(/*SetUsed*/ false);
- return isa<X86MCExpr>(V);
- }
- }
- return false;
- };
- if (!Disp) {
- // Parse immediate if we're not at a mem operand yet.
- if (!isAtMemOperand()) {
- if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
- return true;
- assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
- } else {
- // Disp is implicitly zero if we haven't parsed it yet.
- Disp = MCConstantExpr::create(0, Parser.getContext());
- }
- }
- // We are now either at the end of the operand or at the '(' at the start of a
- // base-index-scale-expr.
- if (!parseOptionalToken(AsmToken::LParen)) {
- if (SegReg == 0)
- Operands.push_back(
- X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
- else
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
- 0, 0, 1, StartLoc, EndLoc));
- return false;
- }
- // If we reached here, the '(' has been consumed; process
- // the rest of the memory operand.
- unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
- SMLoc BaseLoc = getLexer().getLoc();
- const MCExpr *E;
- StringRef ErrMsg;
- // Parse BaseReg if one is provided.
- if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
- if (Parser.parseExpression(E, EndLoc) ||
- check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
- return true;
- // Check the register.
- BaseReg = cast<X86MCExpr>(E)->getRegNo();
- if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
- return Error(BaseLoc, "eiz and riz can only be used as index registers",
- SMRange(BaseLoc, EndLoc));
- }
- if (parseOptionalToken(AsmToken::Comma)) {
- // Following the comma we should have either an index register, or a scale
- // value. We don't support the latter form, but we want to parse it
- // correctly.
- //
- // Even though it would be completely consistent to support syntax like
- // "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
- if (getLexer().isNot(AsmToken::RParen)) {
- if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
- return true;
- if (!isa<X86MCExpr>(E)) {
- // We've parsed an unexpected Scale Value instead of an index
- // register. Interpret it as an absolute.
- int64_t ScaleVal;
- if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
- return Error(Loc, "expected absolute expression");
- if (ScaleVal != 1)
- Warning(Loc, "scale factor without index register is ignored");
- Scale = 1;
- } else { // IndexReg Found.
- IndexReg = cast<X86MCExpr>(E)->getRegNo();
- if (BaseReg == X86::RIP)
- return Error(Loc,
- "%rip as base register can not have an index register");
- if (IndexReg == X86::RIP)
- return Error(Loc, "%rip is not allowed as an index register");
- if (parseOptionalToken(AsmToken::Comma)) {
- // Parse the scale amount:
- // ::= ',' [scale-expression]
- // A scale amount without an index is ignored.
- if (getLexer().isNot(AsmToken::RParen)) {
- int64_t ScaleVal;
- if (Parser.parseTokenLoc(Loc) ||
- Parser.parseAbsoluteExpression(ScaleVal))
- return Error(Loc, "expected scale expression");
- Scale = (unsigned)ScaleVal;
- // Validate the scale amount.
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
- Scale != 1)
- return Error(Loc, "scale factor in 16-bit address must be 1");
- if (checkScale(Scale, ErrMsg))
- return Error(Loc, ErrMsg);
- }
- }
- }
- }
- }
- // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
- if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
- return true;
- // This is to support the otherwise illegal operand (%dx) found in various
- // unofficial manuals' examples (e.g. "out[s]?[bwl]? %al, (%dx)"), which must
- // now be supported. Mark such DX variants separately to fix only special cases.
- if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
- isa<MCConstantExpr>(Disp) &&
- cast<MCConstantExpr>(Disp)->getValue() == 0) {
- Operands.push_back(X86Operand::CreateDXReg(BaseLoc, BaseLoc));
- return false;
- }
- if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
- ErrMsg))
- return Error(BaseLoc, ErrMsg);
- if (SegReg || BaseReg || IndexReg)
- Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
- BaseReg, IndexReg, Scale, StartLoc,
- EndLoc));
- else
- Operands.push_back(
- X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
- return false;
- }
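- // Illustrative example (AT&T syntax): "%gs:8(%rbx,%rcx,4)" parses as segment
- // %gs, displacement 8, base %rbx, index %rcx and scale 4, while a bare "(%dx)"
- // becomes the special DX operand handled just above.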
- // Parse either a standard primary expression or a register.
- bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
- MCAsmParser &Parser = getParser();
- // See if this is a register first.
- if (getTok().is(AsmToken::Percent) ||
- (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
- MatchRegisterName(Parser.getTok().getString()))) {
- SMLoc StartLoc = Parser.getTok().getLoc();
- unsigned RegNo;
- if (ParseRegister(RegNo, StartLoc, EndLoc))
- return true;
- Res = X86MCExpr::create(RegNo, Parser.getContext());
- return false;
- }
- return Parser.parsePrimaryExpr(Res, EndLoc, nullptr);
- }
- bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
- SMLoc NameLoc, OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- InstInfo = &Info;
- // Reset the forced VEX encoding.
- ForcedVEXEncoding = VEXEncoding_Default;
- ForcedDispEncoding = DispEncoding_Default;
- // Parse pseudo prefixes.
- while (true) {
- if (Name == "{") {
- if (getLexer().isNot(AsmToken::Identifier))
- return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
- std::string Prefix = Parser.getTok().getString().lower();
- Parser.Lex(); // Eat identifier.
- if (getLexer().isNot(AsmToken::RCurly))
- return Error(Parser.getTok().getLoc(), "Expected '}'");
- Parser.Lex(); // Eat curly.
- if (Prefix == "vex")
- ForcedVEXEncoding = VEXEncoding_VEX;
- else if (Prefix == "vex2")
- ForcedVEXEncoding = VEXEncoding_VEX2;
- else if (Prefix == "vex3")
- ForcedVEXEncoding = VEXEncoding_VEX3;
- else if (Prefix == "evex")
- ForcedVEXEncoding = VEXEncoding_EVEX;
- else if (Prefix == "disp8")
- ForcedDispEncoding = DispEncoding_Disp8;
- else if (Prefix == "disp32")
- ForcedDispEncoding = DispEncoding_Disp32;
- else
- return Error(NameLoc, "unknown prefix");
- NameLoc = Parser.getTok().getLoc();
- if (getLexer().is(AsmToken::LCurly)) {
- Parser.Lex();
- Name = "{";
- } else {
- if (getLexer().isNot(AsmToken::Identifier))
- return Error(Parser.getTok().getLoc(), "Expected identifier");
- // FIXME: The mnemonic won't match correctly if it's not in lower case.
- Name = Parser.getTok().getString();
- Parser.Lex();
- }
- continue;
- }
- // Parse MASM style pseudo prefixes.
- if (isParsingMSInlineAsm()) {
- if (Name.equals_insensitive("vex"))
- ForcedVEXEncoding = VEXEncoding_VEX;
- else if (Name.equals_insensitive("vex2"))
- ForcedVEXEncoding = VEXEncoding_VEX2;
- else if (Name.equals_insensitive("vex3"))
- ForcedVEXEncoding = VEXEncoding_VEX3;
- else if (Name.equals_insensitive("evex"))
- ForcedVEXEncoding = VEXEncoding_EVEX;
- if (ForcedVEXEncoding != VEXEncoding_Default) {
- if (getLexer().isNot(AsmToken::Identifier))
- return Error(Parser.getTok().getLoc(), "Expected identifier");
- // FIXME: The mnemonic won't match correctly if it's not in lower case.
- Name = Parser.getTok().getString();
- NameLoc = Parser.getTok().getLoc();
- Parser.Lex();
- }
- }
- break;
- }
- // Support the suffix syntax for overriding displacement size as well.
- if (Name.consume_back(".d32")) {
- ForcedDispEncoding = DispEncoding_Disp32;
- } else if (Name.consume_back(".d8")) {
- ForcedDispEncoding = DispEncoding_Disp8;
- }
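- // Illustrative examples (assumed spellings): "{disp32} movl %eax, (%rbp)" and
- // the suffix form "movl.d32 %eax, (%rbp)" both force a 32-bit displacement
- // encoding via ForcedDispEncoding.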
- StringRef PatchedName = Name;
- // Hack to skip "short" following Jcc.
- if (isParsingIntelSyntax() &&
- (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
- PatchedName == "jcxz" || PatchedName == "jecxz" ||
- (PatchedName.startswith("j") &&
- ParseConditionCode(PatchedName.substr(1)) != X86::COND_INVALID))) {
- StringRef NextTok = Parser.getTok().getString();
- if (Parser.isParsingMasm() ? NextTok.equals_insensitive("short")
- : NextTok == "short") {
- SMLoc NameEndLoc =
- NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
- // Eat the short keyword.
- Parser.Lex();
- // MS and GAS ignore the short keyword; they both determine the jmp type
- // based on the distance of the label. (NASM does emit different code with
- // and without "short," though.)
- InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
- NextTok.size() + 1);
- }
- }
- // FIXME: Hack to recognize setneb as setne.
- if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
- PatchedName != "setb" && PatchedName != "setnb")
- PatchedName = PatchedName.substr(0, Name.size()-1);
- unsigned ComparisonPredicate = ~0U;
- // FIXME: Hack to recognize cmp<comparison code>{sh,ss,sd,ph,ps,pd}.
- if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
- (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
- PatchedName.endswith("sh") || PatchedName.endswith("ph") ||
- PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
- bool IsVCMP = PatchedName[0] == 'v';
- unsigned CCIdx = IsVCMP ? 4 : 3;
- unsigned CC = StringSwitch<unsigned>(
- PatchedName.slice(CCIdx, PatchedName.size() - 2))
- .Case("eq", 0x00)
- .Case("eq_oq", 0x00)
- .Case("lt", 0x01)
- .Case("lt_os", 0x01)
- .Case("le", 0x02)
- .Case("le_os", 0x02)
- .Case("unord", 0x03)
- .Case("unord_q", 0x03)
- .Case("neq", 0x04)
- .Case("neq_uq", 0x04)
- .Case("nlt", 0x05)
- .Case("nlt_us", 0x05)
- .Case("nle", 0x06)
- .Case("nle_us", 0x06)
- .Case("ord", 0x07)
- .Case("ord_q", 0x07)
- /* AVX only from here */
- .Case("eq_uq", 0x08)
- .Case("nge", 0x09)
- .Case("nge_us", 0x09)
- .Case("ngt", 0x0A)
- .Case("ngt_us", 0x0A)
- .Case("false", 0x0B)
- .Case("false_oq", 0x0B)
- .Case("neq_oq", 0x0C)
- .Case("ge", 0x0D)
- .Case("ge_os", 0x0D)
- .Case("gt", 0x0E)
- .Case("gt_os", 0x0E)
- .Case("true", 0x0F)
- .Case("true_uq", 0x0F)
- .Case("eq_os", 0x10)
- .Case("lt_oq", 0x11)
- .Case("le_oq", 0x12)
- .Case("unord_s", 0x13)
- .Case("neq_us", 0x14)
- .Case("nlt_uq", 0x15)
- .Case("nle_uq", 0x16)
- .Case("ord_s", 0x17)
- .Case("eq_us", 0x18)
- .Case("nge_uq", 0x19)
- .Case("ngt_uq", 0x1A)
- .Case("false_os", 0x1B)
- .Case("neq_os", 0x1C)
- .Case("ge_oq", 0x1D)
- .Case("gt_oq", 0x1E)
- .Case("true_us", 0x1F)
- .Default(~0U);
- if (CC != ~0U && (IsVCMP || CC < 8) &&
- (IsVCMP || PatchedName.back() != 'h')) {
- if (PatchedName.endswith("ss"))
- PatchedName = IsVCMP ? "vcmpss" : "cmpss";
- else if (PatchedName.endswith("sd"))
- PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
- else if (PatchedName.endswith("ps"))
- PatchedName = IsVCMP ? "vcmpps" : "cmpps";
- else if (PatchedName.endswith("pd"))
- PatchedName = IsVCMP ? "vcmppd" : "cmppd";
- else if (PatchedName.endswith("sh"))
- PatchedName = "vcmpsh";
- else if (PatchedName.endswith("ph"))
- PatchedName = "vcmpph";
- else
- llvm_unreachable("Unexpected suffix!");
- ComparisonPredicate = CC;
- }
- }
- // FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
- if (PatchedName.startswith("vpcmp") &&
- (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
- PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
- unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
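- // The suffix is "ub"/"uw"/"ud"/"uq" for the unsigned forms and "b"/"w"/"d"/"q"
- // otherwise; the condition code sits between "vpcmp" and that suffix.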
- unsigned CC = StringSwitch<unsigned>(
- PatchedName.slice(5, PatchedName.size() - SuffixSize))
- .Case("eq", 0x0) // Only allowed on unsigned. Checked below.
- .Case("lt", 0x1)
- .Case("le", 0x2)
- //.Case("false", 0x3) // Not a documented alias.
- .Case("neq", 0x4)
- .Case("nlt", 0x5)
- .Case("nle", 0x6)
- //.Case("true", 0x7) // Not a documented alias.
- .Default(~0U);
- if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
- switch (PatchedName.back()) {
- default: llvm_unreachable("Unexpected character!");
- case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
- case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
- case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
- case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
- }
- // Set up the immediate to push into the operands later.
- ComparisonPredicate = CC;
- }
- }
- // FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
- if (PatchedName.startswith("vpcom") &&
- (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
- PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
- unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
- unsigned CC = StringSwitch<unsigned>(
- PatchedName.slice(5, PatchedName.size() - SuffixSize))
- .Case("lt", 0x0)
- .Case("le", 0x1)
- .Case("gt", 0x2)
- .Case("ge", 0x3)
- .Case("eq", 0x4)
- .Case("neq", 0x5)
- .Case("false", 0x6)
- .Case("true", 0x7)
- .Default(~0U);
- if (CC != ~0U) {
- switch (PatchedName.back()) {
- default: llvm_unreachable("Unexpected character!");
- case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
- case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
- case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
- case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
- }
- // Set up the immediate to push into the operands later.
- ComparisonPredicate = CC;
- }
- }
- // Determine whether this is an instruction prefix.
- // FIXME:
- // Enhance the robustness of prefix integrity checks. For example, the
- // following forms are currently tolerated:
- // repz repnz <insn> ; GAS errors for the use of two similar prefixes
- // lock addq %rax, %rbx ; Destination operand must be of memory type
- // xacquire <insn> ; xacquire must be accompanied by 'lock'
- bool IsPrefix =
- StringSwitch<bool>(Name)
- .Cases("cs", "ds", "es", "fs", "gs", "ss", true)
- .Cases("rex64", "data32", "data16", "addr32", "addr16", true)
- .Cases("xacquire", "xrelease", true)
- .Cases("acquire", "release", isParsingIntelSyntax())
- .Default(false);
- auto isLockRepeatNtPrefix = [](StringRef N) {
- return StringSwitch<bool>(N)
- .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)
- .Default(false);
- };
- bool CurlyAsEndOfStatement = false;
- unsigned Flags = X86::IP_NO_PREFIX;
- while (isLockRepeatNtPrefix(Name.lower())) {
- unsigned Prefix =
- StringSwitch<unsigned>(Name)
- .Cases("lock", "lock", X86::IP_HAS_LOCK)
- .Cases("rep", "repe", "repz", X86::IP_HAS_REPEAT)
- .Cases("repne", "repnz", X86::IP_HAS_REPEAT_NE)
- .Cases("notrack", "notrack", X86::IP_HAS_NOTRACK)
- .Default(X86::IP_NO_PREFIX); // Invalid prefix (impossible)
- Flags |= Prefix;
- if (getLexer().is(AsmToken::EndOfStatement)) {
- // We don't have a real instruction with the given prefix;
- // use the prefix itself as the instruction.
- // TODO: there could be several prefixes one after another.
- Flags = X86::IP_NO_PREFIX;
- break;
- }
- // FIXME: The mnemonic won't match correctly if it's not in lower case.
- Name = Parser.getTok().getString();
- Parser.Lex(); // eat the prefix
- // Hack: we could have something like "rep # some comment" or
- // "lock; cmpxchg16b $1" or "lock\0A\09incl" or "lock/incl"
- while (Name.startswith(";") || Name.startswith("\n") ||
- Name.startswith("#") || Name.startswith("\t") ||
- Name.startswith("/")) {
- // FIXME: The mnemonic won't match correctly if it's not in lower case.
- Name = Parser.getTok().getString();
- Parser.Lex(); // go to next prefix or instr
- }
- }
- if (Flags)
- PatchedName = Name;
- // Hacks to handle 'data16' and 'data32'
- if (PatchedName == "data16" && is16BitMode()) {
- return Error(NameLoc, "redundant data16 prefix");
- }
- if (PatchedName == "data32") {
- if (is32BitMode())
- return Error(NameLoc, "redundant data32 prefix");
- if (is64BitMode())
- return Error(NameLoc, "'data32' is not supported in 64-bit mode");
- // Hack: rewrite the name to 'data16' for the table lookup.
- PatchedName = "data16";
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- StringRef Next = Parser.getTok().getString();
- getLexer().Lex();
- // data32 effectively changes the instruction suffix.
- // TODO Generalize.
- if (Next == "callw")
- Next = "calll";
- if (Next == "ljmpw")
- Next = "ljmpl";
- Name = Next;
- PatchedName = Name;
- ForcedDataPrefix = X86::Mode32Bit;
- IsPrefix = false;
- }
- }
- Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
- // Push the immediate if we extracted one from the mnemonic.
- if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
- const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
- getParser().getContext());
- Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
- }
- // This does the actual operand parsing. Don't parse any more if we have a
- // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
- // just want to parse the "lock" as the first instruction and the "incl" as
- // the next one.
- if (getLexer().isNot(AsmToken::EndOfStatement) && !IsPrefix) {
- // Parse '*' modifier.
- if (getLexer().is(AsmToken::Star))
- Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
- // Read the operands.
- while (true) {
- if (ParseOperand(Operands))
- return true;
- if (HandleAVX512Operand(Operands))
- return true;
- // check for comma and eat it
- if (getLexer().is(AsmToken::Comma))
- Parser.Lex();
- else
- break;
- }
- // In MS inline asm, curly braces mark the beginning/end of a block;
- // therefore they should be interpreted as an end of statement.
- CurlyAsEndOfStatement =
- isParsingIntelSyntax() && isParsingMSInlineAsm() &&
- (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
- if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
- return TokError("unexpected token in argument list");
- }
- // Push the immediate if we extracted one from the mnemonic.
- if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
- const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
- getParser().getContext());
- Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
- }
- // Consume the EndOfStatement or the prefix separator Slash
- if (getLexer().is(AsmToken::EndOfStatement) ||
- (IsPrefix && getLexer().is(AsmToken::Slash)))
- Parser.Lex();
- else if (CurlyAsEndOfStatement)
- // Add an actual EndOfStatement before the curly brace
- Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
- getLexer().getTok().getLoc(), 0);
- // This is for GAS compatibility and cannot be done in the .td files.
- // Append "p" to certain no-argument floating-point instructions.
- // For example: fsub --> fsubp
- bool IsFp =
- Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
- if (IsFp && Operands.size() == 1) {
- const char *Repl = StringSwitch<const char *>(Name)
- .Case("fsub", "fsubp")
- .Case("fdiv", "fdivp")
- .Case("fsubr", "fsubrp")
- .Case("fdivr", "fdivrp");
- static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
- }
- if ((Name == "mov" || Name == "movw" || Name == "movl") &&
- (Operands.size() == 3)) {
- X86Operand &Op1 = (X86Operand &)*Operands[1];
- X86Operand &Op2 = (X86Operand &)*Operands[2];
- SMLoc Loc = Op1.getEndLoc();
- // Moving a 32- or 16-bit value into a segment register has the same
- // behavior. Modify such instructions to always take the shorter form.
- if (Op1.isReg() && Op2.isReg() &&
- X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
- Op2.getReg()) &&
- (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
- X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
- // Change instruction name to match new instruction.
- if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
- Name = is16BitMode() ? "movw" : "movl";
- Operands[0] = X86Operand::CreateToken(Name, NameLoc);
- }
- // Select the correct equivalent 16-/32-bit source register.
- unsigned Reg =
- getX86SubSuperRegisterOrZero(Op1.getReg(), is16BitMode() ? 16 : 32);
- Operands[1] = X86Operand::CreateReg(Reg, Loc, Loc);
- }
- }
- // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
- // "outb %al, %dx". Out doesn't take a memory form, but this is a widely
- // documented form in various unofficial manuals, so a lot of code uses it.
- if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
- Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
- Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands.back();
- if (Op.isDXReg())
- Operands.back() = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
- Op.getEndLoc());
- }
- // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
- if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
- Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
- Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands[1];
- if (Op.isDXReg())
- Operands[1] = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
- Op.getEndLoc());
- }
- SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
- bool HadVerifyError = false;
- // Append default arguments to "ins[bwld]"
- if (Name.startswith("ins") &&
- (Operands.size() == 1 || Operands.size() == 3) &&
- (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
- Name == "ins")) {
- AddDefaultSrcDestOperands(TmpOperands,
- X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
- DefaultMemDIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Append default arguments to "outs[bwld]"
- if (Name.startswith("outs") &&
- (Operands.size() == 1 || Operands.size() == 3) &&
- (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
- Name == "outsd" || Name == "outs")) {
- AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
- X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
- // values of $SIREG according to the mode. It would be nice if this
- // could be achieved with InstAlias in the tables.
- if (Name.startswith("lods") &&
- (Operands.size() == 1 || Operands.size() == 2) &&
- (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
- Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
- TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
- // values of $DIREG according to the mode. It would be nice if this
- // could be achieved with InstAlias in the tables.
- if (Name.startswith("stos") &&
- (Operands.size() == 1 || Operands.size() == 2) &&
- (Name == "stos" || Name == "stosb" || Name == "stosw" ||
- Name == "stosl" || Name == "stosd" || Name == "stosq")) {
- TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
- // values of $DIREG according to the mode. It would be nice if this
- // could be achieved with InstAlias in the tables.
- if (Name.startswith("scas") &&
- (Operands.size() == 1 || Operands.size() == 2) &&
- (Name == "scas" || Name == "scasb" || Name == "scasw" ||
- Name == "scasl" || Name == "scasd" || Name == "scasq")) {
- TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Add default SI and DI operands to "cmps[bwlq]".
- if (Name.startswith("cmps") &&
- (Operands.size() == 1 || Operands.size() == 3) &&
- (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
- Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
- AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
- DefaultMemSIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Add default SI and DI operands to "movs[bwlq]".
- if (((Name.startswith("movs") &&
- (Name == "movs" || Name == "movsb" || Name == "movsw" ||
- Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
- (Name.startswith("smov") &&
- (Name == "smov" || Name == "smovb" || Name == "smovw" ||
- Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
- (Operands.size() == 1 || Operands.size() == 3)) {
- if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
- Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
- AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
- DefaultMemDIOperand(NameLoc));
- HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
- }
- // Check if we encountered an error for one of the string instructions.
- if (HadVerifyError) {
- return HadVerifyError;
- }
- // Transforms "xlat mem8" into "xlatb"
- if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
- X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
- if (Op1.isMem8()) {
- Warning(Op1.getStartLoc(), "memory operand is only for determining the "
- "size, (R|E)BX will be used for the location");
- Operands.pop_back();
- static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
- }
- }
- if (Flags)
- Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
- return false;
- }
- bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
- const MCRegisterInfo *MRI = getContext().getRegisterInfo();
- switch (Inst.getOpcode()) {
- default: return false;
- case X86::JMP_1:
- // {disp32} forces a larger displacement as if the instruction was relaxed.
- // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
- // This matches GNU assembler.
- if (ForcedDispEncoding == DispEncoding_Disp32) {
- Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
- return true;
- }
- return false;
- case X86::JCC_1:
- // {disp32} forces a larger displacement as if the instruction was relaxed.
- // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
- // This matches GNU assembler.
- if (ForcedDispEncoding == DispEncoding_Disp32) {
- Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
- return true;
- }
- return false;
- case X86::VMOVZPQILo2PQIrr:
- case X86::VMOVAPDrr:
- case X86::VMOVAPDYrr:
- case X86::VMOVAPSrr:
- case X86::VMOVAPSYrr:
- case X86::VMOVDQArr:
- case X86::VMOVDQAYrr:
- case X86::VMOVDQUrr:
- case X86::VMOVDQUYrr:
- case X86::VMOVUPDrr:
- case X86::VMOVUPDYrr:
- case X86::VMOVUPSrr:
- case X86::VMOVUPSYrr: {
- // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
- // the registers is extended, but the other isn't.
- if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
- MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
- MRI->getEncodingValue(Inst.getOperand(1).getReg()) < 8)
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
- case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
- case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
- case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
- case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
- case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
- case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
- case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
- case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
- case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
- case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
- case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
- case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- case X86::VMOVSDrr:
- case X86::VMOVSSrr: {
- // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
- // the registers is extended, but the other isn't.
- if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
- MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
- MRI->getEncodingValue(Inst.getOperand(2).getReg()) < 8)
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
- case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- case X86::RCR8ri: case X86::RCR16ri: case X86::RCR32ri: case X86::RCR64ri:
- case X86::RCL8ri: case X86::RCL16ri: case X86::RCL32ri: case X86::RCL64ri:
- case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
- case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
- case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
- case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
- case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri: {
- // Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
- // FIXME: It would be great if we could just do this with an InstAlias.
- if (!Inst.getOperand(2).isImm() || Inst.getOperand(2).getImm() != 1)
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::RCR8ri: NewOpc = X86::RCR8r1; break;
- case X86::RCR16ri: NewOpc = X86::RCR16r1; break;
- case X86::RCR32ri: NewOpc = X86::RCR32r1; break;
- case X86::RCR64ri: NewOpc = X86::RCR64r1; break;
- case X86::RCL8ri: NewOpc = X86::RCL8r1; break;
- case X86::RCL16ri: NewOpc = X86::RCL16r1; break;
- case X86::RCL32ri: NewOpc = X86::RCL32r1; break;
- case X86::RCL64ri: NewOpc = X86::RCL64r1; break;
- case X86::ROR8ri: NewOpc = X86::ROR8r1; break;
- case X86::ROR16ri: NewOpc = X86::ROR16r1; break;
- case X86::ROR32ri: NewOpc = X86::ROR32r1; break;
- case X86::ROR64ri: NewOpc = X86::ROR64r1; break;
- case X86::ROL8ri: NewOpc = X86::ROL8r1; break;
- case X86::ROL16ri: NewOpc = X86::ROL16r1; break;
- case X86::ROL32ri: NewOpc = X86::ROL32r1; break;
- case X86::ROL64ri: NewOpc = X86::ROL64r1; break;
- case X86::SAR8ri: NewOpc = X86::SAR8r1; break;
- case X86::SAR16ri: NewOpc = X86::SAR16r1; break;
- case X86::SAR32ri: NewOpc = X86::SAR32r1; break;
- case X86::SAR64ri: NewOpc = X86::SAR64r1; break;
- case X86::SHR8ri: NewOpc = X86::SHR8r1; break;
- case X86::SHR16ri: NewOpc = X86::SHR16r1; break;
- case X86::SHR32ri: NewOpc = X86::SHR32r1; break;
- case X86::SHR64ri: NewOpc = X86::SHR64r1; break;
- case X86::SHL8ri: NewOpc = X86::SHL8r1; break;
- case X86::SHL16ri: NewOpc = X86::SHL16r1; break;
- case X86::SHL32ri: NewOpc = X86::SHL32r1; break;
- case X86::SHL64ri: NewOpc = X86::SHL64r1; break;
- }
- MCInst TmpInst;
- TmpInst.setOpcode(NewOpc);
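- // Copy only the destination and tied source register operands; the shift
- // count of one is implicit in the "r1" form of the opcode.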
- TmpInst.addOperand(Inst.getOperand(0));
- TmpInst.addOperand(Inst.getOperand(1));
- Inst = TmpInst;
- return true;
- }
- case X86::RCR8mi: case X86::RCR16mi: case X86::RCR32mi: case X86::RCR64mi:
- case X86::RCL8mi: case X86::RCL16mi: case X86::RCL32mi: case X86::RCL64mi:
- case X86::ROR8mi: case X86::ROR16mi: case X86::ROR32mi: case X86::ROR64mi:
- case X86::ROL8mi: case X86::ROL16mi: case X86::ROL32mi: case X86::ROL64mi:
- case X86::SAR8mi: case X86::SAR16mi: case X86::SAR32mi: case X86::SAR64mi:
- case X86::SHR8mi: case X86::SHR16mi: case X86::SHR32mi: case X86::SHR64mi:
- case X86::SHL8mi: case X86::SHL16mi: case X86::SHL32mi: case X86::SHL64mi: {
- // Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
- // FIXME: It would be great if we could just do this with an InstAlias.
- if (!Inst.getOperand(X86::AddrNumOperands).isImm() ||
- Inst.getOperand(X86::AddrNumOperands).getImm() != 1)
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::RCR8mi: NewOpc = X86::RCR8m1; break;
- case X86::RCR16mi: NewOpc = X86::RCR16m1; break;
- case X86::RCR32mi: NewOpc = X86::RCR32m1; break;
- case X86::RCR64mi: NewOpc = X86::RCR64m1; break;
- case X86::RCL8mi: NewOpc = X86::RCL8m1; break;
- case X86::RCL16mi: NewOpc = X86::RCL16m1; break;
- case X86::RCL32mi: NewOpc = X86::RCL32m1; break;
- case X86::RCL64mi: NewOpc = X86::RCL64m1; break;
- case X86::ROR8mi: NewOpc = X86::ROR8m1; break;
- case X86::ROR16mi: NewOpc = X86::ROR16m1; break;
- case X86::ROR32mi: NewOpc = X86::ROR32m1; break;
- case X86::ROR64mi: NewOpc = X86::ROR64m1; break;
- case X86::ROL8mi: NewOpc = X86::ROL8m1; break;
- case X86::ROL16mi: NewOpc = X86::ROL16m1; break;
- case X86::ROL32mi: NewOpc = X86::ROL32m1; break;
- case X86::ROL64mi: NewOpc = X86::ROL64m1; break;
- case X86::SAR8mi: NewOpc = X86::SAR8m1; break;
- case X86::SAR16mi: NewOpc = X86::SAR16m1; break;
- case X86::SAR32mi: NewOpc = X86::SAR32m1; break;
- case X86::SAR64mi: NewOpc = X86::SAR64m1; break;
- case X86::SHR8mi: NewOpc = X86::SHR8m1; break;
- case X86::SHR16mi: NewOpc = X86::SHR16m1; break;
- case X86::SHR32mi: NewOpc = X86::SHR32m1; break;
- case X86::SHR64mi: NewOpc = X86::SHR64m1; break;
- case X86::SHL8mi: NewOpc = X86::SHL8m1; break;
- case X86::SHL16mi: NewOpc = X86::SHL16m1; break;
- case X86::SHL32mi: NewOpc = X86::SHL32m1; break;
- case X86::SHL64mi: NewOpc = X86::SHL64m1; break;
- }
- MCInst TmpInst;
- TmpInst.setOpcode(NewOpc);
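- // Copy just the five memory operands; the explicit $1 immediate is dropped
- // because the "m1" form of the opcode shifts by one implicitly.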
- for (int i = 0; i != X86::AddrNumOperands; ++i)
- TmpInst.addOperand(Inst.getOperand(i));
- Inst = TmpInst;
- return true;
- }
- case X86::INT: {
- // Transforms "int $3" into "int3" as a size optimization. We can't write an
- // instalias with an immediate operand yet.
- if (!Inst.getOperand(0).isImm() || Inst.getOperand(0).getImm() != 3)
- return false;
- MCInst TmpInst;
- TmpInst.setOpcode(X86::INT3);
- Inst = TmpInst;
- return true;
- }
- }
- }
- bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
- const MCRegisterInfo *MRI = getContext().getRegisterInfo();
- switch (Inst.getOpcode()) {
- case X86::VGATHERDPDYrm:
- case X86::VGATHERDPDrm:
- case X86::VGATHERDPSYrm:
- case X86::VGATHERDPSrm:
- case X86::VGATHERQPDYrm:
- case X86::VGATHERQPDrm:
- case X86::VGATHERQPSYrm:
- case X86::VGATHERQPSrm:
- case X86::VPGATHERDDYrm:
- case X86::VPGATHERDDrm:
- case X86::VPGATHERDQYrm:
- case X86::VPGATHERDQrm:
- case X86::VPGATHERQDYrm:
- case X86::VPGATHERQDrm:
- case X86::VPGATHERQQYrm:
- case X86::VPGATHERQQrm: {
- unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
- unsigned Mask = MRI->getEncodingValue(Inst.getOperand(1).getReg());
- unsigned Index =
- MRI->getEncodingValue(Inst.getOperand(3 + X86::AddrIndexReg).getReg());
- if (Dest == Mask || Dest == Index || Mask == Index)
- return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
- "registers should be distinct");
- break;
- }
- case X86::VGATHERDPDZ128rm:
- case X86::VGATHERDPDZ256rm:
- case X86::VGATHERDPDZrm:
- case X86::VGATHERDPSZ128rm:
- case X86::VGATHERDPSZ256rm:
- case X86::VGATHERDPSZrm:
- case X86::VGATHERQPDZ128rm:
- case X86::VGATHERQPDZ256rm:
- case X86::VGATHERQPDZrm:
- case X86::VGATHERQPSZ128rm:
- case X86::VGATHERQPSZ256rm:
- case X86::VGATHERQPSZrm:
- case X86::VPGATHERDDZ128rm:
- case X86::VPGATHERDDZ256rm:
- case X86::VPGATHERDDZrm:
- case X86::VPGATHERDQZ128rm:
- case X86::VPGATHERDQZ256rm:
- case X86::VPGATHERDQZrm:
- case X86::VPGATHERQDZ128rm:
- case X86::VPGATHERQDZ256rm:
- case X86::VPGATHERQDZrm:
- case X86::VPGATHERQQZ128rm:
- case X86::VPGATHERQQZ256rm:
- case X86::VPGATHERQQZrm: {
- unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
- unsigned Index =
- MRI->getEncodingValue(Inst.getOperand(4 + X86::AddrIndexReg).getReg());
- if (Dest == Index)
- return Warning(Ops[0]->getStartLoc(), "index and destination registers "
- "should be distinct");
- break;
- }
- case X86::V4FMADDPSrm:
- case X86::V4FMADDPSrmk:
- case X86::V4FMADDPSrmkz:
- case X86::V4FMADDSSrm:
- case X86::V4FMADDSSrmk:
- case X86::V4FMADDSSrmkz:
- case X86::V4FNMADDPSrm:
- case X86::V4FNMADDPSrmk:
- case X86::V4FNMADDPSrmkz:
- case X86::V4FNMADDSSrm:
- case X86::V4FNMADDSSrmk:
- case X86::V4FNMADDSSrmkz:
- case X86::VP4DPWSSDSrm:
- case X86::VP4DPWSSDSrmk:
- case X86::VP4DPWSSDSrmkz:
- case X86::VP4DPWSSDrm:
- case X86::VP4DPWSSDrmk:
- case X86::VP4DPWSSDrmkz: {
- unsigned Src2 = Inst.getOperand(Inst.getNumOperands() -
- X86::AddrNumOperands - 1).getReg();
- unsigned Src2Enc = MRI->getEncodingValue(Src2);
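- // These instructions implicitly read the block of four consecutive registers
- // starting at Src2, so Src2's encoding must be a multiple of four.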
- if (Src2Enc % 4 != 0) {
- StringRef RegName = X86IntelInstPrinter::getRegisterName(Src2);
- unsigned GroupStart = (Src2Enc / 4) * 4;
- unsigned GroupEnd = GroupStart + 3;
- return Warning(Ops[0]->getStartLoc(),
- "source register '" + RegName + "' implicitly denotes '" +
- RegName.take_front(3) + Twine(GroupStart) + "' to '" +
- RegName.take_front(3) + Twine(GroupEnd) +
- "' source group");
- }
- break;
- }
- case X86::VFCMADDCPHZ128m:
- case X86::VFCMADDCPHZ256m:
- case X86::VFCMADDCPHZm:
- case X86::VFCMADDCPHZ128mb:
- case X86::VFCMADDCPHZ256mb:
- case X86::VFCMADDCPHZmb:
- case X86::VFCMADDCPHZ128mbk:
- case X86::VFCMADDCPHZ256mbk:
- case X86::VFCMADDCPHZmbk:
- case X86::VFCMADDCPHZ128mbkz:
- case X86::VFCMADDCPHZ256mbkz:
- case X86::VFCMADDCPHZmbkz:
- case X86::VFCMADDCPHZ128mk:
- case X86::VFCMADDCPHZ256mk:
- case X86::VFCMADDCPHZmk:
- case X86::VFCMADDCPHZ128mkz:
- case X86::VFCMADDCPHZ256mkz:
- case X86::VFCMADDCPHZmkz:
- case X86::VFCMADDCPHZ128r:
- case X86::VFCMADDCPHZ256r:
- case X86::VFCMADDCPHZr:
- case X86::VFCMADDCPHZ128rk:
- case X86::VFCMADDCPHZ256rk:
- case X86::VFCMADDCPHZrk:
- case X86::VFCMADDCPHZ128rkz:
- case X86::VFCMADDCPHZ256rkz:
- case X86::VFCMADDCPHZrkz:
- case X86::VFCMADDCPHZrb:
- case X86::VFCMADDCPHZrbk:
- case X86::VFCMADDCPHZrbkz:
- case X86::VFCMADDCSHZm:
- case X86::VFCMADDCSHZmk:
- case X86::VFCMADDCSHZmkz:
- case X86::VFCMADDCSHZr:
- case X86::VFCMADDCSHZrb:
- case X86::VFCMADDCSHZrbk:
- case X86::VFCMADDCSHZrbkz:
- case X86::VFCMADDCSHZrk:
- case X86::VFCMADDCSHZrkz:
- case X86::VFMADDCPHZ128m:
- case X86::VFMADDCPHZ256m:
- case X86::VFMADDCPHZm:
- case X86::VFMADDCPHZ128mb:
- case X86::VFMADDCPHZ256mb:
- case X86::VFMADDCPHZmb:
- case X86::VFMADDCPHZ128mbk:
- case X86::VFMADDCPHZ256mbk:
- case X86::VFMADDCPHZmbk:
- case X86::VFMADDCPHZ128mbkz:
- case X86::VFMADDCPHZ256mbkz:
- case X86::VFMADDCPHZmbkz:
- case X86::VFMADDCPHZ128mk:
- case X86::VFMADDCPHZ256mk:
- case X86::VFMADDCPHZmk:
- case X86::VFMADDCPHZ128mkz:
- case X86::VFMADDCPHZ256mkz:
- case X86::VFMADDCPHZmkz:
- case X86::VFMADDCPHZ128r:
- case X86::VFMADDCPHZ256r:
- case X86::VFMADDCPHZr:
- case X86::VFMADDCPHZ128rk:
- case X86::VFMADDCPHZ256rk:
- case X86::VFMADDCPHZrk:
- case X86::VFMADDCPHZ128rkz:
- case X86::VFMADDCPHZ256rkz:
- case X86::VFMADDCPHZrkz:
- case X86::VFMADDCPHZrb:
- case X86::VFMADDCPHZrbk:
- case X86::VFMADDCPHZrbkz:
- case X86::VFMADDCSHZm:
- case X86::VFMADDCSHZmk:
- case X86::VFMADDCSHZmkz:
- case X86::VFMADDCSHZr:
- case X86::VFMADDCSHZrb:
- case X86::VFMADDCSHZrbk:
- case X86::VFMADDCSHZrbkz:
- case X86::VFMADDCSHZrk:
- case X86::VFMADDCSHZrkz: {
- unsigned Dest = Inst.getOperand(0).getReg();
- for (unsigned i = 2; i < Inst.getNumOperands(); i++)
- if (Inst.getOperand(i).isReg() && Dest == Inst.getOperand(i).getReg())
- return Warning(Ops[0]->getStartLoc(), "Destination register should be "
- "distinct from source registers");
- break;
- }
- case X86::VFCMULCPHZ128rm:
- case X86::VFCMULCPHZ256rm:
- case X86::VFCMULCPHZrm:
- case X86::VFCMULCPHZ128rmb:
- case X86::VFCMULCPHZ256rmb:
- case X86::VFCMULCPHZrmb:
- case X86::VFCMULCPHZ128rmbk:
- case X86::VFCMULCPHZ256rmbk:
- case X86::VFCMULCPHZrmbk:
- case X86::VFCMULCPHZ128rmbkz:
- case X86::VFCMULCPHZ256rmbkz:
- case X86::VFCMULCPHZrmbkz:
- case X86::VFCMULCPHZ128rmk:
- case X86::VFCMULCPHZ256rmk:
- case X86::VFCMULCPHZrmk:
- case X86::VFCMULCPHZ128rmkz:
- case X86::VFCMULCPHZ256rmkz:
- case X86::VFCMULCPHZrmkz:
- case X86::VFCMULCPHZ128rr:
- case X86::VFCMULCPHZ256rr:
- case X86::VFCMULCPHZrr:
- case X86::VFCMULCPHZ128rrk:
- case X86::VFCMULCPHZ256rrk:
- case X86::VFCMULCPHZrrk:
- case X86::VFCMULCPHZ128rrkz:
- case X86::VFCMULCPHZ256rrkz:
- case X86::VFCMULCPHZrrkz:
- case X86::VFCMULCPHZrrb:
- case X86::VFCMULCPHZrrbk:
- case X86::VFCMULCPHZrrbkz:
- case X86::VFCMULCSHZrm:
- case X86::VFCMULCSHZrmk:
- case X86::VFCMULCSHZrmkz:
- case X86::VFCMULCSHZrr:
- case X86::VFCMULCSHZrrb:
- case X86::VFCMULCSHZrrbk:
- case X86::VFCMULCSHZrrbkz:
- case X86::VFCMULCSHZrrk:
- case X86::VFCMULCSHZrrkz:
- case X86::VFMULCPHZ128rm:
- case X86::VFMULCPHZ256rm:
- case X86::VFMULCPHZrm:
- case X86::VFMULCPHZ128rmb:
- case X86::VFMULCPHZ256rmb:
- case X86::VFMULCPHZrmb:
- case X86::VFMULCPHZ128rmbk:
- case X86::VFMULCPHZ256rmbk:
- case X86::VFMULCPHZrmbk:
- case X86::VFMULCPHZ128rmbkz:
- case X86::VFMULCPHZ256rmbkz:
- case X86::VFMULCPHZrmbkz:
- case X86::VFMULCPHZ128rmk:
- case X86::VFMULCPHZ256rmk:
- case X86::VFMULCPHZrmk:
- case X86::VFMULCPHZ128rmkz:
- case X86::VFMULCPHZ256rmkz:
- case X86::VFMULCPHZrmkz:
- case X86::VFMULCPHZ128rr:
- case X86::VFMULCPHZ256rr:
- case X86::VFMULCPHZrr:
- case X86::VFMULCPHZ128rrk:
- case X86::VFMULCPHZ256rrk:
- case X86::VFMULCPHZrrk:
- case X86::VFMULCPHZ128rrkz:
- case X86::VFMULCPHZ256rrkz:
- case X86::VFMULCPHZrrkz:
- case X86::VFMULCPHZrrb:
- case X86::VFMULCPHZrrbk:
- case X86::VFMULCPHZrrbkz:
- case X86::VFMULCSHZrm:
- case X86::VFMULCSHZrmk:
- case X86::VFMULCSHZrmkz:
- case X86::VFMULCSHZrr:
- case X86::VFMULCSHZrrb:
- case X86::VFMULCSHZrrbk:
- case X86::VFMULCSHZrrbkz:
- case X86::VFMULCSHZrrk:
- case X86::VFMULCSHZrrkz: {
- unsigned Dest = Inst.getOperand(0).getReg();
- for (unsigned i = 1; i < Inst.getNumOperands(); i++)
- if (Inst.getOperand(i).isReg() && Dest == Inst.getOperand(i).getReg())
- return Warning(Ops[0]->getStartLoc(), "Destination register should be "
- "distinct from source registers");
- break;
- }
- }
- const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
- // Check that we aren't mixing AH/BH/CH/DH with REX prefix. We only need to
- // check this with the legacy encoding, VEX/EVEX/XOP don't use REX.
- if ((MCID.TSFlags & X86II::EncodingMask) == 0) {
- MCPhysReg HReg = X86::NoRegister;
- bool UsesRex = MCID.TSFlags & X86II::REX_W;
- unsigned NumOps = Inst.getNumOperands();
- for (unsigned i = 0; i != NumOps; ++i) {
- const MCOperand &MO = Inst.getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
- HReg = Reg;
- if (X86II::isX86_64NonExtLowByteReg(Reg) ||
- X86II::isX86_64ExtendedReg(Reg))
- UsesRex = true;
- }
- if (UsesRex && HReg != X86::NoRegister) {
- StringRef RegName = X86IntelInstPrinter::getRegisterName(HReg);
- return Error(Ops[0]->getStartLoc(),
- "can't encode '" + RegName + "' in an instruction requiring "
- "REX prefix");
- }
- }
- return false;
- }
- static const char *getSubtargetFeatureName(uint64_t Val);
- void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
- Warning(Loc, "Instruction may be vulnerable to LVI and "
- "requires manual mitigation");
- Note(SMLoc(), "See https://software.intel.com/"
- "security-software-guidance/insights/"
- "deep-dive-load-value-injection#specialinstructions"
- " for more information");
- }
- /// RET instructions, and also instructions that perform indirect calls/jumps from memory,
- /// combine a load and a branch within a single instruction. To mitigate these
- /// instructions against LVI, they must be decomposed into separate load and
- /// branch instructions, with an LFENCE in between. For more details, see:
- /// - X86LoadValueInjectionRetHardening.cpp
- /// - X86LoadValueInjectionIndirectThunks.cpp
- /// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
- ///
- /// Emits a mitigation sequence or a warning, as appropriate.
- void X86AsmParser::applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out) {
- // Information on control-flow instructions that require manual mitigation can
- // be found here:
- // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
- switch (Inst.getOpcode()) {
- case X86::RET16:
- case X86::RET32:
- case X86::RET64:
- case X86::RETI16:
- case X86::RETI32:
- case X86::RETI64: {
- MCInst ShlInst, FenceInst;
- bool Parse32 = is32BitMode() || Code16GCC;
- unsigned Basereg =
- is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
- const MCExpr *Disp = MCConstantExpr::create(0, getContext());
- auto ShlMemOp = X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
- /*BaseReg=*/Basereg, /*IndexReg=*/0,
- /*Scale=*/1, SMLoc{}, SMLoc{}, 0);
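- // Before the RET, emit "shl $0, (%rsp)" (with %esp/%sp in smaller modes), a
- // no-op read-modify-write of the return address, followed by an LFENCE so
- // the return-address load cannot be mis-speculated.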
- ShlInst.setOpcode(X86::SHL64mi);
- ShlMemOp->addMemOperands(ShlInst, 5);
- ShlInst.addOperand(MCOperand::createImm(0));
- FenceInst.setOpcode(X86::LFENCE);
- Out.emitInstruction(ShlInst, getSTI());
- Out.emitInstruction(FenceInst, getSTI());
- return;
- }
- case X86::JMP16m:
- case X86::JMP32m:
- case X86::JMP64m:
- case X86::CALL16m:
- case X86::CALL32m:
- case X86::CALL64m:
- emitWarningForSpecialLVIInstruction(Inst.getLoc());
- return;
- }
- }
- /// To mitigate LVI, every instruction that performs a load can be followed by
- /// an LFENCE instruction to squash any potential mis-speculation. There are
- /// some instructions that require additional considerations, and may require
- /// manual mitigation. For more details, see:
- /// https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
- ///
- /// Emits an LFENCE following the instruction, or a warning, as appropriate.
- void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
- MCStreamer &Out) {
- auto Opcode = Inst.getOpcode();
- auto Flags = Inst.getFlags();
- if ((Flags & X86::IP_HAS_REPEAT) || (Flags & X86::IP_HAS_REPEAT_NE)) {
- // Information on REP string instructions that require manual mitigation can
- // be found here:
- // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
- switch (Opcode) {
- case X86::CMPSB:
- case X86::CMPSW:
- case X86::CMPSL:
- case X86::CMPSQ:
- case X86::SCASB:
- case X86::SCASW:
- case X86::SCASL:
- case X86::SCASQ:
- emitWarningForSpecialLVIInstruction(Inst.getLoc());
- return;
- }
- } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
- // If a REP instruction is found on its own line, it may or may not be
- // followed by a vulnerable instruction. Emit a warning just in case.
- emitWarningForSpecialLVIInstruction(Inst.getLoc());
- return;
- }
- const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
- // Can't mitigate after terminators or calls. A control flow change may have
- // already occurred.
- if (MCID.isTerminator() || MCID.isCall())
- return;
- // LFENCE has the mayLoad property, don't double fence.
- if (MCID.mayLoad() && Inst.getOpcode() != X86::LFENCE) {
- MCInst FenceInst;
- FenceInst.setOpcode(X86::LFENCE);
- Out.emitInstruction(FenceInst, getSTI());
- }
- }
- void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
- MCStreamer &Out) {
- if (LVIInlineAsmHardening &&
- getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity])
- applyLVICFIMitigation(Inst, Out);
- Out.emitInstruction(Inst, getSTI());
- if (LVIInlineAsmHardening &&
- getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
- applyLVILoadHardeningMitigation(Inst, Out);
- }
- bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands,
- MCStreamer &Out, uint64_t &ErrorInfo,
- bool MatchingInlineAsm) {
- if (isParsingIntelSyntax())
- return MatchAndEmitIntelInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
- MatchingInlineAsm);
- return MatchAndEmitATTInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
- MatchingInlineAsm);
- }
- void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
- OperandVector &Operands, MCStreamer &Out,
- bool MatchingInlineAsm) {
- // FIXME: This should be replaced with a real .td file alias mechanism.
- // Also, MatchInstructionImpl should actually *do* the EmitInstruction
- // call.
- const char *Repl = StringSwitch<const char *>(Op.getToken())
- .Case("finit", "fninit")
- .Case("fsave", "fnsave")
- .Case("fstcw", "fnstcw")
- .Case("fstcww", "fnstcw")
- .Case("fstenv", "fnstenv")
- .Case("fstsw", "fnstsw")
- .Case("fstsww", "fnstsw")
- .Case("fclex", "fnclex")
- .Default(nullptr);
- if (Repl) {
- MCInst Inst;
- Inst.setOpcode(X86::WAIT);
- Inst.setLoc(IDLoc);
- if (!MatchingInlineAsm)
- emitInstruction(Inst, Operands, Out);
- Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
- }
- }
- bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
- const FeatureBitset &MissingFeatures,
- bool MatchingInlineAsm) {
- assert(MissingFeatures.any() && "Unknown missing feature!");
- SmallString<126> Msg;
- raw_svector_ostream OS(Msg);
- OS << "instruction requires:";
- for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
- if (MissingFeatures[i])
- OS << ' ' << getSubtargetFeatureName(i);
- }
- return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
- }
- static unsigned getPrefixes(OperandVector &Operands) {
- unsigned Result = 0;
- X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
- if (Prefix.isPrefix()) {
- Result = Prefix.getPrefix();
- Operands.pop_back();
- }
- return Result;
- }
- unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
- unsigned Opc = Inst.getOpcode();
- const MCInstrDesc &MCID = MII.get(Opc);
- if (ForcedVEXEncoding == VEXEncoding_EVEX &&
- (MCID.TSFlags & X86II::EncodingMask) != X86II::EVEX)
- return Match_Unsupported;
- if ((ForcedVEXEncoding == VEXEncoding_VEX ||
- ForcedVEXEncoding == VEXEncoding_VEX2 ||
- ForcedVEXEncoding == VEXEncoding_VEX3) &&
- (MCID.TSFlags & X86II::EncodingMask) != X86II::VEX)
- return Match_Unsupported;
- // These instructions are only available with {vex}, {vex2} or {vex3} prefix
- if (MCID.TSFlags & X86II::ExplicitVEXPrefix &&
- (ForcedVEXEncoding != VEXEncoding_VEX &&
- ForcedVEXEncoding != VEXEncoding_VEX2 &&
- ForcedVEXEncoding != VEXEncoding_VEX3))
- return Match_Unsupported;
- return Match_Success;
- }
- bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands,
- MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm) {
- assert(!Operands.empty() && "Unexpected empty operand list!");
- assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
- SMRange EmptyRange = None;
- // First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
- Out, MatchingInlineAsm);
- X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
- unsigned Prefixes = getPrefixes(Operands);
- MCInst Inst;
- // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
- // encoder and printer.
- if (ForcedVEXEncoding == VEXEncoding_VEX)
- Prefixes |= X86::IP_USE_VEX;
- else if (ForcedVEXEncoding == VEXEncoding_VEX2)
- Prefixes |= X86::IP_USE_VEX2;
- else if (ForcedVEXEncoding == VEXEncoding_VEX3)
- Prefixes |= X86::IP_USE_VEX3;
- else if (ForcedVEXEncoding == VEXEncoding_EVEX)
- Prefixes |= X86::IP_USE_EVEX;
- // Set encoded flags for {disp8} and {disp32}.
- if (ForcedDispEncoding == DispEncoding_Disp8)
- Prefixes |= X86::IP_USE_DISP8;
- else if (ForcedDispEncoding == DispEncoding_Disp32)
- Prefixes |= X86::IP_USE_DISP32;
- if (Prefixes)
- Inst.setFlags(Prefixes);
- // In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
- // when matching the instruction.
- if (ForcedDataPrefix == X86::Mode32Bit)
- SwitchMode(X86::Mode32Bit);
- // First, try a direct match.
- FeatureBitset MissingFeatures;
- unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
- MissingFeatures, MatchingInlineAsm,
- isParsingIntelSyntax());
- if (ForcedDataPrefix == X86::Mode32Bit) {
- SwitchMode(X86::Mode16Bit);
- ForcedDataPrefix = 0;
- }
- switch (OriginalError) {
- default: llvm_unreachable("Unexpected match result!");
- case Match_Success:
- if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
- return true;
- // Some instructions need post-processing to, for example, tweak which
- // encoding is selected. Loop on it while changes happen so the
- // individual transformations can chain off each other.
- if (!MatchingInlineAsm)
- while (processInstruction(Inst, Operands))
- ;
- Inst.setLoc(IDLoc);
- if (!MatchingInlineAsm)
- emitInstruction(Inst, Operands, Out);
- Opcode = Inst.getOpcode();
- return false;
- case Match_InvalidImmUnsignedi4: {
- SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
- if (ErrorLoc == SMLoc())
- ErrorLoc = IDLoc;
- return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
- EmptyRange, MatchingInlineAsm);
- }
- case Match_MissingFeature:
- return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
- case Match_InvalidOperand:
- case Match_MnemonicFail:
- case Match_Unsupported:
- break;
- }
- if (Op.getToken().empty()) {
- Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
- MatchingInlineAsm);
- return true;
- }
- // FIXME: Ideally, we would only attempt suffix matches for things which are
- // valid prefixes, and we could just infer the right unambiguous
- // type. However, that requires substantially more matcher support than the
- // following hack.
- // Change the operand to point to a temporary token.
- StringRef Base = Op.getToken();
- SmallString<16> Tmp;
- Tmp += Base;
- Tmp += ' ';
- Op.setTokenValue(Tmp);
- // If this instruction starts with an 'f', then it is a floating point stack
- // instruction. These come in up to three forms for 32-bit, 64-bit, and
- // 80-bit floating point, which use the suffixes s,l,t respectively.
- //
- // Otherwise, we assume that this may be an integer instruction, which comes
- // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
- const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
- // MemSize corresponding to Suffixes. { 8, 16, 32, 64 } { 32, 64, 80, 0 }
- const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";
- // Check for the various suffix matches.
- uint64_t ErrorInfoIgnore;
- FeatureBitset ErrorInfoMissingFeatures; // Init suppresses compiler warnings.
- unsigned Match[4];
- // Some instructions, like VPMULDQ, are NOT variants of VPMULD but new
- // instructions. So we should make sure the suffix matcher only applies to
- // memory variants whose size matches the suffix.
- // FIXME: This flag is a workaround for legacy instructions that didn't
- // declare a non-suffix variant in assembly.
- bool HasVectorReg = false;
- X86Operand *MemOp = nullptr;
- for (const auto &Op : Operands) {
- X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
- if (X86Op->isVectorReg())
- HasVectorReg = true;
- else if (X86Op->isMem()) {
- MemOp = X86Op;
- assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");
- // We have found an unqualified memory operand; stop looking.
- // Intel assembly allows only one memory operand.
- break;
- }
- }
- for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
- Tmp.back() = Suffixes[I];
- if (MemOp && HasVectorReg)
- MemOp->Mem.Size = MemSize[I];
- Match[I] = Match_MnemonicFail;
- if (MemOp || !HasVectorReg) {
- Match[I] =
- MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
- MatchingInlineAsm, isParsingIntelSyntax());
- // If this returned as a missing feature failure, remember that.
- if (Match[I] == Match_MissingFeature)
- ErrorInfoMissingFeatures = MissingFeatures;
- }
- }
- // Restore the old token.
- Op.setTokenValue(Base);
- // If exactly one matched, then we treat that as a successful match (and the
- // instruction will already have been filled in correctly, since the failing
- // matches won't have modified it).
- unsigned NumSuccessfulMatches = llvm::count(Match, Match_Success);
- if (NumSuccessfulMatches == 1) {
- if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
- return true;
- // Some instructions need post-processing to, for example, tweak which
- // encoding is selected. Loop on it while changes happen so the
- // individual transformations can chain off each other.
- if (!MatchingInlineAsm)
- while (processInstruction(Inst, Operands))
- ;
- Inst.setLoc(IDLoc);
- if (!MatchingInlineAsm)
- emitInstruction(Inst, Operands, Out);
- Opcode = Inst.getOpcode();
- return false;
- }
- // Otherwise, the match failed, try to produce a decent error message.
- // If we had multiple suffix matches, then identify this as an ambiguous
- // match.
- if (NumSuccessfulMatches > 1) {
- char MatchChars[4];
- unsigned NumMatches = 0;
- for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I)
- if (Match[I] == Match_Success)
- MatchChars[NumMatches++] = Suffixes[I];
- SmallString<126> Msg;
- raw_svector_ostream OS(Msg);
- OS << "ambiguous instructions require an explicit suffix (could be ";
- for (unsigned i = 0; i != NumMatches; ++i) {
- if (i != 0)
- OS << ", ";
- if (i + 1 == NumMatches)
- OS << "or ";
- OS << "'" << Base << MatchChars[i] << "'";
- }
- OS << ")";
- Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
- return true;
- }
- // Okay, we know that none of the variants matched successfully.
- // If all of the instructions reported an invalid mnemonic, then the original
- // mnemonic was invalid.
- if (llvm::count(Match, Match_MnemonicFail) == 4) {
- if (OriginalError == Match_MnemonicFail)
- return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
- Op.getLocRange(), MatchingInlineAsm);
- if (OriginalError == Match_Unsupported)
- return Error(IDLoc, "unsupported instruction", EmptyRange,
- MatchingInlineAsm);
- assert(OriginalError == Match_InvalidOperand && "Unexpected error");
- // Recover location info for the operand if we know which was the problem.
- if (ErrorInfo != ~0ULL) {
- if (ErrorInfo >= Operands.size())
- return Error(IDLoc, "too few operands for instruction", EmptyRange,
- MatchingInlineAsm);
- X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
- if (Operand.getStartLoc().isValid()) {
- SMRange OperandRange = Operand.getLocRange();
- return Error(Operand.getStartLoc(), "invalid operand for instruction",
- OperandRange, MatchingInlineAsm);
- }
- }
- return Error(IDLoc, "invalid operand for instruction", EmptyRange,
- MatchingInlineAsm);
- }
- // If one instruction matched as unsupported, report this as unsupported.
- if (llvm::count(Match, Match_Unsupported) == 1) {
- return Error(IDLoc, "unsupported instruction", EmptyRange,
- MatchingInlineAsm);
- }
- // If one instruction matched with a missing feature, report this as a
- // missing feature.
- if (llvm::count(Match, Match_MissingFeature) == 1) {
- ErrorInfo = Match_MissingFeature;
- return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
- MatchingInlineAsm);
- }
- // If one instruction matched with an invalid operand, report this as an
- // operand failure.
- if (llvm::count(Match, Match_InvalidOperand) == 1) {
- return Error(IDLoc, "invalid operand for instruction", EmptyRange,
- MatchingInlineAsm);
- }
- // If all of these were an outright failure, report it in a useless way.
- Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
- EmptyRange, MatchingInlineAsm);
- return true;
- }
- bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands,
- MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm) {
- assert(!Operands.empty() && "Unexpected empty operand list!");
- assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
- StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
- SMRange EmptyRange = None;
- StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
- unsigned Prefixes = getPrefixes(Operands);
- // First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands, Out, MatchingInlineAsm);
- X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
- MCInst Inst;
- // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
- // encoder and printer.
- if (ForcedVEXEncoding == VEXEncoding_VEX)
- Prefixes |= X86::IP_USE_VEX;
- else if (ForcedVEXEncoding == VEXEncoding_VEX2)
- Prefixes |= X86::IP_USE_VEX2;
- else if (ForcedVEXEncoding == VEXEncoding_VEX3)
- Prefixes |= X86::IP_USE_VEX3;
- else if (ForcedVEXEncoding == VEXEncoding_EVEX)
- Prefixes |= X86::IP_USE_EVEX;
- // Set encoded flags for {disp8} and {disp32}.
- if (ForcedDispEncoding == DispEncoding_Disp8)
- Prefixes |= X86::IP_USE_DISP8;
- else if (ForcedDispEncoding == DispEncoding_Disp32)
- Prefixes |= X86::IP_USE_DISP32;
- if (Prefixes)
- Inst.setFlags(Prefixes);
- // Find one unsized memory operand, if present.
- X86Operand *UnsizedMemOp = nullptr;
- for (const auto &Op : Operands) {
- X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
- if (X86Op->isMemUnsized()) {
- UnsizedMemOp = X86Op;
- // We have found an unqualified memory operand; stop looking.
- // Intel assembly allows only one memory operand.
- break;
- }
- }
- // Allow some instructions to have implicitly pointer-sized operands. This is
- // compatible with gas.
- if (UnsizedMemOp) {
- static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
- for (const char *Instr : PtrSizedInstrs) {
- if (Mnemonic == Instr) {
- UnsizedMemOp->Mem.Size = getPointerWidth();
- break;
- }
- }
- }
- SmallVector<unsigned, 8> Match;
- FeatureBitset ErrorInfoMissingFeatures;
- FeatureBitset MissingFeatures;
- // If an unsized push has an immediate operand, default its operand size to
- // the pointer size.
- if (Mnemonic == "push" && Operands.size() == 2) {
- auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
- if (X86Op->isImm()) {
- // If it's not a constant, fall through and let the code below take care of it.
- const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
- unsigned Size = getPointerWidth();
- if (CE &&
- (isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
- SmallString<16> Tmp;
- Tmp += Base;
- Tmp += (is64BitMode())
- ? "q"
- : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
- Op.setTokenValue(Tmp);
- // Do match in ATT mode to allow explicit suffix usage.
- Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
- MissingFeatures, MatchingInlineAsm,
- false /*isParsingIntelSyntax()*/));
- Op.setTokenValue(Base);
- }
- }
- }
- // If an unsized memory operand is present, try to match with each memory
- // operand size. In Intel assembly, the size is not part of the instruction
- // mnemonic.
- if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
- static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
- for (unsigned Size : MopSizes) {
- UnsizedMemOp->Mem.Size = Size;
- uint64_t ErrorInfoIgnore;
- unsigned LastOpcode = Inst.getOpcode();
- unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
- MissingFeatures, MatchingInlineAsm,
- isParsingIntelSyntax());
- if (Match.empty() || LastOpcode != Inst.getOpcode())
- Match.push_back(M);
- // If this returned as a missing feature failure, remember that.
- if (Match.back() == Match_MissingFeature)
- ErrorInfoMissingFeatures = MissingFeatures;
- }
- // Restore the size of the unsized memory operand if we modified it.
- UnsizedMemOp->Mem.Size = 0;
- }
- // If we haven't matched anything yet, this is not a basic integer or FPU
- // operation. There shouldn't be any ambiguity in our mnemonic table, so try
- // matching with the unsized operand.
- if (Match.empty()) {
- Match.push_back(MatchInstruction(
- Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
- isParsingIntelSyntax()));
- // If this returned as a missing feature failure, remember that.
- if (Match.back() == Match_MissingFeature)
- ErrorInfoMissingFeatures = MissingFeatures;
- }
- // Restore the size of the unsized memory operand if we modified it.
- if (UnsizedMemOp)
- UnsizedMemOp->Mem.Size = 0;
- // If it's a bad mnemonic, all results will be the same.
- if (Match.back() == Match_MnemonicFail) {
- return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
- Op.getLocRange(), MatchingInlineAsm);
- }
- unsigned NumSuccessfulMatches = llvm::count(Match, Match_Success);
- // If matching was ambiguous and we had size information from the frontend,
- // try again with that. This handles cases like "movzx eax, m8/m16".
- if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
- UnsizedMemOp->getMemFrontendSize()) {
- UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
- unsigned M = MatchInstruction(
- Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
- isParsingIntelSyntax());
- if (M == Match_Success)
- NumSuccessfulMatches = 1;
- // Add a rewrite that encodes the size information we used from the
- // frontend.
- InstInfo->AsmRewrites->emplace_back(
- AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
- /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
- }
- // If exactly one matched, then we treat that as a successful match (and the
- // instruction will already have been filled in correctly, since the failing
- // matches won't have modified it).
- if (NumSuccessfulMatches == 1) {
- if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
- return true;
- // Some instructions need post-processing to, for example, tweak which
- // encoding is selected. Loop on it while changes happen so the individual
- // transformations can chain off each other.
- if (!MatchingInlineAsm)
- while (processInstruction(Inst, Operands))
- ;
- Inst.setLoc(IDLoc);
- if (!MatchingInlineAsm)
- emitInstruction(Inst, Operands, Out);
- Opcode = Inst.getOpcode();
- return false;
- } else if (NumSuccessfulMatches > 1) {
- assert(UnsizedMemOp &&
- "multiple matches only possible with unsized memory operands");
- return Error(UnsizedMemOp->getStartLoc(),
- "ambiguous operand size for instruction '" + Mnemonic + "\'",
- UnsizedMemOp->getLocRange());
- }
- // If one instruction matched as unsupported, report this as unsupported.
- if (llvm::count(Match, Match_Unsupported) == 1) {
- return Error(IDLoc, "unsupported instruction", EmptyRange,
- MatchingInlineAsm);
- }
- // If one instruction matched with a missing feature, report this as a
- // missing feature.
- if (llvm::count(Match, Match_MissingFeature) == 1) {
- ErrorInfo = Match_MissingFeature;
- return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
- MatchingInlineAsm);
- }
- // If one instruction matched with an invalid operand, report this as an
- // operand failure.
- if (llvm::count(Match, Match_InvalidOperand) == 1) {
- return Error(IDLoc, "invalid operand for instruction", EmptyRange,
- MatchingInlineAsm);
- }
- if (llvm::count(Match, Match_InvalidImmUnsignedi4) == 1) {
- SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
- if (ErrorLoc == SMLoc())
- ErrorLoc = IDLoc;
- return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
- EmptyRange, MatchingInlineAsm);
- }
- // If all of these were an outright failure, fall back to a generic error.
- return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
- MatchingInlineAsm);
- }
- bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
- return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
- }
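- // In other words, segment registers (ES, CS, SS, DS, FS, GS) referenced in
- // inline assembly are left out of the generated clobber lists.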
- bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
- MCAsmParser &Parser = getParser();
- StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal.startswith(".arch"))
- return parseDirectiveArch();
- if (IDVal.startswith(".code"))
- return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
- else if (IDVal.startswith(".att_syntax")) {
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- if (Parser.getTok().getString() == "prefix")
- Parser.Lex();
- else if (Parser.getTok().getString() == "noprefix")
- return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
- "supported: registers must have a "
- "'%' prefix in .att_syntax");
- }
- getParser().setAssemblerDialect(0);
- return false;
- } else if (IDVal.startswith(".intel_syntax")) {
- getParser().setAssemblerDialect(1);
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- if (Parser.getTok().getString() == "noprefix")
- Parser.Lex();
- else if (Parser.getTok().getString() == "prefix")
- return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
- "supported: registers must not have "
- "a '%' prefix in .intel_syntax");
- }
- return false;
- } else if (IDVal == ".nops")
- return parseDirectiveNops(DirectiveID.getLoc());
- else if (IDVal == ".even")
- return parseDirectiveEven(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_proc")
- return parseDirectiveFPOProc(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_setframe")
- return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_pushreg")
- return parseDirectiveFPOPushReg(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_stackalloc")
- return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_stackalign")
- return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_endprologue")
- return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
- else if (IDVal == ".cv_fpo_endproc")
- return parseDirectiveFPOEndProc(DirectiveID.getLoc());
- else if (IDVal == ".seh_pushreg" ||
- (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushreg")))
- return parseDirectiveSEHPushReg(DirectiveID.getLoc());
- else if (IDVal == ".seh_setframe" ||
- (Parser.isParsingMasm() && IDVal.equals_insensitive(".setframe")))
- return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
- else if (IDVal == ".seh_savereg" ||
- (Parser.isParsingMasm() && IDVal.equals_insensitive(".savereg")))
- return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
- else if (IDVal == ".seh_savexmm" ||
- (Parser.isParsingMasm() && IDVal.equals_insensitive(".savexmm128")))
- return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
- else if (IDVal == ".seh_pushframe" ||
- (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushframe")))
- return parseDirectiveSEHPushFrame(DirectiveID.getLoc());
- return true;
- }
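- // Of the syntax directives handled above, `.intel_syntax noprefix` selects
- // dialect 1 (operands like `mov eax, 1`), while `.att_syntax prefix` selects
- // dialect 0 (operands like `movl $1, %eax`); the unsupported prefix
- // combinations are rejected with the errors shown.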
- bool X86AsmParser::parseDirectiveArch() {
- // Ignore .arch for now.
- getParser().parseStringToEndOfStatement();
- return false;
- }
- /// parseDirectiveNops
- /// ::= .nops size[, control]
- bool X86AsmParser::parseDirectiveNops(SMLoc L) {
- int64_t NumBytes = 0, Control = 0;
- SMLoc NumBytesLoc, ControlLoc;
- const MCSubtargetInfo &STI = getSTI();
- NumBytesLoc = getTok().getLoc();
- if (getParser().checkForValidSection() ||
- getParser().parseAbsoluteExpression(NumBytes))
- return true;
- if (parseOptionalToken(AsmToken::Comma)) {
- ControlLoc = getTok().getLoc();
- if (getParser().parseAbsoluteExpression(Control))
- return true;
- }
- if (getParser().parseToken(AsmToken::EndOfStatement,
- "unexpected token in '.nops' directive"))
- return true;
- if (NumBytes <= 0) {
- Error(NumBytesLoc, "'.nops' directive with non-positive size");
- return false;
- }
- if (Control < 0) {
- Error(ControlLoc, "'.nops' directive with negative NOP size");
- return false;
- }
- // Emit the NOPs.
- getParser().getStreamer().emitNops(NumBytes, Control, L, STI);
- return false;
- }
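- // For example, `.nops 16, 4` requests 16 bytes of padding built from NOP
- // instructions of at most 4 bytes each; with no control operand (`.nops 16`)
- // the streamer is free to pick the NOP sizes itself.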
- /// parseDirectiveEven
- /// ::= .even
- bool X86AsmParser::parseDirectiveEven(SMLoc L) {
- if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
- return false;
- const MCSection *Section = getStreamer().getCurrentSectionOnly();
- if (!Section) {
- getStreamer().initSections(false, getSTI());
- Section = getStreamer().getCurrentSectionOnly();
- }
- if (Section->UseCodeAlign())
- getStreamer().emitCodeAlignment(2, &getSTI(), 0);
- else
- getStreamer().emitValueToAlignment(2, 0, 1, 0);
- return false;
- }
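- // A bare `.even`, for example, pads the current section to a 2-byte
- // boundary, using code alignment (NOP padding) in code sections and zero
- // fill elsewhere.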
- /// ParseDirectiveCode
- /// ::= .code16 | .code32 | .code64
- bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
- MCAsmParser &Parser = getParser();
- Code16GCC = false;
- if (IDVal == ".code16") {
- Parser.Lex();
- if (!is16BitMode()) {
- SwitchMode(X86::Mode16Bit);
- getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
- }
- } else if (IDVal == ".code16gcc") {
- // .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
- Parser.Lex();
- Code16GCC = true;
- if (!is16BitMode()) {
- SwitchMode(X86::Mode16Bit);
- getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
- }
- } else if (IDVal == ".code32") {
- Parser.Lex();
- if (!is32BitMode()) {
- SwitchMode(X86::Mode32Bit);
- getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
- }
- } else if (IDVal == ".code64") {
- Parser.Lex();
- if (!is64BitMode()) {
- SwitchMode(X86::Mode64Bit);
- getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
- }
- } else {
- Error(L, "unknown directive " + IDVal);
- return false;
- }
- return false;
- }
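- // For example, a file targeting x86-64 can wrap a 16-bit real-mode stub in
- // `.code16` ... `.code64`; `.code16gcc` is the variant that keeps accepting
- // 32-bit-style operands while still encoding for 16-bit mode.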
- // .cv_fpo_proc foo 4
- bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
- MCAsmParser &Parser = getParser();
- StringRef ProcName;
- int64_t ParamsSize;
- if (Parser.parseIdentifier(ProcName))
- return Parser.TokError("expected symbol name");
- if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
- return true;
- if (!isUIntN(32, ParamsSize))
- return Parser.TokError("parameters size out of range");
- if (parseEOL())
- return true;
- MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
- return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
- }
- // .cv_fpo_setframe ebp
- bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
- unsigned Reg;
- SMLoc DummyLoc;
- if (ParseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
- return true;
- return getTargetStreamer().emitFPOSetFrame(Reg, L);
- }
- // .cv_fpo_pushreg ebx
- bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
- unsigned Reg;
- SMLoc DummyLoc;
- if (ParseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
- return true;
- return getTargetStreamer().emitFPOPushReg(Reg, L);
- }
- // .cv_fpo_stackalloc 20
- bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
- MCAsmParser &Parser = getParser();
- int64_t Offset;
- if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
- return true;
- return getTargetStreamer().emitFPOStackAlloc(Offset, L);
- }
- // .cv_fpo_stackalign 8
- bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
- MCAsmParser &Parser = getParser();
- int64_t Offset;
- if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
- return true;
- return getTargetStreamer().emitFPOStackAlign(Offset, L);
- }
- // .cv_fpo_endprologue
- bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
- MCAsmParser &Parser = getParser();
- if (Parser.parseEOL())
- return true;
- return getTargetStreamer().emitFPOEndPrologue(L);
- }
- // .cv_fpo_endproc
- bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
- MCAsmParser &Parser = getParser();
- if (Parser.parseEOL())
- return true;
- return getTargetStreamer().emitFPOEndProc(L);
- }
- bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
- unsigned &RegNo) {
- SMLoc startLoc = getLexer().getLoc();
- const MCRegisterInfo *MRI = getContext().getRegisterInfo();
- // Try parsing the argument as a register first.
- if (getLexer().getTok().isNot(AsmToken::Integer)) {
- SMLoc endLoc;
- if (ParseRegister(RegNo, startLoc, endLoc))
- return true;
- if (!X86MCRegisterClasses[RegClassID].contains(RegNo)) {
- return Error(startLoc,
- "register is not supported for use with this directive");
- }
- } else {
- // Otherwise, an integer number matching the encoding of the desired
- // register may appear.
- int64_t EncodedReg;
- if (getParser().parseAbsoluteExpression(EncodedReg))
- return true;
- // The SEH register number is the same as the encoding register number. Map
- // from the encoding back to the LLVM register number.
- RegNo = 0;
- for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
- if (MRI->getEncodingValue(Reg) == EncodedReg) {
- RegNo = Reg;
- break;
- }
- }
- if (RegNo == 0) {
- return Error(startLoc,
- "incorrect register number for use with this directive");
- }
- }
- return false;
- }
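- // Consequently an SEH directive such as `.seh_pushreg` accepts either a
- // register name (e.g. %rbp) or its Win64 encoding (e.g. 5 for RBP).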
- bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
- unsigned Reg = 0;
- if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
- return true;
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
- getParser().Lex();
- getStreamer().EmitWinCFIPushReg(Reg, Loc);
- return false;
- }
- bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
- unsigned Reg = 0;
- int64_t Off;
- if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
- return true;
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("you must specify a stack pointer offset");
- getParser().Lex();
- if (getParser().parseAbsoluteExpression(Off))
- return true;
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
- getParser().Lex();
- getStreamer().EmitWinCFISetFrame(Reg, Off, Loc);
- return false;
- }
- bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
- unsigned Reg = 0;
- int64_t Off;
- if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
- return true;
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("you must specify an offset on the stack");
- getParser().Lex();
- if (getParser().parseAbsoluteExpression(Off))
- return true;
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
- getParser().Lex();
- getStreamer().EmitWinCFISaveReg(Reg, Off, Loc);
- return false;
- }
- bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
- unsigned Reg = 0;
- int64_t Off;
- if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
- return true;
- if (getLexer().isNot(AsmToken::Comma))
- return TokError("you must specify an offset on the stack");
- getParser().Lex();
- if (getParser().parseAbsoluteExpression(Off))
- return true;
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
- getParser().Lex();
- getStreamer().EmitWinCFISaveXMM(Reg, Off, Loc);
- return false;
- }
- bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
- bool Code = false;
- StringRef CodeID;
- if (getLexer().is(AsmToken::At)) {
- SMLoc startLoc = getLexer().getLoc();
- getParser().Lex();
- if (!getParser().parseIdentifier(CodeID)) {
- if (CodeID != "code")
- return Error(startLoc, "expected @code");
- Code = true;
- }
- }
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in directive");
- getParser().Lex();
- getStreamer().EmitWinCFIPushFrame(Code, Loc);
- return false;
- }
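- // For example, `.seh_pushframe @code` marks the pushed machine frame as
- // including an error code, while a bare `.seh_pushframe` does not.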
- // Force static initialization.
- extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86AsmParser() {
- RegisterMCAsmParser<X86AsmParser> X(getTheX86_32Target());
- RegisterMCAsmParser<X86AsmParser> Y(getTheX86_64Target());
- }
- #define GET_REGISTER_MATCHER
- #define GET_MATCHER_IMPLEMENTATION
- #define GET_SUBTARGET_FEATURE_NAME
- #include "X86GenAsmMatcher.inc"
|