//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;
def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, f64>]>;

def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;

def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_cmpfpe : SDNode<"ARMISD::CMPFPE",  SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfpe0: SDNode<"ARMISD::CMPFPEw0",SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
def arm_fmrrd  : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
def arm_vmovsr : SDNode<"ARMISD::VMOVSR",  SDT_VMOVSR>;

def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f16imm : Operand<f16>,
                 PatLeaf<(f16 fpimm), [{
      return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f32f16imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32FP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def vfp_f32f16imm : PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32FP16Imm(N->getValueAPF()) != -1;
    }], vfp_f32f16imm_xform>;

def vfp_f32imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def gi_vfp_f32imm : GICustomOperandRenderer<"renderVFPF32Imm">,
                    GISDNodeXFormEquiv<vfp_f32imm_xform>;

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], vfp_f32imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP32Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}

def vfp_f64imm_xform : SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>;

def gi_vfp_f64imm : GICustomOperandRenderer<"renderVFPF64Imm">,
                    GISDNodeXFormEquiv<vfp_f64imm_xform>;

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], vfp_f64imm_xform> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
  let GISelPredicateCode = [{
    const auto &MO = MI.getOperand(1);
    if (!MO.isFPImm())
      return false;
    return ARM_AM::getFP64Imm(MO.getFPImm()->getValueAPF()) != -1;
  }];
}
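
// Note: the ARM_AM::getFP*Imm helpers return -1 when a constant cannot be
// encoded in the 8-bit VFP immediate form, so the predicates above only match
// immediates that a VMOV-immediate can encode directly; other FP constants
// have to be materialized some other way (typically a constant-pool load).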

def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlign() >= 2;
}]>;

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlign() >= 4;
}]>;

def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlign() >= 2;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlign() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
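// For example, "vcvt.s32.f32 s0, s0, #8" has Size = 32 and fbits = 8, so the
// fbits field is encoded as 32 - 8 = 24.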
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
//  Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>,
            Requires<[HasFPRegs]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
                 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
                 [(set HPR:$Sd, (f16 (alignedload16 addrmode5fp16:$addr)))]>,
            Requires<[HasFPRegs16]>;

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def : Pat<(bf16 (alignedload16 addrmode5fp16:$addr)),
          (VLDRH addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(bf16 (alignedload16 addrmode3:$addr)),
          (COPY_TO_REGCLASS (LDRH addrmode3:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(bf16 (alignedload16 t2addrmode_imm12:$addr)),
          (COPY_TO_REGCLASS (t2LDRHi12 t2addrmode_imm12:$addr), HPR)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>,
            Requires<[HasFPRegs]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]>,
            Requires<[HasFPRegs]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let isUnpredicable = 1 in
def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
                 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
                 [(alignedstore16 (f16 HPR:$Sd), addrmode5fp16:$addr)]>,
            Requires<[HasFPRegs16]>;

def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode5fp16:$addr),
          (VSTRH (bf16 HPR:$Sd), addrmode5fp16:$addr)> {
  let Predicates = [HasFPRegs16];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), addrmode3:$addr),
          (STRH (COPY_TO_REGCLASS $Sd, GPR), addrmode3:$addr)> {
  let Predicates = [HasNoFPRegs16, IsARM];
}
def : Pat<(alignedstore16 (bf16 HPR:$Sd), t2addrmode_imm12:$addr),
          (t2STRHi12 (COPY_TO_REGCLASS $Sd, GPR), t2addrmode_imm12:$addr)> {
  let Predicates = [HasNoFPRegs16, IsThumb];
}

//===----------------------------------------------------------------------===//
//  Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  let Predicates = [HasFPRegs] in {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  }
}

let hasSideEffects = 0 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
} // hasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;

//===----------------------------------------------------------------------===//
// Lazy load / store multiple Instructions
//
def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlldm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22}    = 0;
  let Inst{21}    = 1;
  let Inst{20}    = 1;
  let Inst{15-12} = 0;
  let Inst{7-0}   = 0;
  let mayLoad     = 1;
  let Defs = [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, VPR, FPSCR, FPSCR_NZCV];
}

def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  NoItinerary, "vlstm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22}    = 0;
  let Inst{21}    = 1;
  let Inst{20}    = 0;
  let Inst{15-12} = 0;
  let Inst{7-0}   = 0;
  let mayStore    = 1;
}

def : InstAlias<"vpush${p} $r",  (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpush${p} $r",  (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",   (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;
def : InstAlias<"vpop${p} $r",   (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasFPRegs]>;

defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-armv6 cores.
// These instructions are deprecated, so we don't want them to get selected.
// However, there is no UAL syntax for them, so we keep them around for
// (dis)assembly only.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  let Predicates = [HasFPRegs], hasNoSchedulingInfo = 1 in {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;
def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD : ADbI<0b11100, 0b11, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS : ASbIn<0b11100, 0b11, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH : AHbI<0b11100, 0b11, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH : AHbI<0b11100, 0b11, 1, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPDIV64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS : ASbI<0b11101, 0b00, 0, 0,
                 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH : AHbI<0b11101, 0b00, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS : ASbIn<0b11100, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH : AHbI<0b11100, 0b10, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                 [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set (f16 HPR:$Sd), (fneg (fmul (f16 HPR:$Sn), (f16 HPR:$Sm))))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4, isUnpredicable = 1 in {
    def H : AHbInp<0b11100, opc, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (ARMcmov (f16 HPR:$Sm), (f16 HPR:$Sn), CC))]>,
            Requires<[HasFullFP16]>;
    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
            Requires<[HasFPARMv8]>;
    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
            Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;
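// For reference, assuming the usual ARMCC::CondCodes ordering (EQ, NE, HS, LO,
// MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL): 0 = EQ, 6 = VS, 10 = GE and
// 12 = GT, which matches the vsel mnemonics above.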

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      isUnpredicable = 1 in {
    def H : AHbInp<0b11101, 0b00, opc,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set (f16 HPR:$Sd), (SD (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
            Requires<[HasFullFP16]>;
    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
            Requires<[HasFPARMv8]>;
    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
            Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VFP_VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
defm VFP_VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
      Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm", "",
                  [(arm_cmpfpe DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm", "",
                  [(arm_cmpfpe SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
                  [(arm_cmpfpe (f16 HPR:$Sd), (f16 HPR:$Sm))]>;

def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins DPR:$Dd, DPR:$Dm),
                 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm", "",
                 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins SPR:$Sd, SPR:$Sm),
                 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm", "",
                 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins HPR:$Sd, HPR:$Sm),
                 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
                 [(arm_cmpfp (f16 HPR:$Sd), (f16 HPR:$Sm))]>;
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm", "",
                 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs HPR:$Sd), (ins HPR:$Sm),
                 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
                 [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0", "",
                   [(arm_cmpfpe0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0", "",
                   [(arm_cmpfpe0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
                   [(arm_cmpfpe0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins DPR:$Dd),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0", "",
                  [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins SPR:$Sd),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0", "",
                  [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins HPR:$Sd),
                  IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
                  [(arm_cmpfp0 (f16 HPR:$Sd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                  (outs DPR:$Dd), (ins SPR:$Sm),
                  IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm", "",
                  [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Special case encoding: bits 11-8 are 0b1011.
def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                   IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm", "",
                   [(set SPR:$Sd, (fpround DPR:$Dm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
  let hasSideEffects = 0;
}

// Between half, single and double-precision.
let hasSideEffects = 0 in
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
             Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
              (VCVTBHS (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

let hasSideEffects = 0 in
def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
             Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FP16Pat<(f16 (fpround SPR:$Sm)),
              (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
              (i32 (COPY_TO_REGCLASS (VCVTBSH (IMPLICIT_DEF), SPR:$a), GPR))>;
def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
                       (VCVTBSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                SPR:$src2),
                       (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_even:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
                       (VCVTBSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                SPR:$src2),
                       (SSubReg_f16_reg imm:$lane)))>;

let hasSideEffects = 0 in
def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
             Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FP16Pat<(f32 (fpextend (extractelt (v8f16 MQPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_odd:$lane)))>;
def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
              (VCVTTHS (EXTRACT_SUBREG
                         (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
                         (SSubReg_f16_reg imm_odd:$lane)))>;

let hasSideEffects = 0 in
def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
             Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FP16Pat<(insertelt (v8f16 MQPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v8f16 (INSERT_SUBREG (v8f16 MQPR:$src1),
                       (VCVTTSH (EXTRACT_SUBREG (v8f16 MQPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                SPR:$src2),
                       (SSubReg_f16_reg imm:$lane)))>;
def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm_odd:$lane),
              (v4f16 (INSERT_SUBREG (v4f16 DPR:$src1),
                       (VCVTTSH (EXTRACT_SUBREG (v4f16 DPR:$src1), (SSubReg_f16_reg imm:$lane)),
                                SPR:$src2),
                       (SSubReg_f16_reg imm:$lane)))>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", "",
                   [/* Intentionally left blank, see patterns below */]>,
              Requires<[HasFPARMv8, HasDPVFP]>,
              Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f64 (fpextend (f16 HPR:$Sm))),
                  (VCVTBHD (COPY_TO_REGCLASS (f16 HPR:$Sm), SPR))>,
      Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
              (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
      Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
                   [/* Intentionally left blank, see patterns below */]>,
              Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let hasSideEffects = 0;
}

def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
                  (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$Dm), HPR)>,
      Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
              (i32 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$a), GPR))>,
      Requires<[HasFPARMv8, HasDPVFP]>;

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", "",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};

  let hasSideEffects = 0;
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};

  let hasSideEffects = 0;
}

multiclass vcvt_inst<string opc, bits<2> rm,
                     SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
    def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
                    []>,
             Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
                    []>,
             Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>,
             Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>,
             Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>,
             Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands.
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>,
             Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }
  }

  let Predicates = [HasFPARMv8] in {
    let Predicates = [HasFullFP16] in {
    def : Pat<(i32 (fp_to_sint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SH") (f16 HPR:$a)),
                GPR)>;

    def : Pat<(i32 (fp_to_uint (node (f16 HPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UH") (f16 HPR:$a)),
                GPR)>;
    }
    def : Pat<(i32 (fp_to_sint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SS") SPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"US") SPR:$a),
                GPR)>;
  }
  let Predicates = [HasFPARMv8, HasDPVFP] in {
    def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SD") DPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UD") DPR:$a),
                GPR)>;
  }
}

defm VCVTA : vcvt_inst<"a", 0b00, fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
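// The "a"/"n"/"p"/"m" suffixes select the rounding mode: to nearest with ties
// away from zero, to nearest even, toward +infinity and toward -infinity
// respectively. That is why fround, fceil and ffloor map onto VCVTA, VCVTP and
// VCVTM above, while VCVTN is left without a generic node to match.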
  865. def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
  866. (outs DPR:$Dd), (ins DPR:$Dm),
  867. IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm", "",
  868. [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
  869. def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
  870. (outs SPR:$Sd), (ins SPR:$Sm),
  871. IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
  872. [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  873. // Some single precision VFP instructions may be executed on both NEON and
  874. // VFP pipelines on A8.
  875. let D = VFPNeonA8Domain;
  876. }
  877. def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
  878. (outs HPR:$Sd), (ins HPR:$Sm),
  879. IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
  880. [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;
  881. multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  882. def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
  883. (outs HPR:$Sd), (ins HPR:$Sm),
  884. NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
  885. [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
  886. Requires<[HasFullFP16]> {
  887. let Inst{7} = op2;
  888. let Inst{16} = op;
  889. }
  890. def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
  891. (outs SPR:$Sd), (ins SPR:$Sm),
  892. NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm", "",
  893. [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
  894. Requires<[HasFPARMv8]> {
  895. let Inst{7} = op2;
  896. let Inst{16} = op;
  897. }
  898. def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
  899. (outs DPR:$Dd), (ins DPR:$Dm),
  900. NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm", "",
  901. [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
  902. Requires<[HasFPARMv8, HasDPVFP]> {
  903. let Inst{7} = op2;
  904. let Inst{16} = op;
  905. }
  906. def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
  907. (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
  908. Requires<[HasFullFP16]>;
  909. def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
  910. (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
  911. Requires<[HasFPARMv8]>;
  912. def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
  913. (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
  914. Requires<[HasFPARMv8,HasDPVFP]>;
  915. }
  916. defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
  917. defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
  918. defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
  919. multiclass vrint_inst_anpm<string opc, bits<2> rm,
  920. SDPatternOperator node = null_frag> {
  921. let PostEncoderMethod = "", DecoderNamespace = "VFPV8",
  922. isUnpredicable = 1 in {
  923. def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
  924. (outs HPR:$Sd), (ins HPR:$Sm),
  925. NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
  926. [(set (f16 HPR:$Sd), (node (f16 HPR:$Sm)))]>,
  927. Requires<[HasFullFP16]> {
  928. let Inst{17-16} = rm;
  929. }
  930. def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
  931. (outs SPR:$Sd), (ins SPR:$Sm),
  932. NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
  933. [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
  934. Requires<[HasFPARMv8]> {
  935. let Inst{17-16} = rm;
  936. }
  937. def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
  938. (outs DPR:$Dd), (ins DPR:$Dm),
  939. NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
  940. [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
  941. Requires<[HasFPARMv8, HasDPVFP]> {
  942. let Inst{17-16} = rm;
  943. }
  944. }
  945. def : InstAlias<!strconcat("vrint", opc, ".f16.f16\t$Sd, $Sm"),
  946. (!cast<Instruction>(NAME#"H") HPR:$Sd, HPR:$Sm), 0>,
  947. Requires<[HasFullFP16]>;
  948. def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
  949. (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
  950. Requires<[HasFPARMv8]>;
  951. def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
  952. (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
  953. Requires<[HasFPARMv8,HasDPVFP]>;
  954. }
  955. defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
  956. defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
  957. defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
  958. defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
  959. def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
  960. (outs DPR:$Dd), (ins DPR:$Dm),
  961. IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", "",
  962. [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
  963. Sched<[WriteFPSQRT64]>;
  964. def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
  965. (outs SPR:$Sd), (ins SPR:$Sm),
  966. IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm", "",
  967. [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
  968. Sched<[WriteFPSQRT32]>;
  969. def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
  970. (outs HPR:$Sd), (ins HPR:$Sm),
  971. IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
  972. [(set (f16 HPR:$Sd), (fsqrt (f16 HPR:$Sm)))]>;
  973. let hasSideEffects = 0 in {
  974. let isMoveReg = 1 in {
  975. def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
  976. (outs DPR:$Dd), (ins DPR:$Dm),
  977. IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", "", []>,
  978. Requires<[HasFPRegs64]>;
  979. def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
  980. (outs SPR:$Sd), (ins SPR:$Sm),
  981. IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", "", []>,
  982. Requires<[HasFPRegs]>;
  983. } // isMoveReg
  984. let PostEncoderMethod = "", DecoderNamespace = "VFPV8", isUnpredicable = 1 in {
  985. def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
  986. (outs SPR:$Sd), (ins SPR:$Sm),
  987. IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
  988. Requires<[HasFullFP16]>;
  989. def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
  990. (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
  991. IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
  992. Requires<[HasFullFP16]> {
  993. let Constraints = "$Sd = $Sda";
  994. }
  995. } // PostEncoderMethod
  996. } // hasSideEffects
  997. //===----------------------------------------------------------------------===//
  998. // FP <-> GPR Copies. Int <-> FP Conversions.
  999. //
  1000. let isMoveReg = 1 in {
  1001. def VMOVRS : AVConv2I<0b11100001, 0b1010,
  1002. (outs GPR:$Rt), (ins SPR:$Sn),
  1003. IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
  1004. [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
  1005. Requires<[HasFPRegs]>,
  1006. Sched<[WriteFPMOV]> {
  1007. // Instruction operands.
  1008. bits<4> Rt;
  1009. bits<5> Sn;
  1010. // Encode instruction operands.
  1011. let Inst{19-16} = Sn{4-1};
  1012. let Inst{7} = Sn{0};
  1013. let Inst{15-12} = Rt;
  1014. let Inst{6-5} = 0b00;
  1015. let Inst{3-0} = 0b0000;
  1016. // Some single precision VFP instructions may be executed on both NEON and VFP
  1017. // pipelines.
  1018. let D = VFPNeonDomain;
  1019. }
  1020. // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
  1021. def VMOVSR : AVConv4I<0b11100000, 0b1010,
  1022. (outs SPR:$Sn), (ins GPR:$Rt),
  1023. IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
  1024. [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
  1025. Requires<[HasFPRegs, UseVMOVSR]>,
  1026. Sched<[WriteFPMOV]> {
  1027. // Instruction operands.
  1028. bits<5> Sn;
  1029. bits<4> Rt;
  1030. // Encode instruction operands.
  1031. let Inst{19-16} = Sn{4-1};
  1032. let Inst{7} = Sn{0};
  1033. let Inst{15-12} = Rt;
  1034. let Inst{6-5} = 0b00;
  1035. let Inst{3-0} = 0b0000;
  1036. // Some single precision VFP instructions may be executed on both NEON and VFP
  1037. // pipelines.
  1038. let D = VFPNeonDomain;
  1039. }
  1040. } // isMoveReg
  1041. def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;
  1042. let hasSideEffects = 0 in {
  1043. def VMOVRRD : AVConv3I<0b11000101, 0b1011,
  1044. (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
  1045. IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
  1046. [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
  1047. Requires<[HasFPRegs]>,
  1048. Sched<[WriteFPMOV]> {
  1049. // Instruction operands.
  1050. bits<5> Dm;
  1051. bits<4> Rt;
  1052. bits<4> Rt2;
  1053. // Encode instruction operands.
  1054. let Inst{3-0} = Dm{3-0};
  1055. let Inst{5} = Dm{4};
  1056. let Inst{15-12} = Rt;
  1057. let Inst{19-16} = Rt2;
  1058. let Inst{7-6} = 0b00;
  1059. // Some single precision VFP instructions may be executed on both NEON and VFP
  1060. // pipelines.
  1061. let D = VFPNeonDomain;
  1062. // This instruction is equivalent to
  1063. // $Rt = EXTRACT_SUBREG $Dm, ssub_0
  1064. // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
  1065. let isExtractSubreg = 1;
  1066. }
  1067. def VMOVRRS : AVConv3I<0b11000101, 0b1010,
  1068. (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
  1069. IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
  1070. [/* For disassembly only; pattern left blank */]>,
  1071. Requires<[HasFPRegs]>,
  1072. Sched<[WriteFPMOV]> {
  1073. bits<5> src1;
  1074. bits<4> Rt;
  1075. bits<4> Rt2;
  1076. // Encode instruction operands.
  1077. let Inst{3-0} = src1{4-1};
  1078. let Inst{5} = src1{0};
  1079. let Inst{15-12} = Rt;
  1080. let Inst{19-16} = Rt2;
  1081. let Inst{7-6} = 0b00;
  1082. // Some single precision VFP instructions may be executed on both NEON and VFP
  1083. // pipelines.
  1084. let D = VFPNeonDomain;
  1085. let DecoderMethod = "DecodeVMOVRRS";
  1086. }
  1087. } // hasSideEffects
// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR
  1090. def VMOVDRR : AVConv5I<0b11000100, 0b1011,
  1091. (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
  1092. IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
  1093. [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
  1094. Requires<[HasFPRegs]>,
  1095. Sched<[WriteFPMOV]> {
  1096. // Instruction operands.
  1097. bits<5> Dm;
  1098. bits<4> Rt;
  1099. bits<4> Rt2;
  1100. // Encode instruction operands.
  1101. let Inst{3-0} = Dm{3-0};
  1102. let Inst{5} = Dm{4};
  1103. let Inst{15-12} = Rt;
  1104. let Inst{19-16} = Rt2;
  1105. let Inst{7-6} = 0b00;
  1106. // Some single precision VFP instructions may be executed on both NEON and VFP
  1107. // pipelines.
  1108. let D = VFPNeonDomain;
  1109. // This instruction is equivalent to
  1110. // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
  1111. let isRegSequence = 1;
  1112. }
// Hoist an fabs or a fneg of a value coming from integer registers
// and do the fabs/fneg on the integer value. This is never a loss
// and could enable the conversion to float to be removed completely.
  1116. def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
  1117. (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
  1118. Requires<[IsARM, HasV6T2]>;
  1119. def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
  1120. (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
  1121. Requires<[IsThumb2, HasV6T2]>;
  1122. def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
  1123. (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
  1124. Requires<[IsARM]>;
  1125. def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
  1126. (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
  1127. Requires<[IsThumb2]>;
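// For example, an fneg of a double assembled from r0:r1 becomes
// "eor r1, r1, #0x80000000" followed by "vmov d0, r0, r1", so no VFP negate
// (or conversion of the inputs) is needed.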
  1128. let hasSideEffects = 0 in
  1129. def VMOVSRR : AVConv5I<0b11000100, 0b1010,
  1130. (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
  1131. IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
  1132. [/* For disassembly only; pattern left blank */]>,
  1133. Requires<[HasFPRegs]>,
  1134. Sched<[WriteFPMOV]> {
  1135. // Instruction operands.
  1136. bits<5> dst1;
  1137. bits<4> src1;
  1138. bits<4> src2;
  1139. // Encode instruction operands.
  1140. let Inst{3-0} = dst1{4-1};
  1141. let Inst{5} = dst1{0};
  1142. let Inst{15-12} = src1;
  1143. let Inst{19-16} = src2;
  1144. let Inst{7-6} = 0b00;
  1145. // Some single precision VFP instructions may be executed on both NEON and VFP
  1146. // pipelines.
  1147. let D = VFPNeonDomain;
  1148. let DecoderMethod = "DecodeVMOVSRR";
  1149. }
// Move H->R, clearing top 16 bits
  1151. def VMOVRH : AVConv2I<0b11100001, 0b1001,
  1152. (outs rGPR:$Rt), (ins HPR:$Sn),
  1153. IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
  1154. []>,
  1155. Requires<[HasFPRegs16]>,
  1156. Sched<[WriteFPMOV]> {
  1157. // Instruction operands.
  1158. bits<4> Rt;
  1159. bits<5> Sn;
  1160. // Encode instruction operands.
  1161. let Inst{19-16} = Sn{4-1};
  1162. let Inst{7} = Sn{0};
  1163. let Inst{15-12} = Rt;
  1164. let Inst{6-5} = 0b00;
  1165. let Inst{3-0} = 0b0000;
  1166. let isUnpredicable = 1;
  1167. }
// Move R->H, clearing top 16 bits
  1169. def VMOVHR : AVConv4I<0b11100000, 0b1001,
  1170. (outs HPR:$Sn), (ins rGPR:$Rt),
  1171. IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
  1172. []>,
  1173. Requires<[HasFPRegs16]>,
  1174. Sched<[WriteFPMOV]> {
  1175. // Instruction operands.
  1176. bits<5> Sn;
  1177. bits<4> Rt;
  1178. // Encode instruction operands.
  1179. let Inst{19-16} = Sn{4-1};
  1180. let Inst{7} = Sn{0};
  1181. let Inst{15-12} = Rt;
  1182. let Inst{6-5} = 0b00;
  1183. let Inst{3-0} = 0b0000;
  1184. let isUnpredicable = 1;
  1185. }
  1186. def : FPRegs16Pat<(arm_vmovrh (f16 HPR:$Sn)), (VMOVRH (f16 HPR:$Sn))>;
  1187. def : FPRegs16Pat<(arm_vmovrh (bf16 HPR:$Sn)), (VMOVRH (bf16 HPR:$Sn))>;
  1188. def : FPRegs16Pat<(f16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
  1189. def : FPRegs16Pat<(bf16 (arm_vmovhr rGPR:$Rt)), (VMOVHR rGPR:$Rt)>;
// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX: SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR: GPR -> VFP system reg

// Int -> FP:
  1197. class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1198. bits<4> opcod4, dag oops, dag iops,
  1199. InstrItinClass itin, string opc, string asm,
  1200. list<dag> pattern>
  1201. : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1202. pattern> {
  1203. // Instruction operands.
  1204. bits<5> Dd;
  1205. bits<5> Sm;
  1206. // Encode instruction operands.
  1207. let Inst{3-0} = Sm{4-1};
  1208. let Inst{5} = Sm{0};
  1209. let Inst{15-12} = Dd{3-0};
  1210. let Inst{22} = Dd{4};
  1211. let Predicates = [HasVFP2, HasDPVFP];
  1212. let hasSideEffects = 0;
  1213. }
  1214. class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1215. bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
  1216. string opc, string asm, list<dag> pattern>
  1217. : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1218. pattern> {
  1219. // Instruction operands.
  1220. bits<5> Sd;
  1221. bits<5> Sm;
  1222. // Encode instruction operands.
  1223. let Inst{3-0} = Sm{4-1};
  1224. let Inst{5} = Sm{0};
  1225. let Inst{15-12} = Sd{4-1};
  1226. let Inst{22} = Sd{0};
  1227. let hasSideEffects = 0;
  1228. }
  1229. class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1230. bits<4> opcod4, dag oops, dag iops,
  1231. InstrItinClass itin, string opc, string asm,
  1232. list<dag> pattern>
  1233. : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1234. pattern> {
  1235. // Instruction operands.
  1236. bits<5> Sd;
  1237. bits<5> Sm;
  1238. // Encode instruction operands.
  1239. let Inst{3-0} = Sm{4-1};
  1240. let Inst{5} = Sm{0};
  1241. let Inst{15-12} = Sd{4-1};
  1242. let Inst{22} = Sd{0};
  1243. let Predicates = [HasFullFP16];
  1244. let hasSideEffects = 0;
  1245. }
  1246. def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
  1247. (outs DPR:$Dd), (ins SPR:$Sm),
  1248. IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
  1249. []>,
  1250. Sched<[WriteFPCVT]> {
  1251. let Inst{7} = 1; // s32
  1252. }
  1253. let Predicates=[HasVFP2, HasDPVFP] in {
  1254. def : VFPPat<(f64 (sint_to_fp GPR:$a)),
  1255. (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
  1256. def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
  1257. (VSITOD (VLDRS addrmode5:$a))>;
  1258. }
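// For instance, (f64 (sint_to_fp i32)) selects to a GPR->SPR copy followed by
// "vcvt.f64.s32 Dd, Sm"; when the i32 comes straight from an aligned load it
// is loaded with VLDRS instead, so the value never touches the integer
// registers.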
  1259. def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
  1260. (outs SPR:$Sd),(ins SPR:$Sm),
  1261. IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
  1262. []>,
  1263. Sched<[WriteFPCVT]> {
  1264. let Inst{7} = 1; // s32
  1265. // Some single precision VFP instructions may be executed on both NEON and
  1266. // VFP pipelines on A8.
  1267. let D = VFPNeonA8Domain;
  1268. }
  1269. def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
  1270. (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
  1271. def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
  1272. (VSITOS (VLDRS addrmode5:$a))>;
  1273. def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
  1274. (outs HPR:$Sd), (ins SPR:$Sm),
  1275. IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
  1276. []>,
  1277. Sched<[WriteFPCVT]> {
  1278. let Inst{7} = 1; // s32
  1279. let isUnpredicable = 1;
  1280. }
  1281. def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
  1282. (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
  1283. def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
  1284. (outs DPR:$Dd), (ins SPR:$Sm),
  1285. IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
  1286. []>,
  1287. Sched<[WriteFPCVT]> {
  1288. let Inst{7} = 0; // u32
  1289. }
  1290. let Predicates=[HasVFP2, HasDPVFP] in {
  1291. def : VFPPat<(f64 (uint_to_fp GPR:$a)),
  1292. (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
  1293. def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
  1294. (VUITOD (VLDRS addrmode5:$a))>;
  1295. }
  1296. def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
  1297. (outs SPR:$Sd), (ins SPR:$Sm),
  1298. IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
  1299. []>,
  1300. Sched<[WriteFPCVT]> {
  1301. let Inst{7} = 0; // u32
  1302. // Some single precision VFP instructions may be executed on both NEON and
  1303. // VFP pipelines on A8.
  1304. let D = VFPNeonA8Domain;
  1305. }
  1306. def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
  1307. (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
  1308. def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
  1309. (VUITOS (VLDRS addrmode5:$a))>;
  1310. def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
  1311. (outs HPR:$Sd), (ins SPR:$Sm),
  1312. IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
  1313. []>,
  1314. Sched<[WriteFPCVT]> {
  1315. let Inst{7} = 0; // u32
  1316. let isUnpredicable = 1;
  1317. }
  1318. def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
  1319. (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
// FP -> Int:
  1321. class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1322. bits<4> opcod4, dag oops, dag iops,
  1323. InstrItinClass itin, string opc, string asm,
  1324. list<dag> pattern>
  1325. : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1326. pattern> {
  1327. // Instruction operands.
  1328. bits<5> Sd;
  1329. bits<5> Dm;
  1330. // Encode instruction operands.
  1331. let Inst{3-0} = Dm{3-0};
  1332. let Inst{5} = Dm{4};
  1333. let Inst{15-12} = Sd{4-1};
  1334. let Inst{22} = Sd{0};
  1335. let Predicates = [HasVFP2, HasDPVFP];
  1336. let hasSideEffects = 0;
  1337. }
  1338. class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1339. bits<4> opcod4, dag oops, dag iops,
  1340. InstrItinClass itin, string opc, string asm,
  1341. list<dag> pattern>
  1342. : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1343. pattern> {
  1344. // Instruction operands.
  1345. bits<5> Sd;
  1346. bits<5> Sm;
  1347. // Encode instruction operands.
  1348. let Inst{3-0} = Sm{4-1};
  1349. let Inst{5} = Sm{0};
  1350. let Inst{15-12} = Sd{4-1};
  1351. let Inst{22} = Sd{0};
  1352. let hasSideEffects = 0;
  1353. }
  1354. class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
  1355. bits<4> opcod4, dag oops, dag iops,
  1356. InstrItinClass itin, string opc, string asm,
  1357. list<dag> pattern>
  1358. : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
  1359. pattern> {
  1360. // Instruction operands.
  1361. bits<5> Sd;
  1362. bits<5> Sm;
  1363. // Encode instruction operands.
  1364. let Inst{3-0} = Sm{4-1};
  1365. let Inst{5} = Sm{0};
  1366. let Inst{15-12} = Sd{4-1};
  1367. let Inst{22} = Sd{0};
  1368. let Predicates = [HasFullFP16];
  1369. let hasSideEffects = 0;
  1370. }
// Always set Z bit in the instruction, i.e. "round towards zero" variants.
  1372. def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
  1373. (outs SPR:$Sd), (ins DPR:$Dm),
  1374. IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
  1375. []>,
  1376. Sched<[WriteFPCVT]> {
  1377. let Inst{7} = 1; // Z bit
  1378. }
  1379. let Predicates=[HasVFP2, HasDPVFP] in {
  1380. def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
  1381. (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
  1382. def : VFPPat<(i32 (fp_to_sint_sat (f64 DPR:$a), i32)),
  1383. (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
  1384. def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
  1385. (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
  1386. def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
  1387. (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
  1388. }
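// As above, a converted result that is immediately stored stays in an
// S register and is written out with VSTRS, avoiding a round trip through a
// GPR.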
  1389. def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
  1390. (outs SPR:$Sd), (ins SPR:$Sm),
  1391. IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
  1392. []>,
  1393. Sched<[WriteFPCVT]> {
  1394. let Inst{7} = 1; // Z bit
  1395. // Some single precision VFP instructions may be executed on both NEON and
  1396. // VFP pipelines on A8.
  1397. let D = VFPNeonA8Domain;
  1398. }
  1399. def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
  1400. (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
  1401. def : VFPPat<(i32 (fp_to_sint_sat SPR:$a, i32)),
  1402. (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
  1403. def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
  1404. addrmode5:$ptr),
  1405. (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
  1406. def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f32 SPR:$a), i32)),
  1407. addrmode5:$ptr),
  1408. (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
  1409. def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
  1410. (outs SPR:$Sd), (ins HPR:$Sm),
  1411. IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
  1412. []>,
  1413. Sched<[WriteFPCVT]> {
  1414. let Inst{7} = 1; // Z bit
  1415. let isUnpredicable = 1;
  1416. }
  1417. def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
  1418. (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
  1419. def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
  1420. (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
  1421. def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
  1422. (outs SPR:$Sd), (ins DPR:$Dm),
  1423. IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
  1424. []>,
  1425. Sched<[WriteFPCVT]> {
  1426. let Inst{7} = 1; // Z bit
  1427. }
  1428. let Predicates=[HasVFP2, HasDPVFP] in {
  1429. def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
  1430. (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
  1431. def : VFPPat<(i32 (fp_to_uint_sat (f64 DPR:$a), i32)),
  1432. (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
  1433. def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
  1434. (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
  1435. def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f64 DPR:$a), i32)), addrmode5:$ptr),
  1436. (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
  1437. }
  1438. def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
  1439. (outs SPR:$Sd), (ins SPR:$Sm),
  1440. IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
  1441. []>,
  1442. Sched<[WriteFPCVT]> {
  1443. let Inst{7} = 1; // Z bit
  1444. // Some single precision VFP instructions may be executed on both NEON and
  1445. // VFP pipelines on A8.
  1446. let D = VFPNeonA8Domain;
  1447. }
  1448. def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
  1449. (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
  1450. def : VFPPat<(i32 (fp_to_uint_sat SPR:$a, i32)),
  1451. (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
  1452. def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
  1453. addrmode5:$ptr),
  1454. (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
  1455. def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f32 SPR:$a), i32)),
  1456. addrmode5:$ptr),
  1457. (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
  1458. def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
  1459. (outs SPR:$Sd), (ins HPR:$Sm),
  1460. IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
  1461. []>,
  1462. Sched<[WriteFPCVT]> {
  1463. let Inst{7} = 1; // Z bit
  1464. let isUnpredicable = 1;
  1465. }
  1466. def : VFPNoNEONPat<(i32 (fp_to_uint (f16 HPR:$a))),
  1467. (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
  1468. def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
  1469. (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
  1471. let Uses = [FPSCR] in {
  1472. def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
  1473. (outs SPR:$Sd), (ins DPR:$Dm),
  1474. IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
  1475. [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
  1476. Sched<[WriteFPCVT]> {
  1477. let Inst{7} = 0; // Z bit
  1478. }
  1479. def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
  1480. (outs SPR:$Sd), (ins SPR:$Sm),
  1481. IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
  1482. [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
  1483. Sched<[WriteFPCVT]> {
  1484. let Inst{7} = 0; // Z bit
  1485. }
  1486. def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
  1487. (outs SPR:$Sd), (ins SPR:$Sm),
  1488. IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
  1489. []>,
  1490. Sched<[WriteFPCVT]> {
  1491. let Inst{7} = 0; // Z bit
  1492. let isUnpredicable = 1;
  1493. }
  1494. def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
  1495. (outs SPR:$Sd), (ins DPR:$Dm),
  1496. IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
[(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]>,
  1498. Sched<[WriteFPCVT]> {
  1499. let Inst{7} = 0; // Z bit
  1500. }
  1501. def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
  1502. (outs SPR:$Sd), (ins SPR:$Sm),
  1503. IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
  1504. [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
  1505. Sched<[WriteFPCVT]> {
  1506. let Inst{7} = 0; // Z bit
  1507. }
  1508. def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
  1509. (outs SPR:$Sd), (ins SPR:$Sm),
  1510. IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
  1511. []>,
  1512. Sched<[WriteFPCVT]> {
  1513. let Inst{7} = 0; // Z bit
  1514. let isUnpredicable = 1;
  1515. }
  1516. }
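// Unlike the Z-bit variants above, these "vcvtr" forms (e.g.
// "vcvtr.s32.f64 s0, d1") honour the rounding mode currently held in FPSCR,
// which is why they are modelled with Uses = [FPSCR].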
// v8.3-a JavaScript Convert to Signed fixed-point
  1518. def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
  1519. (outs SPR:$Sd), (ins DPR:$Dm),
  1520. IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
  1521. []>,
  1522. Requires<[HasFPARMv8, HasV8_3a]> {
  1523. let Inst{7} = 1; // Z bit
  1524. }
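// e.g. "vjcvt.s32.f64 s0, d0"; only available with FP-ARMv8 plus the v8.3a
// extensions, per the Requires<> above.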
// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL
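// For example, VTOSHS is "vcvt.s16.f32 s0, s0, #8": convert f32 to S16
// fixed-point with 8 fraction bits, writing back into the source register
// (hence the "$a = $dst" constraint below).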
  1531. let Constraints = "$a = $dst" in {
  1532. // FP to Fixed-Point:
  1533. // Single Precision register
  1534. class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
  1535. bit op5, dag oops, dag iops, InstrItinClass itin,
  1536. string opc, string asm, list<dag> pattern>
  1537. : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  1538. bits<5> dst;
  1539. // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  1540. let Inst{22} = dst{0};
  1541. let Inst{15-12} = dst{4-1};
  1542. let hasSideEffects = 0;
  1543. }
  1544. // Double Precision register
  1545. class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
  1546. bit op5, dag oops, dag iops, InstrItinClass itin,
  1547. string opc, string asm, list<dag> pattern>
  1548. : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  1549. bits<5> dst;
  1550. // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  1551. let Inst{22} = dst{4};
  1552. let Inst{15-12} = dst{3-0};
  1553. let hasSideEffects = 0;
  1554. let Predicates = [HasVFP2, HasDPVFP];
  1555. }
  1556. let isUnpredicable = 1 in {
  1557. def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
  1558. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1559. IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
  1560. Requires<[HasFullFP16]>,
  1561. Sched<[WriteFPCVT]>;
  1562. def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
  1563. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1564. IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
  1565. Requires<[HasFullFP16]>,
  1566. Sched<[WriteFPCVT]>;
  1567. def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
  1568. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1569. IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
  1570. Requires<[HasFullFP16]>,
  1571. Sched<[WriteFPCVT]>;
  1572. def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
  1573. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1574. IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
  1575. Requires<[HasFullFP16]>,
  1576. Sched<[WriteFPCVT]>;
  1577. } // End of 'let isUnpredicable = 1 in'
  1578. def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
  1579. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1580. IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
  1581. Sched<[WriteFPCVT]> {
  1582. // Some single precision VFP instructions may be executed on both NEON and
  1583. // VFP pipelines on A8.
  1584. let D = VFPNeonA8Domain;
  1585. }
  1586. def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
  1587. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1588. IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []>,
  1589. Sched<[WriteFPCVT]> {
  1590. // Some single precision VFP instructions may be executed on both NEON and
  1591. // VFP pipelines on A8.
  1592. let D = VFPNeonA8Domain;
  1593. }
  1594. def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
  1595. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1596. IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []>,
  1597. Sched<[WriteFPCVT]> {
  1598. // Some single precision VFP instructions may be executed on both NEON and
  1599. // VFP pipelines on A8.
  1600. let D = VFPNeonA8Domain;
  1601. }
  1602. def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
  1603. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1604. IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []>,
  1605. Sched<[WriteFPCVT]> {
  1606. // Some single precision VFP instructions may be executed on both NEON and
  1607. // VFP pipelines on A8.
  1608. let D = VFPNeonA8Domain;
  1609. }
  1610. def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
  1611. (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
  1612. IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
  1613. Sched<[WriteFPCVT]>;
  1614. def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
  1615. (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
  1616. IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
  1617. Sched<[WriteFPCVT]>;
  1618. def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
  1619. (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
  1620. IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
  1621. Sched<[WriteFPCVT]>;
  1622. def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
  1623. (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
  1624. IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
  1625. Sched<[WriteFPCVT]>;
// Fixed-Point to FP:
  1627. let isUnpredicable = 1 in {
  1628. def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
  1629. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1630. IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
  1631. Requires<[HasFullFP16]>,
  1632. Sched<[WriteFPCVT]>;
  1633. def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
  1634. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1635. IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
  1636. Requires<[HasFullFP16]>,
  1637. Sched<[WriteFPCVT]>;
  1638. def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
  1639. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1640. IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
  1641. Requires<[HasFullFP16]>,
  1642. Sched<[WriteFPCVT]>;
  1643. def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
  1644. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1645. IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
  1646. Requires<[HasFullFP16]>,
  1647. Sched<[WriteFPCVT]>;
  1648. } // End of 'let isUnpredicable = 1 in'
  1649. def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
  1650. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1651. IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
  1652. Sched<[WriteFPCVT]> {
  1653. // Some single precision VFP instructions may be executed on both NEON and
  1654. // VFP pipelines on A8.
  1655. let D = VFPNeonA8Domain;
  1656. }
  1657. def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
  1658. (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
  1659. IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
  1660. Sched<[WriteFPCVT]> {
  1661. // Some single precision VFP instructions may be executed on both NEON and
  1662. // VFP pipelines on A8.
  1663. let D = VFPNeonA8Domain;
  1664. }
  1665. def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
  1666. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1667. IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
  1668. Sched<[WriteFPCVT]> {
  1669. // Some single precision VFP instructions may be executed on both NEON and
  1670. // VFP pipelines on A8.
  1671. let D = VFPNeonA8Domain;
  1672. }
  1673. def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
  1674. (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
  1675. IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
  1676. Sched<[WriteFPCVT]> {
  1677. // Some single precision VFP instructions may be executed on both NEON and
  1678. // VFP pipelines on A8.
  1679. let D = VFPNeonA8Domain;
  1680. }
  1681. def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
  1682. (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
  1683. IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
  1684. Sched<[WriteFPCVT]>;
  1685. def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
  1686. (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
  1687. IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
  1688. Sched<[WriteFPCVT]>;
  1689. def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
  1690. (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
  1691. IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
  1692. Sched<[WriteFPCVT]>;
  1693. def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
  1694. (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
  1695. IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
  1696. Sched<[WriteFPCVT]>;
  1697. } // End of 'let Constraints = "$a = $dst" in'
// BFloat16 - Single precision, unary, predicated
  1699. class BF16_VCVT<string opc, bits<2> op7_6>
  1700. : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm),
  1701. VFPUnaryFrm, NoItinerary,
  1702. opc, ".bf16.f32\t$Sd, $Sm", "", []>,
  1703. RegConstraint<"$dst = $Sd">,
  1704. Requires<[HasBF16]>,
  1705. Sched<[]> {
  1706. bits<5> Sd;
  1707. bits<5> Sm;
  1708. // Encode instruction operands.
  1709. let Inst{3-0} = Sm{4-1};
  1710. let Inst{5} = Sm{0};
  1711. let Inst{15-12} = Sd{4-1};
  1712. let Inst{22} = Sd{0};
  1713. let Inst{27-23} = 0b11101; // opcode1
  1714. let Inst{21-20} = 0b11; // opcode2
  1715. let Inst{19-16} = 0b0011; // opcode3
  1716. let Inst{11-8} = 0b1001;
  1717. let Inst{7-6} = op7_6;
  1718. let Inst{4} = 0;
  1719. let DecoderNamespace = "VFPV8";
  1720. let hasSideEffects = 0;
  1721. }
  1722. def BF16_VCVTB : BF16_VCVT<"vcvtb", 0b01>;
  1723. def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>;
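// e.g. "vcvtb.bf16.f32 s0, s1" / "vcvtt.bf16.f32 s0, s1". The destination is
// also an input ($dst = $Sd) because only one 16-bit half of Sd is written:
// vcvtb targets the bottom half and vcvtt the top half.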
//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
//
  1727. def VMLAD : ADbI<0b11100, 0b00, 0, 0,
  1728. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1729. IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
  1730. [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
  1731. (f64 DPR:$Ddin)))]>,
  1732. RegConstraint<"$Ddin = $Dd">,
  1733. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
  1734. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1735. def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
  1736. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1737. IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
  1738. [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
  1739. SPR:$Sdin))]>,
  1740. RegConstraint<"$Sdin = $Sd">,
  1741. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
  1742. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1743. // Some single precision VFP instructions may be executed on both NEON and
  1744. // VFP pipelines on A8.
  1745. let D = VFPNeonA8Domain;
  1746. }
  1747. def VMLAH : AHbI<0b11100, 0b00, 0, 0,
  1748. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1749. IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
  1750. [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
  1751. (f16 HPR:$Sdin)))]>,
  1752. RegConstraint<"$Sdin = $Sd">,
  1753. Requires<[HasFullFP16,UseFPVMLx]>;
  1754. def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
  1755. (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
  1756. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
  1757. def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
  1758. (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
  1759. Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
  1760. def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
  1761. (VMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1762. Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
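// e.g. "vmla.f32 s0, s1, s2" computes s0 = s0 + s1 * s2 as a separately
// rounded multiply and add; the fused equivalents are in the VFMA section
// further down and are selected only under UseFusedMAC.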
  1763. def VMLSD : ADbI<0b11100, 0b00, 1, 0,
  1764. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1765. IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
  1766. [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
  1767. (f64 DPR:$Ddin)))]>,
  1768. RegConstraint<"$Ddin = $Dd">,
  1769. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
  1770. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1771. def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
  1772. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1773. IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
  1774. [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
  1775. SPR:$Sdin))]>,
  1776. RegConstraint<"$Sdin = $Sd">,
  1777. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
  1778. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1779. // Some single precision VFP instructions may be executed on both NEON and
  1780. // VFP pipelines on A8.
  1781. let D = VFPNeonA8Domain;
  1782. }
  1783. def VMLSH : AHbI<0b11100, 0b00, 1, 0,
  1784. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1785. IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
  1786. [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
  1787. (f16 HPR:$Sdin)))]>,
  1788. RegConstraint<"$Sdin = $Sd">,
  1789. Requires<[HasFullFP16,UseFPVMLx]>;
  1790. def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
  1791. (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
  1792. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
  1793. def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
  1794. (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
  1795. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
  1796. def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
  1797. (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1798. Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
  1799. def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
  1800. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1801. IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
  1802. [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
  1803. (f64 DPR:$Ddin)))]>,
  1804. RegConstraint<"$Ddin = $Dd">,
  1805. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
  1806. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1807. def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
  1808. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1809. IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
  1810. [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
  1811. SPR:$Sdin))]>,
  1812. RegConstraint<"$Sdin = $Sd">,
  1813. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
  1814. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1815. // Some single precision VFP instructions may be executed on both NEON and
  1816. // VFP pipelines on A8.
  1817. let D = VFPNeonA8Domain;
  1818. }
  1819. def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
  1820. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1821. IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
  1822. [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
  1823. (f16 HPR:$Sdin)))]>,
  1824. RegConstraint<"$Sdin = $Sd">,
  1825. Requires<[HasFullFP16,UseFPVMLx]>;
// (-(a * b) - dst) -> -(dst + (a * b))
  1827. def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
  1828. (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
  1829. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
  1830. def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
  1831. (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
  1832. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
  1833. def : Pat<(fsub_mlx (fneg (fmul_su (f16 HPR:$a), HPR:$b)), HPR:$dstin),
  1834. (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1835. Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
// (-dst - (a * b)) -> -(dst + (a * b))
  1837. def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
  1838. (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
  1839. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
  1840. def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
  1841. (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
  1842. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
  1843. def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)),
  1844. (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1845. Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
  1846. def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
  1847. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1848. IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
  1849. [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
  1850. (f64 DPR:$Ddin)))]>,
  1851. RegConstraint<"$Ddin = $Dd">,
  1852. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
  1853. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1854. def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
  1855. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1856. IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
  1857. [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
  1858. RegConstraint<"$Sdin = $Sd">,
  1859. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
  1860. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1861. // Some single precision VFP instructions may be executed on both NEON and
  1862. // VFP pipelines on A8.
  1863. let D = VFPNeonA8Domain;
  1864. }
  1865. def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
  1866. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1867. IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
  1868. [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
  1869. RegConstraint<"$Sdin = $Sd">,
  1870. Requires<[HasFullFP16,UseFPVMLx]>;
  1871. def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
  1872. (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
  1873. Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
  1874. def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
  1875. (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
  1876. Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
  1877. def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin),
  1878. (VNMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1879. Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
//===----------------------------------------------------------------------===//
// Fused FP Multiply-Accumulate Operations.
//
  1883. def VFMAD : ADbI<0b11101, 0b10, 0, 0,
  1884. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1885. IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
  1886. [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
  1887. (f64 DPR:$Ddin)))]>,
  1888. RegConstraint<"$Ddin = $Dd">,
  1889. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
  1890. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1891. def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
  1892. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1893. IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
  1894. [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
  1895. SPR:$Sdin))]>,
  1896. RegConstraint<"$Sdin = $Sd">,
  1897. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
  1898. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1899. // Some single precision VFP instructions may be executed on both NEON and
  1900. // VFP pipelines.
  1901. }
  1902. def VFMAH : AHbI<0b11101, 0b10, 0, 0,
  1903. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1904. IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
  1905. [(set (f16 HPR:$Sd), (fadd_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)),
  1906. (f16 HPR:$Sdin)))]>,
  1907. RegConstraint<"$Sdin = $Sd">,
  1908. Requires<[HasFullFP16,UseFusedMAC]>,
  1909. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1910. def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
  1911. (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
  1912. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
  1913. def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
  1914. (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
  1915. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
  1916. def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
  1917. (VFMAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1918. Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
  1921. def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
  1922. (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  1923. Requires<[HasVFP4,HasDPVFP]>;
  1924. def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
  1925. (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  1926. Requires<[HasVFP4]>;
  1927. def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
  1928. (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  1929. Requires<[HasFullFP16]>;
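// e.g. an IR call of llvm.fma.f32(a, b, c) selects "vfma.f32 Sd, Sn, Sm" with
// c placed in Sd first, since VFMA accumulates into its tied destination.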
  1930. def VFMSD : ADbI<0b11101, 0b10, 1, 0,
  1931. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1932. IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
  1933. [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
  1934. (f64 DPR:$Ddin)))]>,
  1935. RegConstraint<"$Ddin = $Dd">,
  1936. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
  1937. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1938. def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
  1939. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1940. IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
  1941. [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
  1942. SPR:$Sdin))]>,
  1943. RegConstraint<"$Sdin = $Sd">,
  1944. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
  1945. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1946. // Some single precision VFP instructions may be executed on both NEON and
  1947. // VFP pipelines.
  1948. }
  1949. def VFMSH : AHbI<0b11101, 0b10, 1, 0,
  1950. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1951. IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
  1952. [(set (f16 HPR:$Sd), (fadd_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
  1953. (f16 HPR:$Sdin)))]>,
  1954. RegConstraint<"$Sdin = $Sd">,
  1955. Requires<[HasFullFP16,UseFusedMAC]>,
  1956. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1957. def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
  1958. (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
  1959. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
  1960. def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
  1961. (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
  1962. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
  1963. def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
  1964. (VFMSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
  1965. Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
// (fma (fneg x), y, z) -> (vfms z, x, y)
  1968. def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
  1969. (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  1970. Requires<[HasVFP4,HasDPVFP]>;
  1971. def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
  1972. (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  1973. Requires<[HasVFP4]>;
  1974. def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
  1975. (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  1976. Requires<[HasFullFP16]>;
  1977. def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
  1978. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  1979. IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
  1980. [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
  1981. (f64 DPR:$Ddin)))]>,
  1982. RegConstraint<"$Ddin = $Dd">,
  1983. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
  1984. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  1985. def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
  1986. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  1987. IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
  1988. [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
  1989. SPR:$Sdin))]>,
  1990. RegConstraint<"$Sdin = $Sd">,
  1991. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
  1992. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  1993. // Some single precision VFP instructions may be executed on both NEON and
  1994. // VFP pipelines.
  1995. }
  1996. def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
  1997. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  1998. IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
  1999. [(set (f16 HPR:$Sd), (fsub_mlx (fneg (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm))),
  2000. (f16 HPR:$Sdin)))]>,
  2001. RegConstraint<"$Sdin = $Sd">,
  2002. Requires<[HasFullFP16,UseFusedMAC]>,
  2003. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  2004. def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
  2005. (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
  2006. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
  2007. def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
  2008. (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
  2009. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
  2012. def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
  2013. (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  2014. Requires<[HasVFP4,HasDPVFP]>;
  2015. def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
  2016. (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  2017. Requires<[HasVFP4]>;
  2018. def : Pat<(fneg (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (f16 (f16 HPR:$Sdin)))),
  2019. (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  2020. Requires<[HasFullFP16]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
  2022. def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
  2023. (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  2024. Requires<[HasVFP4,HasDPVFP]>;
  2025. def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
  2026. (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  2027. Requires<[HasVFP4]>;
  2028. def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
  2029. (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  2030. Requires<[HasFullFP16]>;
  2031. def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
  2032. (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
  2033. IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
  2034. [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
  2035. (f64 DPR:$Ddin)))]>,
  2036. RegConstraint<"$Ddin = $Dd">,
  2037. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
  2038. Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  2039. def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
  2040. (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
  2041. IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
  2042. [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
  2043. RegConstraint<"$Sdin = $Sd">,
  2044. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
  2045. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  2046. // Some single precision VFP instructions may be executed on both NEON and
  2047. // VFP pipelines.
  2048. }
  2049. def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
  2050. (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
  2051. IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
  2052. [(set (f16 HPR:$Sd), (fsub_mlx (fmul_su (f16 HPR:$Sn), (f16 HPR:$Sm)), (f16 HPR:$Sdin)))]>,
  2053. RegConstraint<"$Sdin = $Sd">,
  2054. Requires<[HasFullFP16,UseFusedMAC]>,
  2055. Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
  2056. def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
  2057. (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
  2058. Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
  2059. def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
  2060. (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
  2061. Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
// Match @llvm.fma.* intrinsics
// (fma x, y, (fneg z)) -> (vfnms z, x, y)
  2064. def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
  2065. (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  2066. Requires<[HasVFP4,HasDPVFP]>;
  2067. def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
  2068. (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  2069. Requires<[HasVFP4]>;
  2070. def : Pat<(f16 (fma (f16 HPR:$Sn), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))),
  2071. (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  2072. Requires<[HasFullFP16]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
  2074. def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
  2075. (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
  2076. Requires<[HasVFP4,HasDPVFP]>;
  2077. def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
  2078. (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
  2079. Requires<[HasVFP4]>;
  2080. def : Pat<(fneg (f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin)))),
  2081. (VFNMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
  2082. Requires<[HasFullFP16]>;
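// Summary of the negated fused forms matched above (d is the tied accumulator):
//   vfnma d, a, b : d = -(a * b) - d
//   vfnms d, a, b : d =  (a * b) - d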
//===----------------------------------------------------------------------===//
// FP Conditional moves.
//
  2086. let hasSideEffects = 0 in {
  2087. def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
  2088. IIC_fpUNA64,
  2089. [(set (f64 DPR:$Dd),
  2090. (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
  2091. RegConstraint<"$Dn = $Dd">, Requires<[HasFPRegs64]>;
  2092. def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
  2093. IIC_fpUNA32,
  2094. [(set (f32 SPR:$Sd),
  2095. (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
  2096. RegConstraint<"$Sn = $Sd">, Requires<[HasFPRegs]>;
  2097. def VMOVHcc : PseudoInst<(outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm, cmovpred:$p),
  2098. IIC_fpUNA16,
  2099. [(set (f16 HPR:$Sd),
  2100. (ARMcmov (f16 HPR:$Sn), (f16 HPR:$Sm), cmovpred:$p))]>,
  2101. RegConstraint<"$Sd = $Sn">, Requires<[HasFPRegs]>;
  2102. } // hasSideEffects
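// Each conditional-move pseudo ties its destination to the first source
// operand, so when the predicate is false the register simply keeps its
// current value.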
//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//
  2106. class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
  2107. list<dag> pattern>:
  2108. VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> {
  2109. // Instruction operand.
  2110. bits<4> Rt;
  2111. let Inst{27-20} = 0b11101111;
  2112. let Inst{19-16} = opc19_16;
  2113. let Inst{15-12} = Rt;
  2114. let Inst{11-8} = 0b1010;
  2115. let Inst{7} = 0;
  2116. let Inst{6-5} = 0b00;
  2117. let Inst{4} = 1;
  2118. let Inst{3-0} = 0b0000;
  2119. let Unpredictable{7-5} = 0b111;
  2120. let Unpredictable{3-0} = 0b1111;
  2121. }
  2122. let DecoderMethod = "DecodeForVMRSandVMSR" in {
// APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C, V
// flags to APSR.
  2125. let Defs = [CPSR], Uses = [FPSCR_NZCV], Predicates = [HasFPRegs],
  2126. Rt = 0b1111 /* apsr_nzcv */ in
  2127. def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
  2128. "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
// Application level FPSCR -> GPR
  2130. let hasSideEffects = 1, Uses = [FPSCR], Predicates = [HasFPRegs] in
  2131. def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
  2132. "vmrs", "\t$Rt, fpscr",
  2133. [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;
// System level FPEXC, FPSID -> GPR
  2135. let Uses = [FPSCR] in {
  2136. def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
  2137. "vmrs", "\t$Rt, fpexc", []>;
  2138. def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
  2139. "vmrs", "\t$Rt, fpsid", []>;
  2140. def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
  2141. "vmrs", "\t$Rt, mvfr0", []>;
  2142. def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
  2143. "vmrs", "\t$Rt, mvfr1", []>;
  2144. let Predicates = [HasFPARMv8] in {
  2145. def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
  2146. "vmrs", "\t$Rt, mvfr2", []>;
  2147. }
  2148. def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
  2149. "vmrs", "\t$Rt, fpinst", []>;
  2150. def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
  2151. (ins), "vmrs", "\t$Rt, fpinst2", []>;
  2152. let Predicates = [HasV8_1MMainline, HasFPRegs] in {
  2153. // System level FPSCR_NZCVQC -> GPR
  2154. def VMRS_FPSCR_NZCVQC
  2155. : MovFromVFP<0b0010 /* fpscr_nzcvqc */,
  2156. (outs GPR:$Rt), (ins cl_FPSCR_NZCV:$fpscr_in),
  2157. "vmrs", "\t$Rt, fpscr_nzcvqc", []>;
  2158. }
  2159. }
  2160. let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
  2161. // System level FPSCR -> GPR, with context saving for security extensions
  2162. def VMRS_FPCXTNS : MovFromVFP<0b1110 /* fpcxtns */, (outs GPR:$Rt), (ins),
  2163. "vmrs", "\t$Rt, fpcxtns", []>;
  2164. }
  2165. let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
  2166. // System level FPSCR -> GPR, with context saving for security extensions
  2167. def VMRS_FPCXTS : MovFromVFP<0b1111 /* fpcxts */, (outs GPR:$Rt), (ins),
  2168. "vmrs", "\t$Rt, fpcxts", []>;
  2169. }
  2170. let Predicates = [HasV8_1MMainline, HasMVEInt] in {
  2171. // System level VPR/P0 -> GPR
  2172. let Uses = [VPR] in
  2173. def VMRS_VPR : MovFromVFP<0b1100 /* vpr */, (outs GPR:$Rt), (ins),
  2174. "vmrs", "\t$Rt, vpr", []>;
  2175. def VMRS_P0 : MovFromVFP<0b1101 /* p0 */, (outs GPR:$Rt), (ins VCCR:$cond),
  2176. "vmrs", "\t$Rt, p0", []>;
  2177. }
  2178. }
//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//
  2182. class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
  2183. list<dag> pattern>:
  2184. VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, "", pattern> {
  2185. // Instruction operand.
  2186. bits<4> Rt;
  2187. let Inst{27-20} = 0b11101110;
  2188. let Inst{19-16} = opc19_16;
  2189. let Inst{15-12} = Rt;
  2190. let Inst{11-8} = 0b1010;
  2191. let Inst{7} = 0;
  2192. let Inst{6-5} = 0b00;
  2193. let Inst{4} = 1;
  2194. let Inst{3-0} = 0b0000;
  2195. let Predicates = [HasVFP2];
  2196. let Unpredictable{7-5} = 0b111;
  2197. let Unpredictable{3-0} = 0b1111;
  2198. }
  2199. let DecoderMethod = "DecodeForVMRSandVMSR" in {
  2200. let Defs = [FPSCR] in {
  2201. let Predicates = [HasFPRegs] in
  2202. // Application level GPR -> FPSCR
  2203. def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$Rt),
  2204. "vmsr", "\tfpscr, $Rt",
  2205. [(int_arm_set_fpscr GPRnopc:$Rt)]>;
  2206. // System level GPR -> FPEXC
  2207. def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$Rt),
  2208. "vmsr", "\tfpexc, $Rt", []>;
  2209. // System level GPR -> FPSID
  2210. def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$Rt),
  2211. "vmsr", "\tfpsid, $Rt", []>;
  2212. def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$Rt),
  2213. "vmsr", "\tfpinst, $Rt", []>;
  2214. def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$Rt),
  2215. "vmsr", "\tfpinst2, $Rt", []>;
  2216. }
  2217. let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
  2218. // System level GPR -> FPSCR with context saving for security extensions
  2219. def VMSR_FPCXTNS : MovToVFP<0b1110 /* fpcxtns */, (outs), (ins GPR:$Rt),
  2220. "vmsr", "\tfpcxtns, $Rt", []>;
  2221. }
  2222. let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
  2223. // System level GPR -> FPSCR with context saving for security extensions
  2224. def VMSR_FPCXTS : MovToVFP<0b1111 /* fpcxts */, (outs), (ins GPR:$Rt),
  2225. "vmsr", "\tfpcxts, $Rt", []>;
  2226. }
  2227. let Predicates = [HasV8_1MMainline, HasFPRegs] in {
  2228. // System level GPR -> FPSCR_NZCVQC
  2229. def VMSR_FPSCR_NZCVQC
  2230. : MovToVFP<0b0010 /* fpscr_nzcvqc */,
  2231. (outs cl_FPSCR_NZCV:$fpscr_out), (ins GPR:$Rt),
  2232. "vmsr", "\tfpscr_nzcvqc, $Rt", []>;
  2233. }
  2234. let Predicates = [HasV8_1MMainline, HasMVEInt] in {
  2235. // System level GPR -> VPR/P0
  2236. let Defs = [VPR] in
  2237. def VMSR_VPR : MovToVFP<0b1100 /* vpr */, (outs), (ins GPR:$Rt),
  2238. "vmsr", "\tvpr, $Rt", []>;
  2239. def VMSR_P0 : MovToVFP<0b1101 /* p0 */, (outs VCCR:$cond), (ins GPR:$Rt),
  2240. "vmsr", "\tp0, $Rt", []>;
  2241. }
  2242. }
//===----------------------------------------------------------------------===//
// Misc.
//

// Materialize FP immediates. VFP3 only.
  2247. let isReMaterializable = 1 in {
  2248. def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
  2249. VFPMiscFrm, IIC_fpUNA64,
  2250. "vmov", ".f64\t$Dd, $imm", "",
  2251. [(set DPR:$Dd, vfp_f64imm:$imm)]>,
  2252. Requires<[HasVFP3,HasDPVFP]> {
  2253. bits<5> Dd;
  2254. bits<8> imm;
  2255. let Inst{27-23} = 0b11101;
  2256. let Inst{22} = Dd{4};
  2257. let Inst{21-20} = 0b11;
  2258. let Inst{19-16} = imm{7-4};
  2259. let Inst{15-12} = Dd{3-0};
  2260. let Inst{11-9} = 0b101;
  2261. let Inst{8} = 1; // Double precision.
  2262. let Inst{7-4} = 0b0000;
  2263. let Inst{3-0} = imm{3-0};
  2264. }
  2265. def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
  2266. VFPMiscFrm, IIC_fpUNA32,
  2267. "vmov", ".f32\t$Sd, $imm", "",
  2268. [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  2269. bits<5> Sd;
  2270. bits<8> imm;
  2271. let Inst{27-23} = 0b11101;
  2272. let Inst{22} = Sd{0};
  2273. let Inst{21-20} = 0b11;
  2274. let Inst{19-16} = imm{7-4};
  2275. let Inst{15-12} = Sd{4-1};
  2276. let Inst{11-9} = 0b101;
  2277. let Inst{8} = 0; // Single precision.
  2278. let Inst{7-4} = 0b0000;
  2279. let Inst{3-0} = imm{3-0};
  2280. }
  2281. def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
  2282. VFPMiscFrm, IIC_fpUNA16,
  2283. "vmov", ".f16\t$Sd, $imm", "",
  2284. [(set (f16 HPR:$Sd), vfp_f16imm:$imm)]>,
  2285. Requires<[HasFullFP16]> {
  2286. bits<5> Sd;
  2287. bits<8> imm;
  2288. let Inst{27-23} = 0b11101;
  2289. let Inst{22} = Sd{0};
  2290. let Inst{21-20} = 0b11;
  2291. let Inst{19-16} = imm{7-4};
  2292. let Inst{15-12} = Sd{4-1};
  2293. let Inst{11-8} = 0b1001; // Half precision
  2294. let Inst{7-4} = 0b0000;
  2295. let Inst{3-0} = imm{3-0};
  2296. let isUnpredicable = 1;
  2297. }
  2298. }
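// $imm is the 8-bit VFP "modified immediate" (sign bit, 3 exponent bits,
// 4 fraction bits), so only a small set of values such as 1.0, 0.5 or 31.0 can
// be materialized this way; other constants are built by other means (e.g. a
// constant-pool load).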
  2299. def : Pat<(f32 (vfp_f32f16imm:$imm)),
  2300. (f32 (COPY_TO_REGCLASS (f16 (FCONSTH (vfp_f32f16imm_xform (f32 $imm)))), SPR))> {
  2301. let Predicates = [HasFullFP16];
  2302. }
//===----------------------------------------------------------------------===//
// Assembler aliases.
//
// A few mnemonic aliases for pre-unified syntax. We don't guarantee to
// support them all, but supporting at least some of the basics is
// good to be friendly.
  2309. def : VFP2MnemonicAlias<"flds", "vldr">;
  2310. def : VFP2MnemonicAlias<"fldd", "vldr">;
  2311. def : VFP2MnemonicAlias<"fmrs", "vmov">;
  2312. def : VFP2MnemonicAlias<"fmsr", "vmov">;
  2313. def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
  2314. def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
  2315. def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
  2316. def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
  2317. def : VFP2MnemonicAlias<"fmrdd", "vmov">;
  2318. def : VFP2MnemonicAlias<"fmrds", "vmov">;
  2319. def : VFP2MnemonicAlias<"fmrrd", "vmov">;
  2320. def : VFP2MnemonicAlias<"fmdrr", "vmov">;
  2321. def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
  2322. def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
  2323. def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
  2324. def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
  2325. def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
  2326. def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
  2327. def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
  2328. def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
  2329. def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
  2330. def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
  2331. def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
  2332. def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
  2333. def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
  2334. def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
  2335. def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
  2336. def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
  2337. def : VFP2MnemonicAlias<"fsts", "vstr">;
  2338. def : VFP2MnemonicAlias<"fstd", "vstr">;
  2339. def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
  2340. def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
  2341. def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
  2342. def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
  2343. def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
  2344. def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
  2345. def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
  2346. def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
  2347. def : VFP2MnemonicAlias<"fmrx", "vmrs">;
  2348. def : VFP2MnemonicAlias<"fmxr", "vmsr">;
  2349. // Be friendly and accept the old form of zero-compare
  2350. def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
  2351. def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
  2352. def : InstAlias<"fmstat${p}", (FMSTAT pred:$p), 0>, Requires<[HasFPRegs]>;
  2353. def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
  2354. (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
  2355. def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
  2356. (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
  2357. def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
  2358. (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
  2359. def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
  2360. (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
  2361. // No need for the size suffix on VSQRT. It's implied by the register classes.
  2362. def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
  2363. def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
  2364. // VLDR/VSTR accept an optional type suffix.
  2365. def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
  2366. (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
  2367. def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
  2368. (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
  2369. def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
  2370. (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
  2371. def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
  2372. (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
  2373. // VMOV can accept optional 32-bit or less data type suffix suffix.
  2374. def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
  2375. (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
  2376. def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
  2377. (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
  2378. def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
  2379. (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
  2380. def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
  2381. (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
  2382. def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
  2383. (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
  2384. def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
  2385. (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
  2386. def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
  2387. (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
  2388. def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
  2389. (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
  2390. // VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
  2391. // VMOVD does.
  2392. def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
  2393. (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
  2394. // FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
  2395. // These aliases provide added functionality over vmov.f instructions by
  2396. // allowing users to write assembly containing encoded floating point constants
  2397. // (e.g. #0x70 vs #1.0). Without these alises there is no way for the
  2398. // assembler to accept encoded fp constants (but the equivalent fp-literal is
  2399. // accepted directly by vmovf).
  2400. def : VFP3InstAlias<"fconstd${p} $Dd, $val",
  2401. (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
  2402. def : VFP3InstAlias<"fconsts${p} $Sd, $val",
  2403. (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
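
// Illustrative only (per the comment above, not additional definitions): the
// pre-UAL mnemonics take the raw 8-bit encoded constant, so for example
//   fconsts s0, #0x70    @ same value as "vmov.f32 s0, #1.0"
//   fconstd d1, #0x70    @ same value as "vmov.f64 d1, #1.0"
// since 0x70 is the VFP3 imm8 encoding of 1.0.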

// VSCCLRM - clear a list of FP registers (optionally including VPR). Requires
// v8.1-M Mainline and the 8-M Security Extensions (see Predicates below).
def VSCCLRMD : VFPXI<(outs), (ins pred:$p, fp_dreglist_with_vpr:$regs, variable_ops),
                     AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
                     "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
  bits<13> regs;
  let Inst{31-23} = 0b111011001;
  let Inst{22} = regs{12};
  let Inst{21-16} = 0b011111;
  let Inst{15-12} = regs{11-8};
  let Inst{11-8} = 0b1011;
  let Inst{7-1} = regs{7-1};
  let Inst{0} = 0;
  let DecoderMethod = "DecodeVSCCLRM";

  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}

def VSCCLRMS : VFPXI<(outs), (ins pred:$p, fp_sreglist_with_vpr:$regs, variable_ops),
                     AddrModeNone, 4, IndexModeNone, VFPMiscFrm, NoItinerary,
                     "vscclrm{$p}\t$regs", "", []>, Sched<[]> {
  bits<13> regs;
  let Inst{31-23} = 0b111011001;
  let Inst{22} = regs{8};
  let Inst{21-16} = 0b011111;
  let Inst{15-12} = regs{12-9};
  let Inst{11-8} = 0b1010;
  let Inst{7-0} = regs{7-0};
  let DecoderMethod = "DecodeVSCCLRM";

  list<Predicate> Predicates = [HasV8_1MMainline, Has8MSecExt];
}
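
// Illustrative only (syntax assumed from the reg-list operands above):
//   vscclrm {s0-s7, vpr}    @ single-precision list -> VSCCLRMS
//   vscclrm {d0-d3, vpr}    @ double-precision list -> VSCCLRMD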

//===----------------------------------------------------------------------===//
// Store/load VFP System Registers to/from memory.
//

class vfp_vstrldr<bit opc, bit P, bit W, bits<4> SysReg, string sysreg,
                  dag oops, dag iops, IndexMode im, string Dest, string cstr>
  : VFPI<oops, iops, AddrModeT2_i7s4, 4, im, VFPLdStFrm, IIC_fpSTAT,
         !if(opc, "vldr", "vstr"), !strconcat("\t", sysreg, ", ", Dest), cstr, []>,
    Sched<[]> {
  bits<12> addr;
  let Inst{27-25} = 0b110;
  let Inst{24} = P;
  let Inst{23} = addr{7};
  let Inst{22} = SysReg{3};
  let Inst{21} = W;
  let Inst{20} = opc;
  let Inst{19-16} = addr{11-8};
  let Inst{15-13} = SysReg{2-0};
  let Inst{12-7} = 0b011111;
  let Inst{6-0} = addr{6-0};
  list<Predicate> Predicates = [HasFPRegs, HasV8_1MMainline];
  let mayLoad = opc;
  let mayStore = !if(opc, 0b0, 0b1);
  let hasSideEffects = 1;
}

multiclass vfp_vstrldr_sysreg<bit opc, bits<4> SysReg, string sysreg,
                              dag oops=(outs), dag iops=(ins)> {
  def _off :
    vfp_vstrldr<opc, 1, 0, SysReg, sysreg,
                oops, !con(iops, (ins t2addrmode_imm7s4:$addr)),
                IndexModePost, "$addr", ""> {
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<false>";
  }

  def _pre :
    vfp_vstrldr<opc, 1, 1, SysReg, sysreg,
                !con(oops, (outs GPRnopc:$wb)),
                !con(iops, (ins t2addrmode_imm7s4_pre:$addr)),
                IndexModePre, "$addr!", "$addr.base = $wb"> {
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
  }

  def _post :
    vfp_vstrldr<opc, 0, 1, SysReg, sysreg,
                !con(oops, (outs GPRnopc:$wb)),
                !con(iops, (ins t2_addr_offset_none:$Rn,
                                t2am_imm7s4_offset:$addr)),
                IndexModePost, "$Rn$addr", "$Rn.base = $wb"> {
    bits<4> Rn;
    let Inst{19-16} = Rn{3-0};
    let DecoderMethod = "DecodeVSTRVLDR_SYSREG<true>";
  }
}
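
// Illustrative only: for each system register the multiclass above produces
// the three usual T32 addressing forms (syntax assumed from the asm strings
// "$addr", "$addr!" and "$Rn$addr"), e.g. for VSTR_FPSCR:
//   vstr fpscr, [r0, #8]    @ _off  : offset form, no write-back
//   vstr fpscr, [r0, #8]!   @ _pre  : pre-indexed, base register updated
//   vstr fpscr, [r0], #8    @ _post : post-indexed, base register updated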

let Defs = [FPSCR] in {
  defm VSTR_FPSCR        : vfp_vstrldr_sysreg<0b0,0b0001, "fpscr">;
  defm VSTR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b0,0b0010, "fpscr_nzcvqc">;

  let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
    defm VSTR_FPCXTNS    : vfp_vstrldr_sysreg<0b0,0b1110, "fpcxtns">;
    defm VSTR_FPCXTS     : vfp_vstrldr_sysreg<0b0,0b1111, "fpcxts">;
  }
}

let Predicates = [HasV8_1MMainline, HasMVEInt] in {
  let Uses = [VPR] in {
    defm VSTR_VPR : vfp_vstrldr_sysreg<0b0,0b1100, "vpr">;
  }
  defm VSTR_P0 : vfp_vstrldr_sysreg<0b0,0b1101, "p0",
                                    (outs), (ins VCCR:$P0)>;

  let Defs = [VPR] in {
    defm VLDR_VPR : vfp_vstrldr_sysreg<0b1,0b1100, "vpr">;
  }
  defm VLDR_P0 : vfp_vstrldr_sysreg<0b1,0b1101, "p0",
                                    (outs VCCR:$P0), (ins)>;
}

let Uses = [FPSCR] in {
  defm VLDR_FPSCR        : vfp_vstrldr_sysreg<0b1,0b0001, "fpscr">;
  defm VLDR_FPSCR_NZCVQC : vfp_vstrldr_sysreg<0b1,0b0010, "fpscr_nzcvqc">;

  let Predicates = [HasV8_1MMainline, Has8MSecExt] in {
    defm VLDR_FPCXTNS    : vfp_vstrldr_sysreg<0b1,0b1110, "fpcxtns">;
    defm VLDR_FPCXTS     : vfp_vstrldr_sysreg<0b1,0b1111, "fpcxts">;
  }
}