  1. //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the RISCV implementation of the TargetInstrInfo class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "RISCVInstrInfo.h"
  13. #include "MCTargetDesc/RISCVMatInt.h"
  14. #include "RISCV.h"
  15. #include "RISCVMachineFunctionInfo.h"
  16. #include "RISCVSubtarget.h"
  17. #include "RISCVTargetMachine.h"
  18. #include "llvm/ADT/STLExtras.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/Analysis/MemoryLocation.h"
  21. #include "llvm/CodeGen/LiveIntervals.h"
  22. #include "llvm/CodeGen/LiveVariables.h"
  23. #include "llvm/CodeGen/MachineCombinerPattern.h"
  24. #include "llvm/CodeGen/MachineFunctionPass.h"
  25. #include "llvm/CodeGen/MachineInstrBuilder.h"
  26. #include "llvm/CodeGen/MachineRegisterInfo.h"
  27. #include "llvm/CodeGen/RegisterScavenging.h"
  28. #include "llvm/IR/DebugInfoMetadata.h"
  29. #include "llvm/MC/MCInstBuilder.h"
  30. #include "llvm/MC/TargetRegistry.h"
  31. #include "llvm/Support/ErrorHandling.h"
  32. using namespace llvm;
  33. #define GEN_CHECK_COMPRESS_INSTR
  34. #include "RISCVGenCompressInstEmitter.inc"
  35. #define GET_INSTRINFO_CTOR_DTOR
  36. #define GET_INSTRINFO_NAMED_OPS
  37. #include "RISCVGenInstrInfo.inc"
  38. static cl::opt<bool> PreferWholeRegisterMove(
  39. "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
  40. cl::desc("Prefer whole register move for vector registers."));
  41. namespace llvm::RISCVVPseudosTable {
  42. using namespace RISCV;
  43. #define GET_RISCVVPseudosTable_IMPL
  44. #include "RISCVGenSearchableTables.inc"
  45. } // namespace llvm::RISCVVPseudosTable
  46. RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
  47. : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
  48. STI(STI) {}
  49. MCInst RISCVInstrInfo::getNop() const {
  50. if (STI.hasStdExtCOrZca())
  51. return MCInstBuilder(RISCV::C_NOP);
  52. return MCInstBuilder(RISCV::ADDI)
  53. .addReg(RISCV::X0)
  54. .addReg(RISCV::X0)
  55. .addImm(0);
  56. }
  57. unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
  58. int &FrameIndex) const {
  59. switch (MI.getOpcode()) {
  60. default:
  61. return 0;
  62. case RISCV::LB:
  63. case RISCV::LBU:
  64. case RISCV::LH:
  65. case RISCV::LHU:
  66. case RISCV::FLH:
  67. case RISCV::LW:
  68. case RISCV::FLW:
  69. case RISCV::LWU:
  70. case RISCV::LD:
  71. case RISCV::FLD:
  72. break;
  73. }
  74. if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
  75. MI.getOperand(2).getImm() == 0) {
  76. FrameIndex = MI.getOperand(1).getIndex();
  77. return MI.getOperand(0).getReg();
  78. }
  79. return 0;
  80. }
  81. unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
  82. int &FrameIndex) const {
  83. switch (MI.getOpcode()) {
  84. default:
  85. return 0;
  86. case RISCV::SB:
  87. case RISCV::SH:
  88. case RISCV::SW:
  89. case RISCV::FSH:
  90. case RISCV::FSW:
  91. case RISCV::SD:
  92. case RISCV::FSD:
  93. break;
  94. }
  95. if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
  96. MI.getOperand(2).getImm() == 0) {
  97. FrameIndex = MI.getOperand(1).getIndex();
  98. return MI.getOperand(0).getReg();
  99. }
  100. return 0;
  101. }
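// Returns true if copying a register tuple forward (in ascending subregister
// order) from SrcReg to DstReg would overwrite source subregisters that have
// not been copied yet, i.e. the destination range starts above the source and
// the two ranges overlap. copyPhysReg uses this to decide whether to emit the
// per-subregister copies in reverse order.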
  102. static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
  103. unsigned NumRegs) {
  104. return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
  105. }
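// Walks backwards from the COPY at MBBI looking for the instruction that
// defines the source vector register, and returns true (setting DefMBBI to
// that definition) when the whole-register move can be lowered to vmv.v.v
// instead: the defining instruction must carry VL/SEW operands and must not be
// a widening reduction, any intervening vsetvli must be of the
// `vsetvli x0, x0, vtype` form with a matching SEW, and the vtype at the
// definition must be tail-agnostic with an LMUL matching the copied register
// class.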
  106. static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
  107. const MachineBasicBlock &MBB,
  108. MachineBasicBlock::const_iterator MBBI,
  109. MachineBasicBlock::const_iterator &DefMBBI,
  110. RISCVII::VLMUL LMul) {
  111. if (PreferWholeRegisterMove)
  112. return false;
  113. assert(MBBI->getOpcode() == TargetOpcode::COPY &&
  114. "Unexpected COPY instruction.");
  115. Register SrcReg = MBBI->getOperand(1).getReg();
  116. const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  117. bool FoundDef = false;
  118. bool FirstVSetVLI = false;
  119. unsigned FirstSEW = 0;
  120. while (MBBI != MBB.begin()) {
  121. --MBBI;
  122. if (MBBI->isMetaInstruction())
  123. continue;
  124. if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
  125. MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
  126. MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
  127. // There is a vsetvli between COPY and source define instruction.
  128. // vy = def_vop ... (producing instruction)
  129. // ...
  130. // vsetvli
  131. // ...
  132. // vx = COPY vy
  133. if (!FoundDef) {
  134. if (!FirstVSetVLI) {
  135. FirstVSetVLI = true;
  136. unsigned FirstVType = MBBI->getOperand(2).getImm();
  137. RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
  138. FirstSEW = RISCVVType::getSEW(FirstVType);
  139. // The first encountered vsetvli must have the same lmul as the
  140. // register class of COPY.
  141. if (FirstLMul != LMul)
  142. return false;
  143. }
  144. // Only permit `vsetvli x0, x0, vtype` between COPY and the source
  145. // define instruction.
  146. if (MBBI->getOperand(0).getReg() != RISCV::X0)
  147. return false;
  148. if (MBBI->getOperand(1).isImm())
  149. return false;
  150. if (MBBI->getOperand(1).getReg() != RISCV::X0)
  151. return false;
  152. continue;
  153. }
  154. // MBBI is the first vsetvli before the producing instruction.
  155. unsigned VType = MBBI->getOperand(2).getImm();
  156. // If there is a vsetvli between the COPY and the producing instruction.
  157. if (FirstVSetVLI) {
  158. // If SEW is different, return false.
  159. if (RISCVVType::getSEW(VType) != FirstSEW)
  160. return false;
  161. }
  162. // If the vsetvli is tail undisturbed, keep the whole register move.
  163. if (!RISCVVType::isTailAgnostic(VType))
  164. return false;
  165. // The check is conservative. We only have register classes for
  166. // LMUL = 1/2/4/8, so we should also be able to convert vmv1r.v to
  167. // vmv.v.v for fractional LMUL operations. However, we cannot use the
  168. // vsetvli LMUL for widening operations, because the result of a widening
  169. // operation is 2 x LMUL.
  170. return LMul == RISCVVType::getVLMUL(VType);
  171. } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
  172. return false;
  173. } else if (MBBI->getNumDefs()) {
  174. // Check all the instructions which will change VL.
  175. // For example, vleff has implicit def VL.
  176. if (MBBI->modifiesRegister(RISCV::VL))
  177. return false;
  178. // Only convert whole register copies to vmv.v.v when the defining
  179. // value appears in the explicit operands.
  180. for (const MachineOperand &MO : MBBI->explicit_operands()) {
  181. if (!MO.isReg() || !MO.isDef())
  182. continue;
  183. if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
  184. // We only permit the source of the COPY to have the same LMUL as the
  185. // defined operand.
  186. // There are cases where we need to keep the whole register copy if the
  187. // LMUL is different.
  188. // For example,
  189. // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
  190. // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
  191. // # The COPY may be created by vlmul_trunc intrinsic.
  192. // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
  193. //
  194. // After widening, the valid value will be 4 x e32 elements. If we
  195. // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
  196. // FIXME: The COPY of subregister of Zvlsseg register will not be able
  197. // to convert to vmv.v.[v|i] under the constraint.
  198. if (MO.getReg() != SrcReg)
  199. return false;
  200. // For widening reduction instructions with an LMUL_1 input vector,
  201. // checking only the LMUL is insufficient because the reduction result
  202. // is always LMUL_1.
  203. // For example,
  204. // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
  205. // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
  206. // $v26 = COPY killed renamable $v8
  207. // After widening, the valid value will be 1 x e16 elements. If we
  208. // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
  209. uint64_t TSFlags = MBBI->getDesc().TSFlags;
  210. if (RISCVII::isRVVWideningReduction(TSFlags))
  211. return false;
  212. // If the producing instruction does not depend on vsetvli, do not
  213. // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
  214. if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
  215. return false;
  216. // Found the definition.
  217. FoundDef = true;
  218. DefMBBI = MBBI;
  219. break;
  220. }
  221. }
  222. }
  223. }
  224. return false;
  225. }
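// Lowers a COPY between physical registers. GPR copies become ADDI rd, rs, 0,
// and VCSR reads become CSRRS. FPR copies use FSGNJ, promoting FPR16 copies to
// FSGNJ_S when only Zfhmin is available. Vector copies use the whole-register
// VMV<n>R_V instructions, or vmv.v.v / vmv.v.i when isConvertibleToVMV_V_V
// proves that is safe; segment (tuple) register classes are copied one
// subregister at a time, reversing the order when the source and destination
// ranges overlap.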
  226. void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
  227. MachineBasicBlock::iterator MBBI,
  228. const DebugLoc &DL, MCRegister DstReg,
  229. MCRegister SrcReg, bool KillSrc) const {
  230. if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
  231. BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
  232. .addReg(SrcReg, getKillRegState(KillSrc))
  233. .addImm(0);
  234. return;
  235. }
  236. // Handle copy from csr
  237. if (RISCV::VCSRRegClass.contains(SrcReg) &&
  238. RISCV::GPRRegClass.contains(DstReg)) {
  239. const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  240. BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
  241. .addImm(RISCVSysReg::lookupSysRegByName(TRI.getName(SrcReg))->Encoding)
  242. .addReg(RISCV::X0);
  243. return;
  244. }
  245. // FPR->FPR copies and VR->VR copies.
  246. unsigned Opc;
  247. bool IsScalableVector = true;
  248. unsigned NF = 1;
  249. RISCVII::VLMUL LMul = RISCVII::LMUL_1;
  250. unsigned SubRegIdx = RISCV::sub_vrm1_0;
  251. if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
  252. if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) {
  253. // The Zfhmin subset doesn't have FSGNJ_H, so replace FSGNJ_H with FSGNJ_S.
  254. const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  255. DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
  256. &RISCV::FPR32RegClass);
  257. SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
  258. &RISCV::FPR32RegClass);
  259. Opc = RISCV::FSGNJ_S;
  260. } else {
  261. Opc = RISCV::FSGNJ_H;
  262. }
  263. IsScalableVector = false;
  264. } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
  265. Opc = RISCV::FSGNJ_S;
  266. IsScalableVector = false;
  267. } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
  268. Opc = RISCV::FSGNJ_D;
  269. IsScalableVector = false;
  270. } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
  271. Opc = RISCV::VMV1R_V;
  272. LMul = RISCVII::LMUL_1;
  273. } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
  274. Opc = RISCV::VMV2R_V;
  275. LMul = RISCVII::LMUL_2;
  276. } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
  277. Opc = RISCV::VMV4R_V;
  278. LMul = RISCVII::LMUL_4;
  279. } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
  280. Opc = RISCV::VMV8R_V;
  281. LMul = RISCVII::LMUL_8;
  282. } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
  283. Opc = RISCV::VMV1R_V;
  284. SubRegIdx = RISCV::sub_vrm1_0;
  285. NF = 2;
  286. LMul = RISCVII::LMUL_1;
  287. } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
  288. Opc = RISCV::VMV2R_V;
  289. SubRegIdx = RISCV::sub_vrm2_0;
  290. NF = 2;
  291. LMul = RISCVII::LMUL_2;
  292. } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
  293. Opc = RISCV::VMV4R_V;
  294. SubRegIdx = RISCV::sub_vrm4_0;
  295. NF = 2;
  296. LMul = RISCVII::LMUL_4;
  297. } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
  298. Opc = RISCV::VMV1R_V;
  299. SubRegIdx = RISCV::sub_vrm1_0;
  300. NF = 3;
  301. LMul = RISCVII::LMUL_1;
  302. } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
  303. Opc = RISCV::VMV2R_V;
  304. SubRegIdx = RISCV::sub_vrm2_0;
  305. NF = 3;
  306. LMul = RISCVII::LMUL_2;
  307. } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
  308. Opc = RISCV::VMV1R_V;
  309. SubRegIdx = RISCV::sub_vrm1_0;
  310. NF = 4;
  311. LMul = RISCVII::LMUL_1;
  312. } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
  313. Opc = RISCV::VMV2R_V;
  314. SubRegIdx = RISCV::sub_vrm2_0;
  315. NF = 4;
  316. LMul = RISCVII::LMUL_2;
  317. } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
  318. Opc = RISCV::VMV1R_V;
  319. SubRegIdx = RISCV::sub_vrm1_0;
  320. NF = 5;
  321. LMul = RISCVII::LMUL_1;
  322. } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
  323. Opc = RISCV::VMV1R_V;
  324. SubRegIdx = RISCV::sub_vrm1_0;
  325. NF = 6;
  326. LMul = RISCVII::LMUL_1;
  327. } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
  328. Opc = RISCV::VMV1R_V;
  329. SubRegIdx = RISCV::sub_vrm1_0;
  330. NF = 7;
  331. LMul = RISCVII::LMUL_1;
  332. } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
  333. Opc = RISCV::VMV1R_V;
  334. SubRegIdx = RISCV::sub_vrm1_0;
  335. NF = 8;
  336. LMul = RISCVII::LMUL_1;
  337. } else {
  338. llvm_unreachable("Impossible reg-to-reg copy");
  339. }
  340. if (IsScalableVector) {
  341. bool UseVMV_V_V = false;
  342. MachineBasicBlock::const_iterator DefMBBI;
  343. unsigned VIOpc;
  344. if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
  345. UseVMV_V_V = true;
  346. // We only need to handle LMUL = 1/2/4/8 here because we only define
  347. // vector register classes for LMUL = 1/2/4/8.
  348. switch (LMul) {
  349. default:
  350. llvm_unreachable("Impossible LMUL for vector register copy.");
  351. case RISCVII::LMUL_1:
  352. Opc = RISCV::PseudoVMV_V_V_M1;
  353. VIOpc = RISCV::PseudoVMV_V_I_M1;
  354. break;
  355. case RISCVII::LMUL_2:
  356. Opc = RISCV::PseudoVMV_V_V_M2;
  357. VIOpc = RISCV::PseudoVMV_V_I_M2;
  358. break;
  359. case RISCVII::LMUL_4:
  360. Opc = RISCV::PseudoVMV_V_V_M4;
  361. VIOpc = RISCV::PseudoVMV_V_I_M4;
  362. break;
  363. case RISCVII::LMUL_8:
  364. Opc = RISCV::PseudoVMV_V_V_M8;
  365. VIOpc = RISCV::PseudoVMV_V_I_M8;
  366. break;
  367. }
  368. }
  369. bool UseVMV_V_I = false;
  370. if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
  371. UseVMV_V_I = true;
  372. Opc = VIOpc;
  373. }
  374. if (NF == 1) {
  375. auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
  376. if (UseVMV_V_I)
  377. MIB = MIB.add(DefMBBI->getOperand(1));
  378. else
  379. MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
  380. if (UseVMV_V_V) {
  381. const MCInstrDesc &Desc = DefMBBI->getDesc();
  382. MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
  383. MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
  384. MIB.addReg(RISCV::VL, RegState::Implicit);
  385. MIB.addReg(RISCV::VTYPE, RegState::Implicit);
  386. }
  387. } else {
  388. const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  389. int I = 0, End = NF, Incr = 1;
  390. unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
  391. unsigned DstEncoding = TRI->getEncodingValue(DstReg);
  392. unsigned LMulVal;
  393. bool Fractional;
  394. std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
  395. assert(!Fractional && "Fractional LMUL is impossible here.");
  396. if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
  397. I = NF - 1;
  398. End = -1;
  399. Incr = -1;
  400. }
  401. for (; I != End; I += Incr) {
  402. auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
  403. TRI->getSubReg(DstReg, SubRegIdx + I));
  404. if (UseVMV_V_I)
  405. MIB = MIB.add(DefMBBI->getOperand(1));
  406. else
  407. MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
  408. getKillRegState(KillSrc));
  409. if (UseVMV_V_V) {
  410. const MCInstrDesc &Desc = DefMBBI->getDesc();
  411. MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
  412. MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
  413. MIB.addReg(RISCV::VL, RegState::Implicit);
  414. MIB.addReg(RISCV::VTYPE, RegState::Implicit);
  415. }
  416. }
  417. }
  418. } else {
  419. BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
  420. .addReg(SrcReg, getKillRegState(KillSrc))
  421. .addReg(SrcReg, getKillRegState(KillSrc));
  422. }
  423. }
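// Spills SrcReg to stack slot FI. Scalar GPR/FPR spills use a plain store with
// an immediate offset of 0. Vector registers use whole-register stores
// (VS1R_V..VS8R_V) or PseudoVSPILL* pseudos for register tuples; for those the
// frame object is given the ScalableVector stack ID and an unknown-size memory
// operand, since the spill size depends on VLEN.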
  424. void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
  425. MachineBasicBlock::iterator I,
  426. Register SrcReg, bool IsKill, int FI,
  427. const TargetRegisterClass *RC,
  428. const TargetRegisterInfo *TRI,
  429. Register VReg) const {
  430. DebugLoc DL;
  431. if (I != MBB.end())
  432. DL = I->getDebugLoc();
  433. MachineFunction *MF = MBB.getParent();
  434. MachineFrameInfo &MFI = MF->getFrameInfo();
  435. unsigned Opcode;
  436. bool IsScalableVector = true;
  437. if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
  438. Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
  439. RISCV::SW : RISCV::SD;
  440. IsScalableVector = false;
  441. } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  442. Opcode = RISCV::FSH;
  443. IsScalableVector = false;
  444. } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  445. Opcode = RISCV::FSW;
  446. IsScalableVector = false;
  447. } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  448. Opcode = RISCV::FSD;
  449. IsScalableVector = false;
  450. } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
  451. Opcode = RISCV::VS1R_V;
  452. } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
  453. Opcode = RISCV::VS2R_V;
  454. } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
  455. Opcode = RISCV::VS4R_V;
  456. } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
  457. Opcode = RISCV::VS8R_V;
  458. } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
  459. Opcode = RISCV::PseudoVSPILL2_M1;
  460. else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
  461. Opcode = RISCV::PseudoVSPILL2_M2;
  462. else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
  463. Opcode = RISCV::PseudoVSPILL2_M4;
  464. else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
  465. Opcode = RISCV::PseudoVSPILL3_M1;
  466. else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
  467. Opcode = RISCV::PseudoVSPILL3_M2;
  468. else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
  469. Opcode = RISCV::PseudoVSPILL4_M1;
  470. else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
  471. Opcode = RISCV::PseudoVSPILL4_M2;
  472. else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
  473. Opcode = RISCV::PseudoVSPILL5_M1;
  474. else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
  475. Opcode = RISCV::PseudoVSPILL6_M1;
  476. else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
  477. Opcode = RISCV::PseudoVSPILL7_M1;
  478. else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
  479. Opcode = RISCV::PseudoVSPILL8_M1;
  480. else
  481. llvm_unreachable("Can't store this register to stack slot");
  482. if (IsScalableVector) {
  483. MachineMemOperand *MMO = MF->getMachineMemOperand(
  484. MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
  485. MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
  486. MFI.setStackID(FI, TargetStackID::ScalableVector);
  487. BuildMI(MBB, I, DL, get(Opcode))
  488. .addReg(SrcReg, getKillRegState(IsKill))
  489. .addFrameIndex(FI)
  490. .addMemOperand(MMO);
  491. } else {
  492. MachineMemOperand *MMO = MF->getMachineMemOperand(
  493. MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
  494. MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  495. BuildMI(MBB, I, DL, get(Opcode))
  496. .addReg(SrcReg, getKillRegState(IsKill))
  497. .addFrameIndex(FI)
  498. .addImm(0)
  499. .addMemOperand(MMO);
  500. }
  501. }
  502. void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
  503. MachineBasicBlock::iterator I,
  504. Register DstReg, int FI,
  505. const TargetRegisterClass *RC,
  506. const TargetRegisterInfo *TRI,
  507. Register VReg) const {
  508. DebugLoc DL;
  509. if (I != MBB.end())
  510. DL = I->getDebugLoc();
  511. MachineFunction *MF = MBB.getParent();
  512. MachineFrameInfo &MFI = MF->getFrameInfo();
  513. unsigned Opcode;
  514. bool IsScalableVector = true;
  515. if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
  516. Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
  517. RISCV::LW : RISCV::LD;
  518. IsScalableVector = false;
  519. } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  520. Opcode = RISCV::FLH;
  521. IsScalableVector = false;
  522. } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  523. Opcode = RISCV::FLW;
  524. IsScalableVector = false;
  525. } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  526. Opcode = RISCV::FLD;
  527. IsScalableVector = false;
  528. } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
  529. Opcode = RISCV::VL1RE8_V;
  530. } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
  531. Opcode = RISCV::VL2RE8_V;
  532. } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
  533. Opcode = RISCV::VL4RE8_V;
  534. } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
  535. Opcode = RISCV::VL8RE8_V;
  536. } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
  537. Opcode = RISCV::PseudoVRELOAD2_M1;
  538. else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
  539. Opcode = RISCV::PseudoVRELOAD2_M2;
  540. else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
  541. Opcode = RISCV::PseudoVRELOAD2_M4;
  542. else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
  543. Opcode = RISCV::PseudoVRELOAD3_M1;
  544. else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
  545. Opcode = RISCV::PseudoVRELOAD3_M2;
  546. else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
  547. Opcode = RISCV::PseudoVRELOAD4_M1;
  548. else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
  549. Opcode = RISCV::PseudoVRELOAD4_M2;
  550. else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
  551. Opcode = RISCV::PseudoVRELOAD5_M1;
  552. else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
  553. Opcode = RISCV::PseudoVRELOAD6_M1;
  554. else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
  555. Opcode = RISCV::PseudoVRELOAD7_M1;
  556. else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
  557. Opcode = RISCV::PseudoVRELOAD8_M1;
  558. else
  559. llvm_unreachable("Can't load this register from stack slot");
  560. if (IsScalableVector) {
  561. MachineMemOperand *MMO = MF->getMachineMemOperand(
  562. MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
  563. MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
  564. MFI.setStackID(FI, TargetStackID::ScalableVector);
  565. BuildMI(MBB, I, DL, get(Opcode), DstReg)
  566. .addFrameIndex(FI)
  567. .addMemOperand(MMO);
  568. } else {
  569. MachineMemOperand *MMO = MF->getMachineMemOperand(
  570. MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
  571. MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  572. BuildMI(MBB, I, DL, get(Opcode), DstReg)
  573. .addFrameIndex(FI)
  574. .addImm(0)
  575. .addMemOperand(MMO);
  576. }
  577. }
  578. MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
  579. MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
  580. MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
  581. VirtRegMap *VRM) const {
  582. const MachineFrameInfo &MFI = MF.getFrameInfo();
  583. // The below optimizations narrow the load so they are only valid for little
  584. // endian.
  585. // TODO: Support big endian by adding an offset into the frame object?
  586. if (MF.getDataLayout().isBigEndian())
  587. return nullptr;
  588. // Fold load from stack followed by sext.w into lw.
  589. // TODO: Fold with sext.b, sext.h, zext.b, zext.h, zext.w?
  590. if (Ops.size() != 1 || Ops[0] != 1)
  591. return nullptr;
  592. unsigned LoadOpc;
  593. switch (MI.getOpcode()) {
  594. default:
  595. if (RISCV::isSEXT_W(MI)) {
  596. LoadOpc = RISCV::LW;
  597. break;
  598. }
  599. if (RISCV::isZEXT_W(MI)) {
  600. LoadOpc = RISCV::LWU;
  601. break;
  602. }
  603. if (RISCV::isZEXT_B(MI)) {
  604. LoadOpc = RISCV::LBU;
  605. break;
  606. }
  607. return nullptr;
  608. case RISCV::SEXT_H:
  609. LoadOpc = RISCV::LH;
  610. break;
  611. case RISCV::SEXT_B:
  612. LoadOpc = RISCV::LB;
  613. break;
  614. case RISCV::ZEXT_H_RV32:
  615. case RISCV::ZEXT_H_RV64:
  616. LoadOpc = RISCV::LHU;
  617. break;
  618. }
  619. MachineMemOperand *MMO = MF.getMachineMemOperand(
  620. MachinePointerInfo::getFixedStack(MF, FrameIndex),
  621. MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
  622. MFI.getObjectAlign(FrameIndex));
  623. Register DstReg = MI.getOperand(0).getReg();
  624. return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
  625. DstReg)
  626. .addFrameIndex(FrameIndex)
  627. .addImm(0)
  628. .addMemOperand(MMO);
  629. }
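// Materializes the immediate Val into DstReg using the instruction sequence
// computed by RISCVMatInt. The sequence starts from X0 and each subsequent
// instruction builds on the partial result already in DstReg. As a rough
// illustration (the exact sequence is chosen by RISCVMatInt::generateInstSeq),
// a constant such as 0x12345678 would typically expand to an LUI of the upper
// 20 bits followed by an ADDI/ADDIW of the lower 12 bits.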
  630. void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
  631. MachineBasicBlock::iterator MBBI,
  632. const DebugLoc &DL, Register DstReg, uint64_t Val,
  633. MachineInstr::MIFlag Flag) const {
  634. Register SrcReg = RISCV::X0;
  635. if (!STI.is64Bit() && !isInt<32>(Val))
  636. report_fatal_error("Should only materialize 32-bit constants for RV32");
  637. RISCVMatInt::InstSeq Seq =
  638. RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
  639. assert(!Seq.empty());
  640. for (RISCVMatInt::Inst &Inst : Seq) {
  641. switch (Inst.getOpndKind()) {
  642. case RISCVMatInt::Imm:
  643. BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
  644. .addImm(Inst.getImm())
  645. .setMIFlag(Flag);
  646. break;
  647. case RISCVMatInt::RegX0:
  648. BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
  649. .addReg(SrcReg, RegState::Kill)
  650. .addReg(RISCV::X0)
  651. .setMIFlag(Flag);
  652. break;
  653. case RISCVMatInt::RegReg:
  654. BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
  655. .addReg(SrcReg, RegState::Kill)
  656. .addReg(SrcReg, RegState::Kill)
  657. .setMIFlag(Flag);
  658. break;
  659. case RISCVMatInt::RegImm:
  660. BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
  661. .addReg(SrcReg, RegState::Kill)
  662. .addImm(Inst.getImm())
  663. .setMIFlag(Flag);
  664. break;
  665. }
  666. // Only the first instruction has X0 as its source.
  667. SrcReg = DstReg;
  668. }
  669. }
  670. static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
  671. switch (Opc) {
  672. default:
  673. return RISCVCC::COND_INVALID;
  674. case RISCV::BEQ:
  675. return RISCVCC::COND_EQ;
  676. case RISCV::BNE:
  677. return RISCVCC::COND_NE;
  678. case RISCV::BLT:
  679. return RISCVCC::COND_LT;
  680. case RISCV::BGE:
  681. return RISCVCC::COND_GE;
  682. case RISCV::BLTU:
  683. return RISCVCC::COND_LTU;
  684. case RISCV::BGEU:
  685. return RISCVCC::COND_GEU;
  686. }
  687. }
  688. // The contents of values added to Cond are not examined outside of
  689. // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
  690. // push the condition code (as an immediate), Reg1, and Reg2.
  691. static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
  692. SmallVectorImpl<MachineOperand> &Cond) {
  693. // Block ends with fall-through condbranch.
  694. assert(LastInst.getDesc().isConditionalBranch() &&
  695. "Unknown conditional branch");
  696. Target = LastInst.getOperand(2).getMBB();
  697. unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
  698. Cond.push_back(MachineOperand::CreateImm(CC));
  699. Cond.push_back(LastInst.getOperand(0));
  700. Cond.push_back(LastInst.getOperand(1));
  701. }
  702. const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
  703. switch (CC) {
  704. default:
  705. llvm_unreachable("Unknown condition code!");
  706. case RISCVCC::COND_EQ:
  707. return get(RISCV::BEQ);
  708. case RISCVCC::COND_NE:
  709. return get(RISCV::BNE);
  710. case RISCVCC::COND_LT:
  711. return get(RISCV::BLT);
  712. case RISCVCC::COND_GE:
  713. return get(RISCV::BGE);
  714. case RISCVCC::COND_LTU:
  715. return get(RISCV::BLTU);
  716. case RISCVCC::COND_GEU:
  717. return get(RISCV::BGEU);
  718. }
  719. }
  720. RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
  721. switch (CC) {
  722. default:
  723. llvm_unreachable("Unrecognized conditional branch");
  724. case RISCVCC::COND_EQ:
  725. return RISCVCC::COND_NE;
  726. case RISCVCC::COND_NE:
  727. return RISCVCC::COND_EQ;
  728. case RISCVCC::COND_LT:
  729. return RISCVCC::COND_GE;
  730. case RISCVCC::COND_GE:
  731. return RISCVCC::COND_LT;
  732. case RISCVCC::COND_LTU:
  733. return RISCVCC::COND_GEU;
  734. case RISCVCC::COND_GEU:
  735. return RISCVCC::COND_LTU;
  736. }
  737. }
  738. bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
  739. MachineBasicBlock *&TBB,
  740. MachineBasicBlock *&FBB,
  741. SmallVectorImpl<MachineOperand> &Cond,
  742. bool AllowModify) const {
  743. TBB = FBB = nullptr;
  744. Cond.clear();
  745. // If the block has no terminators, it just falls into the block after it.
  746. MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  747. if (I == MBB.end() || !isUnpredicatedTerminator(*I))
  748. return false;
  749. // Count the number of terminators and find the first unconditional or
  750. // indirect branch.
  751. MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  752. int NumTerminators = 0;
  753. for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
  754. J++) {
  755. NumTerminators++;
  756. if (J->getDesc().isUnconditionalBranch() ||
  757. J->getDesc().isIndirectBranch()) {
  758. FirstUncondOrIndirectBr = J.getReverse();
  759. }
  760. }
  761. // If AllowModify is true, we can erase any terminators after
  762. // FirstUncondOrIndirectBR.
  763. if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
  764. while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
  765. std::next(FirstUncondOrIndirectBr)->eraseFromParent();
  766. NumTerminators--;
  767. }
  768. I = FirstUncondOrIndirectBr;
  769. }
  770. // We can't handle blocks that end in an indirect branch.
  771. if (I->getDesc().isIndirectBranch())
  772. return true;
  773. // We can't handle blocks with more than 2 terminators.
  774. if (NumTerminators > 2)
  775. return true;
  776. // Handle a single unconditional branch.
  777. if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
  778. TBB = getBranchDestBlock(*I);
  779. return false;
  780. }
  781. // Handle a single conditional branch.
  782. if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
  783. parseCondBranch(*I, TBB, Cond);
  784. return false;
  785. }
  786. // Handle a conditional branch followed by an unconditional branch.
  787. if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
  788. I->getDesc().isUnconditionalBranch()) {
  789. parseCondBranch(*std::prev(I), TBB, Cond);
  790. FBB = getBranchDestBlock(*I);
  791. return false;
  792. }
  793. // Otherwise, we can't handle this.
  794. return true;
  795. }
  796. unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
  797. int *BytesRemoved) const {
  798. if (BytesRemoved)
  799. *BytesRemoved = 0;
  800. MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  801. if (I == MBB.end())
  802. return 0;
  803. if (!I->getDesc().isUnconditionalBranch() &&
  804. !I->getDesc().isConditionalBranch())
  805. return 0;
  806. // Remove the branch.
  807. if (BytesRemoved)
  808. *BytesRemoved += getInstSizeInBytes(*I);
  809. I->eraseFromParent();
  810. I = MBB.end();
  811. if (I == MBB.begin())
  812. return 1;
  813. --I;
  814. if (!I->getDesc().isConditionalBranch())
  815. return 1;
  816. // Remove the branch.
  817. if (BytesRemoved)
  818. *BytesRemoved += getInstSizeInBytes(*I);
  819. I->eraseFromParent();
  820. return 2;
  821. }
  822. // Inserts a branch into the end of the specific MachineBasicBlock, returning
  823. // the number of instructions inserted.
  824. unsigned RISCVInstrInfo::insertBranch(
  825. MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
  826. ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  827. if (BytesAdded)
  828. *BytesAdded = 0;
  829. // Shouldn't be a fall through.
  830. assert(TBB && "insertBranch must not be told to insert a fallthrough");
  831. assert((Cond.size() == 3 || Cond.size() == 0) &&
  832. "RISCV branch conditions have two components!");
  833. // Unconditional branch.
  834. if (Cond.empty()) {
  835. MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
  836. if (BytesAdded)
  837. *BytesAdded += getInstSizeInBytes(MI);
  838. return 1;
  839. }
  840. // Either a one or two-way conditional branch.
  841. auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  842. MachineInstr &CondMI =
  843. *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
  844. if (BytesAdded)
  845. *BytesAdded += getInstSizeInBytes(CondMI);
  846. // One-way conditional branch.
  847. if (!FBB)
  848. return 1;
  849. // Two-way conditional branch.
  850. MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
  851. if (BytesAdded)
  852. *BytesAdded += getInstSizeInBytes(MI);
  853. return 2;
  854. }
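// Inserts an unconditional long branch from MBB to DestBB during branch
// relaxation. The branch is a PseudoJump (an auipc+jalr pair), which needs a
// GPR to hold the target address: a register is scavenged when possible;
// otherwise s11 (x27) is spilled to the reserved scratch frame index, the jump
// is redirected through RestoreBB, and s11 is reloaded there.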
  855. void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
  856. MachineBasicBlock &DestBB,
  857. MachineBasicBlock &RestoreBB,
  858. const DebugLoc &DL, int64_t BrOffset,
  859. RegScavenger *RS) const {
  860. assert(RS && "RegScavenger required for long branching");
  861. assert(MBB.empty() &&
  862. "new block should be inserted for expanding unconditional branch");
  863. assert(MBB.pred_size() == 1);
  864. assert(RestoreBB.empty() &&
  865. "restore block should be inserted for restoring clobbered registers");
  866. MachineFunction *MF = MBB.getParent();
  867. MachineRegisterInfo &MRI = MF->getRegInfo();
  868. RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  869. const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  870. if (!isInt<32>(BrOffset))
  871. report_fatal_error(
  872. "Branch offsets outside of the signed 32-bit range not supported");
  873. // FIXME: A virtual register must be used initially, as the register
  874. // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
  875. // uses the same workaround).
  876. Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  877. auto II = MBB.end();
  878. // We may also update the jump target to RestoreBB later.
  879. MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
  880. .addReg(ScratchReg, RegState::Define | RegState::Dead)
  881. .addMBB(&DestBB, RISCVII::MO_CALL);
  882. RS->enterBasicBlockEnd(MBB);
  883. Register TmpGPR =
  884. RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
  885. /*RestoreAfter=*/false, /*SpAdj=*/0,
  886. /*AllowSpill=*/false);
  887. if (TmpGPR != RISCV::NoRegister)
  888. RS->setRegUsed(TmpGPR);
  889. else {
  890. // The case when there is no scavenged register needs special handling.
  891. // Pick s11 because it doesn't make a difference.
  892. TmpGPR = RISCV::X27;
  893. int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
  894. if (FrameIndex == -1)
  895. report_fatal_error("underestimated function size");
  896. storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
  897. &RISCV::GPRRegClass, TRI, Register());
  898. TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
  899. /*SpAdj=*/0, /*FIOperandNum=*/1);
  900. MI.getOperand(1).setMBB(&RestoreBB);
  901. loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
  902. &RISCV::GPRRegClass, TRI, Register());
  903. TRI->eliminateFrameIndex(RestoreBB.back(),
  904. /*SpAdj=*/0, /*FIOperandNum=*/1);
  905. }
  906. MRI.replaceRegWith(ScratchReg, TmpGPR);
  907. MRI.clearVirtRegs();
  908. }
  909. bool RISCVInstrInfo::reverseBranchCondition(
  910. SmallVectorImpl<MachineOperand> &Cond) const {
  911. assert((Cond.size() == 3) && "Invalid branch condition!");
  912. auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  913. Cond[0].setImm(getOppositeBranchCondition(CC));
  914. return false;
  915. }
  916. MachineBasicBlock *
  917. RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  918. assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  919. // The branch target is always the last operand.
  920. int NumOp = MI.getNumExplicitOperands();
  921. return MI.getOperand(NumOp - 1).getMBB();
  922. }
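// Conditional branches (B-type) have a 13-bit signed offset and JAL/PseudoBR
// (J-type) a 21-bit signed offset. PseudoJump expands to an auipc+jalr pair;
// the +0x800 below compensates for the low 12 bits being sign-extended when
// added to the auipc result, so the reachable offset is roughly the full
// signed 32-bit range.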
  923. bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
  924. int64_t BrOffset) const {
  925. unsigned XLen = STI.getXLen();
  926. // Ideally we could determine the supported branch offset from the
  927. // RISCVII::FormMask, but this can't be used for Pseudo instructions like
  928. // PseudoBR.
  929. switch (BranchOp) {
  930. default:
  931. llvm_unreachable("Unexpected opcode!");
  932. case RISCV::BEQ:
  933. case RISCV::BNE:
  934. case RISCV::BLT:
  935. case RISCV::BGE:
  936. case RISCV::BLTU:
  937. case RISCV::BGEU:
  938. return isIntN(13, BrOffset);
  939. case RISCV::JAL:
  940. case RISCV::PseudoBR:
  941. return isIntN(21, BrOffset);
  942. case RISCV::PseudoJump:
  943. return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  944. }
  945. }
  946. // If the operation has a predicated pseudo instruction, return the pseudo
  947. // instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
  948. // TODO: Support more operations.
  949. unsigned getPredicatedOpcode(unsigned Opcode) {
  950. switch (Opcode) {
  951. case RISCV::ADD: return RISCV::PseudoCCADD; break;
  952. case RISCV::SUB: return RISCV::PseudoCCSUB; break;
  953. case RISCV::AND: return RISCV::PseudoCCAND; break;
  954. case RISCV::OR: return RISCV::PseudoCCOR; break;
  955. case RISCV::XOR: return RISCV::PseudoCCXOR; break;
  956. case RISCV::ADDW: return RISCV::PseudoCCADDW; break;
  957. case RISCV::SUBW: return RISCV::PseudoCCSUBW; break;
  958. }
  959. return RISCV::INSTRUCTION_LIST_END;
  960. }
  961. /// Identify instructions that can be folded into a CCMOV instruction, and
  962. /// return the defining instruction.
  963. static MachineInstr *canFoldAsPredicatedOp(Register Reg,
  964. const MachineRegisterInfo &MRI,
  965. const TargetInstrInfo *TII) {
  966. if (!Reg.isVirtual())
  967. return nullptr;
  968. if (!MRI.hasOneNonDBGUse(Reg))
  969. return nullptr;
  970. MachineInstr *MI = MRI.getVRegDef(Reg);
  971. if (!MI)
  972. return nullptr;
  973. // Check if MI can be predicated and folded into the CCMOV.
  974. if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
  975. return nullptr;
  976. // Check if MI has any other defs or physreg uses.
  977. for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
  978. const MachineOperand &MO = MI->getOperand(i);
  979. // Reject frame index operands, PEI can't handle the predicated pseudos.
  980. if (MO.isFI() || MO.isCPI() || MO.isJTI())
  981. return nullptr;
  982. if (!MO.isReg())
  983. continue;
  984. // MI can't have any tied operands, that would conflict with predication.
  985. if (MO.isTied())
  986. return nullptr;
  987. if (MO.isDef())
  988. return nullptr;
  989. // Allow constant physregs.
  990. if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
  991. return nullptr;
  992. }
  993. bool DontMoveAcrossStores = true;
  994. if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
  995. return nullptr;
  996. return MI;
  997. }
  998. bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
  999. SmallVectorImpl<MachineOperand> &Cond,
  1000. unsigned &TrueOp, unsigned &FalseOp,
  1001. bool &Optimizable) const {
  1002. assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
  1003. "Unknown select instruction");
  1004. // CCMOV operands:
  1005. // 0: Def.
  1006. // 1: LHS of compare.
  1007. // 2: RHS of compare.
  1008. // 3: Condition code.
  1009. // 4: False use.
  1010. // 5: True use.
  1011. TrueOp = 5;
  1012. FalseOp = 4;
  1013. Cond.push_back(MI.getOperand(1));
  1014. Cond.push_back(MI.getOperand(2));
  1015. Cond.push_back(MI.getOperand(3));
  1016. // We can only fold when we support short forward branch opt.
  1017. Optimizable = STI.hasShortForwardBranchOpt();
  1018. return false;
  1019. }
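// Folds the single-use instruction defining the true (or, failing that, the
// false) operand of a PseudoCCMOVGPR into the corresponding predicated
// PseudoCC* operation and erases the original definition. When the false
// operand's definition is folded instead, the condition code is inverted.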
  1020. MachineInstr *
  1021. RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
  1022. SmallPtrSetImpl<MachineInstr *> &SeenMIs,
  1023. bool PreferFalse) const {
  1024. assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
  1025. "Unknown select instruction");
  1026. if (!STI.hasShortForwardBranchOpt())
  1027. return nullptr;
  1028. MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  1029. MachineInstr *DefMI =
  1030. canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
  1031. bool Invert = !DefMI;
  1032. if (!DefMI)
  1033. DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
  1034. if (!DefMI)
  1035. return nullptr;
  1036. // Find new register class to use.
  1037. MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
  1038. Register DestReg = MI.getOperand(0).getReg();
  1039. const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  1040. if (!MRI.constrainRegClass(DestReg, PreviousClass))
  1041. return nullptr;
  1042. unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  1043. assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  1044. // Create a new predicated version of DefMI.
  1045. MachineInstrBuilder NewMI =
  1046. BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
  1047. // Copy the condition portion.
  1048. NewMI.add(MI.getOperand(1));
  1049. NewMI.add(MI.getOperand(2));
  1050. // Add condition code, inverting if necessary.
  1051. auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
  1052. if (Invert)
  1053. CC = RISCVCC::getOppositeBranchCondition(CC);
  1054. NewMI.addImm(CC);
  1055. // Copy the false register.
  1056. NewMI.add(FalseReg);
  1057. // Copy all the DefMI operands.
  1058. const MCInstrDesc &DefDesc = DefMI->getDesc();
  1059. for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
  1060. NewMI.add(DefMI->getOperand(i));
  1061. // Update SeenMIs set: register newly created MI and erase removed DefMI.
  1062. SeenMIs.insert(NewMI);
  1063. SeenMIs.erase(DefMI);
  1064. // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  1065. // DefMI would be invalid when transferred inside the loop. Checking for a
  1066. // loop is expensive, but at least remove kill flags if they are in different
  1067. // BBs.
  1068. if (DefMI->getParent() != MI.getParent())
  1069. NewMI->clearKillInfo();
  1070. // The caller will erase MI, but not DefMI.
  1071. DefMI->eraseFromParent();
  1072. return NewMI;
  1073. }
  1074. unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  1075. if (MI.isMetaInstruction())
  1076. return 0;
  1077. unsigned Opcode = MI.getOpcode();
  1078. if (Opcode == TargetOpcode::INLINEASM ||
  1079. Opcode == TargetOpcode::INLINEASM_BR) {
  1080. const MachineFunction &MF = *MI.getParent()->getParent();
  1081. const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
  1082. return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
  1083. *TM.getMCAsmInfo());
  1084. }
  1085. if (MI.getParent() && MI.getParent()->getParent()) {
  1086. if (isCompressibleInst(MI, STI))
  1087. return 2;
  1088. }
  1089. return get(Opcode).getSize();
  1090. }
  1091. bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  1092. const unsigned Opcode = MI.getOpcode();
  1093. switch (Opcode) {
  1094. default:
  1095. break;
  1096. case RISCV::FSGNJ_D:
  1097. case RISCV::FSGNJ_S:
  1098. case RISCV::FSGNJ_H:
  1099. // The canonical floating-point move is fsgnj rd, rs, rs.
  1100. return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
  1101. MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  1102. case RISCV::ADDI:
  1103. case RISCV::ORI:
  1104. case RISCV::XORI:
  1105. return (MI.getOperand(1).isReg() &&
  1106. MI.getOperand(1).getReg() == RISCV::X0) ||
  1107. (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  1108. }
  1109. return MI.isAsCheapAsAMove();
  1110. }
  1111. std::optional<DestSourcePair>
  1112. RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  1113. if (MI.isMoveReg())
  1114. return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  1115. switch (MI.getOpcode()) {
  1116. default:
  1117. break;
  1118. case RISCV::ADDI:
  1119. // Operand 1 can be a frame index, but callers expect registers.
  1120. if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
  1121. MI.getOperand(2).getImm() == 0)
  1122. return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  1123. break;
  1124. case RISCV::FSGNJ_D:
  1125. case RISCV::FSGNJ_S:
  1126. case RISCV::FSGNJ_H:
  1127. // The canonical floating-point move is fsgnj rd, rs, rs.
  1128. if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
  1129. MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
  1130. return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  1131. break;
  1132. }
  1133. return std::nullopt;
  1134. }
  1135. void RISCVInstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
  1136. MachineInstr &OldMI2,
  1137. MachineInstr &NewMI1,
  1138. MachineInstr &NewMI2) const {
  1139. uint16_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
  1140. NewMI1.setFlags(IntersectedFlags);
  1141. NewMI2.setFlags(IntersectedFlags);
  1142. }
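// After MachineCombiner builds replacement instructions, copy the rounding
// mode (frm) operand from the root instruction onto each new instruction that
// expects one, and add an implicit use of the FRM register when the rounding
// mode is dynamic.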
  1143. void RISCVInstrInfo::finalizeInsInstrs(
  1144. MachineInstr &Root, MachineCombinerPattern &P,
  1145. SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  1146. int16_t FrmOpIdx =
  1147. RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  1148. if (FrmOpIdx < 0) {
  1149. assert(all_of(InsInstrs,
  1150. [](MachineInstr *MI) {
  1151. return RISCV::getNamedOperandIdx(MI->getOpcode(),
  1152. RISCV::OpName::frm) < 0;
  1153. }) &&
  1154. "New instructions require FRM whereas the old one does not have it");
  1155. return;
  1156. }
  1157. const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
  1158. MachineFunction &MF = *Root.getMF();
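// Propagate Root's rounding-mode operand to each newly created instruction; a
// dynamic rounding mode additionally requires an implicit use of the FRM
// register.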
  1159. for (auto *NewMI : InsInstrs) {
  1160. assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
  1161. NewMI->getOpcode(), RISCV::OpName::frm)) ==
  1162. NewMI->getNumOperands() &&
  1163. "Instruction has unexpected number of operands");
  1164. MachineInstrBuilder MIB(MF, NewMI);
  1165. MIB.add(FRM);
  1166. if (FRM.getImm() == RISCVFPRndMode::DYN)
  1167. MIB.addUse(RISCV::FRM, RegState::Implicit);
  1168. }
  1169. }
  1170. static bool isFADD(unsigned Opc) {
  1171. switch (Opc) {
  1172. default:
  1173. return false;
  1174. case RISCV::FADD_H:
  1175. case RISCV::FADD_S:
  1176. case RISCV::FADD_D:
  1177. return true;
  1178. }
  1179. }
  1180. static bool isFSUB(unsigned Opc) {
  1181. switch (Opc) {
  1182. default:
  1183. return false;
  1184. case RISCV::FSUB_H:
  1185. case RISCV::FSUB_S:
  1186. case RISCV::FSUB_D:
  1187. return true;
  1188. }
  1189. }
  1190. static bool isFMUL(unsigned Opc) {
  1191. switch (Opc) {
  1192. default:
  1193. return false;
  1194. case RISCV::FMUL_H:
  1195. case RISCV::FMUL_S:
  1196. case RISCV::FMUL_D:
  1197. return true;
  1198. }
  1199. }
  1200. bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
  1201. bool &Commuted) const {
  1202. if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
  1203. return false;
  1204. const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
  1205. unsigned OperandIdx = Commuted ? 2 : 1;
  1206. const MachineInstr &Sibling =
  1207. *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
  1208. int16_t InstFrmOpIdx =
  1209. RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  1210. int16_t SiblingFrmOpIdx =
  1211. RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
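// Reassociating is only safe when both instructions either have no frm operand
// or agree on the rounding mode.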
  1212. return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
  1213. RISCV::hasEqualFRM(Inst, Sibling);
  1214. }
  1215. bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
  1216. bool Invert) const {
  1217. unsigned Opc = Inst.getOpcode();
  1218. if (Invert) {
  1219. auto InverseOpcode = getInverseOpcode(Opc);
  1220. if (!InverseOpcode)
  1221. return false;
  1222. Opc = *InverseOpcode;
  1223. }
  1224. if (isFADD(Opc) || isFMUL(Opc))
  1225. return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
  1226. Inst.getFlag(MachineInstr::MIFlag::FmNsz);
  1227. switch (Opc) {
  1228. default:
  1229. return false;
  1230. case RISCV::ADD:
  1231. case RISCV::ADDW:
  1232. case RISCV::AND:
  1233. case RISCV::OR:
  1234. case RISCV::XOR:
  1235. // From RISC-V ISA spec, if both the high and low bits of the same product
  1236. // are required, then the recommended code sequence is:
  1237. //
  1238. // MULH[[S]U] rdh, rs1, rs2
  1239. // MUL rdl, rs1, rs2
  1240. // (source register specifiers must be in same order and rdh cannot be the
  1241. // same as rs1 or rs2)
  1242. //
  1243. // Microarchitectures can then fuse these into a single multiply operation
  1244. // instead of performing two separate multiplies.
  1245. // MachineCombiner may reassociate MUL operands and lose the fusion
  1246. // opportunity.
  1247. case RISCV::MUL:
  1248. case RISCV::MULW:
  1249. case RISCV::MIN:
  1250. case RISCV::MINU:
  1251. case RISCV::MAX:
  1252. case RISCV::MAXU:
  1253. case RISCV::FMIN_H:
  1254. case RISCV::FMIN_S:
  1255. case RISCV::FMIN_D:
  1256. case RISCV::FMAX_H:
  1257. case RISCV::FMAX_S:
  1258. case RISCV::FMAX_D:
  1259. return true;
  1260. }
  1261. return false;
  1262. }
  1263. std::optional<unsigned>
  1264. RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
  1265. switch (Opcode) {
  1266. default:
  1267. return std::nullopt;
  1268. case RISCV::FADD_H:
  1269. return RISCV::FSUB_H;
  1270. case RISCV::FADD_S:
  1271. return RISCV::FSUB_S;
  1272. case RISCV::FADD_D:
  1273. return RISCV::FSUB_D;
  1274. case RISCV::FSUB_H:
  1275. return RISCV::FADD_H;
  1276. case RISCV::FSUB_S:
  1277. return RISCV::FADD_S;
  1278. case RISCV::FSUB_D:
  1279. return RISCV::FADD_D;
  1280. case RISCV::ADD:
  1281. return RISCV::SUB;
  1282. case RISCV::SUB:
  1283. return RISCV::ADD;
  1284. case RISCV::ADDW:
  1285. return RISCV::SUBW;
  1286. case RISCV::SUBW:
  1287. return RISCV::ADDW;
  1288. }
  1289. }
  1290. static bool canCombineFPFusedMultiply(const MachineInstr &Root,
  1291. const MachineOperand &MO,
  1292. bool DoRegPressureReduce) {
  1293. if (!MO.isReg() || !MO.getReg().isVirtual())
  1294. return false;
  1295. const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  1296. MachineInstr *MI = MRI.getVRegDef(MO.getReg());
  1297. if (!MI || !isFMUL(MI->getOpcode()))
  1298. return false;
  1299. if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
  1300. !MI->getFlag(MachineInstr::MIFlag::FmContract))
  1301. return false;
1302. // Try combining even if the fmul has more than one use, as it eliminates a
1303. // dependency between the fadd (or fsub) and the fmul. However, it can extend
1304. // the live ranges of the fmul operands, so reject the transformation in
1305. // register-pressure reduction mode.
  1306. if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
  1307. return false;
  1308. // Do not combine instructions from different basic blocks.
  1309. if (Root.getParent() != MI->getParent())
  1310. return false;
  1311. return RISCV::hasEqualFRM(Root, *MI);
  1312. }
  1313. static bool
  1314. getFPFusedMultiplyPatterns(MachineInstr &Root,
  1315. SmallVectorImpl<MachineCombinerPattern> &Patterns,
  1316. bool DoRegPressureReduce) {
  1317. unsigned Opc = Root.getOpcode();
  1318. bool IsFAdd = isFADD(Opc);
  1319. if (!IsFAdd && !isFSUB(Opc))
  1320. return false;
  1321. bool Added = false;
  1322. if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
  1323. DoRegPressureReduce)) {
  1324. Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_AX
  1325. : MachineCombinerPattern::FMSUB);
  1326. Added = true;
  1327. }
  1328. if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
  1329. DoRegPressureReduce)) {
  1330. Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_XA
  1331. : MachineCombinerPattern::FNMSUB);
  1332. Added = true;
  1333. }
  1334. return Added;
  1335. }
  1336. static bool getFPPatterns(MachineInstr &Root,
  1337. SmallVectorImpl<MachineCombinerPattern> &Patterns,
  1338. bool DoRegPressureReduce) {
  1339. return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
  1340. }
  1341. bool RISCVInstrInfo::getMachineCombinerPatterns(
  1342. MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
  1343. bool DoRegPressureReduce) const {
  1344. if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
  1345. return true;
  1346. return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
  1347. DoRegPressureReduce);
  1348. }
  1349. static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
  1350. MachineCombinerPattern Pattern) {
  1351. switch (RootOpc) {
  1352. default:
  1353. llvm_unreachable("Unexpected opcode");
  1354. case RISCV::FADD_H:
  1355. return RISCV::FMADD_H;
  1356. case RISCV::FADD_S:
  1357. return RISCV::FMADD_S;
  1358. case RISCV::FADD_D:
  1359. return RISCV::FMADD_D;
  1360. case RISCV::FSUB_H:
  1361. return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
  1362. : RISCV::FNMSUB_H;
  1363. case RISCV::FSUB_S:
  1364. return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
  1365. : RISCV::FNMSUB_S;
  1366. case RISCV::FSUB_D:
  1367. return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
  1368. : RISCV::FNMSUB_D;
  1369. }
  1370. }
  1371. static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern) {
  1372. switch (Pattern) {
  1373. default:
  1374. llvm_unreachable("Unexpected pattern");
  1375. case MachineCombinerPattern::FMADD_AX:
  1376. case MachineCombinerPattern::FMSUB:
  1377. return 2;
  1378. case MachineCombinerPattern::FMADD_XA:
  1379. case MachineCombinerPattern::FNMSUB:
  1380. return 1;
  1381. }
  1382. }
  1383. static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
  1384. MachineCombinerPattern Pattern,
  1385. SmallVectorImpl<MachineInstr *> &InsInstrs,
  1386. SmallVectorImpl<MachineInstr *> &DelInstrs) {
  1387. MachineFunction *MF = Root.getMF();
  1388. MachineRegisterInfo &MRI = MF->getRegInfo();
  1389. const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  1390. MachineOperand &Mul1 = Prev.getOperand(1);
  1391. MachineOperand &Mul2 = Prev.getOperand(2);
  1392. MachineOperand &Dst = Root.getOperand(0);
  1393. MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
  1394. Register DstReg = Dst.getReg();
  1395. unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
  1396. auto IntersectedFlags = Root.getFlags() & Prev.getFlags();
  1397. DebugLoc MergedLoc =
  1398. DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
  1399. MachineInstrBuilder MIB =
  1400. BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
  1401. .addReg(Mul1.getReg(), getKillRegState(Mul1.isKill()))
  1402. .addReg(Mul2.getReg(), getKillRegState(Mul2.isKill()))
  1403. .addReg(Addend.getReg(), getKillRegState(Addend.isKill()))
  1404. .setMIFlags(IntersectedFlags);
  1405. // Mul operands are not killed anymore.
  1406. Mul1.setIsKill(false);
  1407. Mul2.setIsKill(false);
  1408. InsInstrs.push_back(MIB);
  1409. if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
  1410. DelInstrs.push_back(&Prev);
  1411. DelInstrs.push_back(&Root);
  1412. }
  1413. void RISCVInstrInfo::genAlternativeCodeSequence(
  1414. MachineInstr &Root, MachineCombinerPattern Pattern,
  1415. SmallVectorImpl<MachineInstr *> &InsInstrs,
  1416. SmallVectorImpl<MachineInstr *> &DelInstrs,
  1417. DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  1418. MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  1419. switch (Pattern) {
  1420. default:
  1421. TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
  1422. DelInstrs, InstrIdxForVirtReg);
  1423. return;
  1424. case MachineCombinerPattern::FMADD_AX:
  1425. case MachineCombinerPattern::FMSUB: {
  1426. MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
  1427. combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
  1428. return;
  1429. }
  1430. case MachineCombinerPattern::FMADD_XA:
  1431. case MachineCombinerPattern::FNMSUB: {
  1432. MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
  1433. combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
  1434. return;
  1435. }
  1436. }
  1437. }
  1438. bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
  1439. StringRef &ErrInfo) const {
  1440. MCInstrDesc const &Desc = MI.getDesc();
  1441. for (auto &OI : enumerate(Desc.operands())) {
  1442. unsigned OpType = OI.value().OperandType;
  1443. if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
  1444. OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
  1445. const MachineOperand &MO = MI.getOperand(OI.index());
  1446. if (MO.isImm()) {
  1447. int64_t Imm = MO.getImm();
  1448. bool Ok;
  1449. switch (OpType) {
  1450. default:
  1451. llvm_unreachable("Unexpected operand type");
  1452. // clang-format off
  1453. #define CASE_OPERAND_UIMM(NUM) \
  1454. case RISCVOp::OPERAND_UIMM##NUM: \
  1455. Ok = isUInt<NUM>(Imm); \
  1456. break;
  1457. CASE_OPERAND_UIMM(2)
  1458. CASE_OPERAND_UIMM(3)
  1459. CASE_OPERAND_UIMM(4)
  1460. CASE_OPERAND_UIMM(5)
  1461. CASE_OPERAND_UIMM(7)
  1462. case RISCVOp::OPERAND_UIMM7_LSB00:
  1463. Ok = isShiftedUInt<5, 2>(Imm);
  1464. break;
  1465. case RISCVOp::OPERAND_UIMM8_LSB00:
  1466. Ok = isShiftedUInt<6, 2>(Imm);
  1467. break;
  1468. case RISCVOp::OPERAND_UIMM8_LSB000:
  1469. Ok = isShiftedUInt<5, 3>(Imm);
  1470. break;
  1471. CASE_OPERAND_UIMM(12)
  1472. CASE_OPERAND_UIMM(20)
  1473. // clang-format on
  1474. case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
  1475. Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
  1476. break;
  1477. case RISCVOp::OPERAND_ZERO:
  1478. Ok = Imm == 0;
  1479. break;
  1480. case RISCVOp::OPERAND_SIMM5:
  1481. Ok = isInt<5>(Imm);
  1482. break;
  1483. case RISCVOp::OPERAND_SIMM5_PLUS1:
  1484. Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
  1485. break;
  1486. case RISCVOp::OPERAND_SIMM6:
  1487. Ok = isInt<6>(Imm);
  1488. break;
  1489. case RISCVOp::OPERAND_SIMM6_NONZERO:
  1490. Ok = Imm != 0 && isInt<6>(Imm);
  1491. break;
  1492. case RISCVOp::OPERAND_VTYPEI10:
  1493. Ok = isUInt<10>(Imm);
  1494. break;
  1495. case RISCVOp::OPERAND_VTYPEI11:
  1496. Ok = isUInt<11>(Imm);
  1497. break;
  1498. case RISCVOp::OPERAND_SIMM12:
  1499. Ok = isInt<12>(Imm);
  1500. break;
  1501. case RISCVOp::OPERAND_SIMM12_LSB00000:
  1502. Ok = isShiftedInt<7, 5>(Imm);
  1503. break;
  1504. case RISCVOp::OPERAND_UIMMLOG2XLEN:
  1505. Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
  1506. break;
  1507. case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
  1508. Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
  1509. Ok = Ok && Imm != 0;
  1510. break;
  1511. case RISCVOp::OPERAND_UIMM_SHFL:
  1512. Ok = STI.is64Bit() ? isUInt<5>(Imm) : isUInt<4>(Imm);
  1513. break;
  1514. case RISCVOp::OPERAND_RVKRNUM:
  1515. Ok = Imm >= 0 && Imm <= 10;
  1516. break;
  1517. }
  1518. if (!Ok) {
  1519. ErrInfo = "Invalid immediate";
  1520. return false;
  1521. }
  1522. }
  1523. }
  1524. }
  1525. const uint64_t TSFlags = Desc.TSFlags;
  1526. if (RISCVII::hasMergeOp(TSFlags)) {
  1527. unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
  1528. if (MI.findTiedOperandIdx(0) != OpIdx) {
  1529. ErrInfo = "Merge op improperly tied";
  1530. return false;
  1531. }
  1532. }
  1533. if (RISCVII::hasVLOp(TSFlags)) {
  1534. const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
  1535. if (!Op.isImm() && !Op.isReg()) {
  1536. ErrInfo = "Invalid operand type for VL operand";
  1537. return false;
  1538. }
  1539. if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
  1540. const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  1541. auto *RC = MRI.getRegClass(Op.getReg());
  1542. if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
  1543. ErrInfo = "Invalid register class for VL operand";
  1544. return false;
  1545. }
  1546. }
  1547. if (!RISCVII::hasSEWOp(TSFlags)) {
  1548. ErrInfo = "VL operand w/o SEW operand?";
  1549. return false;
  1550. }
  1551. }
  1552. if (RISCVII::hasSEWOp(TSFlags)) {
  1553. unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
  1554. uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
  1555. if (Log2SEW > 31) {
  1556. ErrInfo = "Unexpected SEW value";
  1557. return false;
  1558. }
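// The operand holds log2(SEW); a stored value of 0 is treated as SEW == 8.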
  1559. unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
  1560. if (!RISCVVType::isValidSEW(SEW)) {
  1561. ErrInfo = "Unexpected SEW value";
  1562. return false;
  1563. }
  1564. }
  1565. if (RISCVII::hasVecPolicyOp(TSFlags)) {
  1566. unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
  1567. uint64_t Policy = MI.getOperand(OpIdx).getImm();
  1568. if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
  1569. ErrInfo = "Invalid Policy Value";
  1570. return false;
  1571. }
  1572. if (!RISCVII::hasVLOp(TSFlags)) {
  1573. ErrInfo = "policy operand w/o VL operand?";
  1574. return false;
  1575. }
  1576. // VecPolicy operands can only exist on instructions with passthru/merge
1577. // arguments. Note that not all instructions with a passthru argument have a
1578. // vec policy operand - some instructions have implicit policies.
  1579. unsigned UseOpIdx;
  1580. if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
  1581. ErrInfo = "policy operand w/o tied operand?";
  1582. return false;
  1583. }
  1584. }
  1585. return true;
  1586. }
1587. // Return true if we can get the base operand and byte offset of an instruction,
1588. // plus the memory width. Width is the size of memory that is being loaded/stored.
  1589. bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
  1590. const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
  1591. unsigned &Width, const TargetRegisterInfo *TRI) const {
  1592. if (!LdSt.mayLoadOrStore())
  1593. return false;
  1594. // Here we assume the standard RISC-V ISA, which uses a base+offset
  1595. // addressing mode. You'll need to relax these conditions to support custom
1596. // load/store instructions.
  1597. if (LdSt.getNumExplicitOperands() != 3)
  1598. return false;
  1599. if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
  1600. return false;
  1601. if (!LdSt.hasOneMemOperand())
  1602. return false;
  1603. Width = (*LdSt.memoperands_begin())->getSize();
  1604. BaseReg = &LdSt.getOperand(1);
  1605. Offset = LdSt.getOperand(2).getImm();
  1606. return true;
  1607. }
  1608. bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
  1609. const MachineInstr &MIa, const MachineInstr &MIb) const {
  1610. assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  1611. assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
  1612. if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
  1613. MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
  1614. return false;
  1615. // Retrieve the base register, offset from the base register and width. Width
  1616. // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
  1617. // base registers are identical, and the offset of a lower memory access +
  1618. // the width doesn't overlap the offset of a higher memory access,
  1619. // then the memory accesses are different.
  1620. const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  1621. const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  1622. int64_t OffsetA = 0, OffsetB = 0;
  1623. unsigned int WidthA = 0, WidthB = 0;
  1624. if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
  1625. getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
  1626. if (BaseOpA->isIdenticalTo(*BaseOpB)) {
  1627. int LowOffset = std::min(OffsetA, OffsetB);
  1628. int HighOffset = std::max(OffsetA, OffsetB);
  1629. int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  1630. if (LowOffset + LowWidth <= HighOffset)
  1631. return true;
  1632. }
  1633. }
  1634. return false;
  1635. }
  1636. std::pair<unsigned, unsigned>
  1637. RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  1638. const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  1639. return std::make_pair(TF & Mask, TF & ~Mask);
  1640. }
  1641. ArrayRef<std::pair<unsigned, const char *>>
  1642. RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  1643. using namespace RISCVII;
  1644. static const std::pair<unsigned, const char *> TargetFlags[] = {
  1645. {MO_CALL, "riscv-call"},
  1646. {MO_PLT, "riscv-plt"},
  1647. {MO_LO, "riscv-lo"},
  1648. {MO_HI, "riscv-hi"},
  1649. {MO_PCREL_LO, "riscv-pcrel-lo"},
  1650. {MO_PCREL_HI, "riscv-pcrel-hi"},
  1651. {MO_GOT_HI, "riscv-got-hi"},
  1652. {MO_TPREL_LO, "riscv-tprel-lo"},
  1653. {MO_TPREL_HI, "riscv-tprel-hi"},
  1654. {MO_TPREL_ADD, "riscv-tprel-add"},
  1655. {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
  1656. {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  1657. return ArrayRef(TargetFlags);
  1658. }
  1659. bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
  1660. MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  1661. const Function &F = MF.getFunction();
  1662. // Can F be deduplicated by the linker? If it can, don't outline from it.
  1663. if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
  1664. return false;
  1665. // Don't outline from functions with section markings; the program could
  1666. // expect that all the code is in the named section.
  1667. if (F.hasSection())
  1668. return false;
  1669. // It's safe to outline from MF.
  1670. return true;
  1671. }
  1672. bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
  1673. unsigned &Flags) const {
  1674. // More accurate safety checking is done in getOutliningCandidateInfo.
  1675. return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
  1676. }
  1677. // Enum values indicating how an outlined call should be constructed.
  1678. enum MachineOutlinerConstructionID {
  1679. MachineOutlinerDefault
  1680. };
  1681. bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
  1682. MachineFunction &MF) const {
  1683. return MF.getFunction().hasMinSize();
  1684. }
  1685. outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
  1686. std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1687. // First we need to filter out candidates where the X5 register (i.e. t0) can't
1688. // be used to set up the function call.
  1689. auto CannotInsertCall = [](outliner::Candidate &C) {
  1690. const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
  1691. return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  1692. };
  1693. llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
  1694. // If the sequence doesn't have enough candidates left, then we're done.
  1695. if (RepeatedSequenceLocs.size() < 2)
  1696. return outliner::OutlinedFunction();
  1697. unsigned SequenceSize = 0;
  1698. auto I = RepeatedSequenceLocs[0].front();
  1699. auto E = std::next(RepeatedSequenceLocs[0].back());
  1700. for (; I != E; ++I)
  1701. SequenceSize += getInstSizeInBytes(*I);
  1702. // call t0, function = 8 bytes.
  1703. unsigned CallOverhead = 8;
  1704. for (auto &C : RepeatedSequenceLocs)
  1705. C.setCallInfo(MachineOutlinerDefault, CallOverhead);
  1706. // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  1707. unsigned FrameOverhead = 4;
  1708. if (RepeatedSequenceLocs[0]
  1709. .getMF()
  1710. ->getSubtarget<RISCVSubtarget>()
  1711. .hasStdExtCOrZca())
  1712. FrameOverhead = 2;
  1713. return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
  1714. FrameOverhead, MachineOutlinerDefault);
  1715. }
  1716. outliner::InstrType
  1717. RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
  1718. unsigned Flags) const {
  1719. MachineInstr &MI = *MBBI;
  1720. MachineBasicBlock *MBB = MI.getParent();
  1721. const TargetRegisterInfo *TRI =
  1722. MBB->getParent()->getSubtarget().getRegisterInfo();
  1723. const auto &F = MI.getMF()->getFunction();
  1724. // Positions generally can't safely be outlined.
  1725. if (MI.isPosition()) {
  1726. // We can manually strip out CFI instructions later.
  1727. if (MI.isCFIInstruction())
1728. // If the current function has exception handling code, we can't outline and
1729. // strip these CFI instructions, since doing so may break the .eh_frame
1730. // section needed for unwinding.
  1731. return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
  1732. : outliner::InstrType::Invisible;
  1733. return outliner::InstrType::Illegal;
  1734. }
  1735. // Don't trust the user to write safe inline assembly.
  1736. if (MI.isInlineAsm())
  1737. return outliner::InstrType::Illegal;
  1738. // We can't outline branches to other basic blocks.
  1739. if (MI.isTerminator() && !MBB->succ_empty())
  1740. return outliner::InstrType::Illegal;
  1741. // We need support for tail calls to outlined functions before return
  1742. // statements can be allowed.
  1743. if (MI.isReturn())
  1744. return outliner::InstrType::Illegal;
1745. // Don't allow modifying the X5 register, which we use for return addresses in
1746. // these outlined functions.
  1747. if (MI.modifiesRegister(RISCV::X5, TRI) ||
  1748. MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
  1749. return outliner::InstrType::Illegal;
  1750. // Make sure the operands don't reference something unsafe.
  1751. for (const auto &MO : MI.operands()) {
  1752. if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
  1753. return outliner::InstrType::Illegal;
1754. // pcrel-hi and pcrel-lo can't be put in separate sections, so filter that out
1755. // wherever possible.
  1756. if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
  1757. (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
  1758. F.hasSection()))
  1759. return outliner::InstrType::Illegal;
  1760. }
  1761. // Don't allow instructions which won't be materialized to impact outlining
  1762. // analysis.
  1763. if (MI.isMetaInstruction())
  1764. return outliner::InstrType::Invisible;
  1765. return outliner::InstrType::Legal;
  1766. }
  1767. void RISCVInstrInfo::buildOutlinedFrame(
  1768. MachineBasicBlock &MBB, MachineFunction &MF,
  1769. const outliner::OutlinedFunction &OF) const {
  1770. // Strip out any CFI instructions
  1771. bool Changed = true;
  1772. while (Changed) {
  1773. Changed = false;
  1774. auto I = MBB.begin();
  1775. auto E = MBB.end();
  1776. for (; I != E; ++I) {
  1777. if (I->isCFIInstruction()) {
  1778. I->removeFromParent();
  1779. Changed = true;
  1780. break;
  1781. }
  1782. }
  1783. }
  1784. MBB.addLiveIn(RISCV::X5);
  1785. // Add in a return instruction to the end of the outlined frame.
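// The call into the outlined function saved its return address in X5 (t0), so
// return with 'jalr x0, 0(x5)', i.e. 'jr t0'.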
  1786. MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
  1787. .addReg(RISCV::X0, RegState::Define)
  1788. .addReg(RISCV::X5)
  1789. .addImm(0));
  1790. }
  1791. MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
  1792. Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
  1793. MachineFunction &MF, outliner::Candidate &C) const {
  1794. // Add in a call instruction to the outlined function at the given location.
  1795. It = MBB.insert(It,
  1796. BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
  1797. .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
  1798. RISCVII::MO_CALL));
  1799. return It;
  1800. }
  1801. // MIR printer helper function to annotate Operands with a comment.
  1802. std::string RISCVInstrInfo::createMIROperandComment(
  1803. const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
  1804. const TargetRegisterInfo *TRI) const {
  1805. // Print a generic comment for this operand if there is one.
  1806. std::string GenericComment =
  1807. TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  1808. if (!GenericComment.empty())
  1809. return GenericComment;
  1810. // If not, we must have an immediate operand.
  1811. if (!Op.isImm())
  1812. return std::string();
  1813. std::string Comment;
  1814. raw_string_ostream OS(Comment);
  1815. uint64_t TSFlags = MI.getDesc().TSFlags;
  1816. // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
  1817. // operand of vector codegen pseudos.
  1818. if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
  1819. MI.getOpcode() == RISCV::PseudoVSETVLI ||
  1820. MI.getOpcode() == RISCV::PseudoVSETIVLI ||
  1821. MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
  1822. OpIdx == 2) {
  1823. unsigned Imm = MI.getOperand(OpIdx).getImm();
  1824. RISCVVType::printVType(Imm, OS);
  1825. } else if (RISCVII::hasSEWOp(TSFlags) &&
  1826. OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
  1827. unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
  1828. unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
  1829. assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
  1830. OS << "e" << SEW;
  1831. } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
  1832. OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
  1833. unsigned Policy = MI.getOperand(OpIdx).getImm();
  1834. assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
  1835. "Invalid Policy Value");
  1836. OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
  1837. << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
  1838. }
  1839. OS.flush();
  1840. return Comment;
  1841. }
  1842. // clang-format off
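// These macros expand into the case labels for every LMUL variant of a VFMA
// pseudo; e.g. CASE_VFMA_OPCODE_LMULS_M1(FMADD, VV) covers
// RISCV::PseudoVFMADD_VV_M1 through RISCV::PseudoVFMADD_VV_M8.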
  1843. #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  1844. RISCV::PseudoV##OP##_##TYPE##_##LMUL
  1845. #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
  1846. CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
  1847. case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
  1848. case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
  1849. case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
  1850. #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
  1851. CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
  1852. case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
  1853. #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
  1854. CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
  1855. case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
  1856. #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
  1857. CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
  1858. case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
  1859. #define CASE_VFMA_SPLATS(OP) \
  1860. CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16): \
  1861. case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32): \
  1862. case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
  1863. // clang-format on
  1864. bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
  1865. unsigned &SrcOpIdx1,
  1866. unsigned &SrcOpIdx2) const {
  1867. const MCInstrDesc &Desc = MI.getDesc();
  1868. if (!Desc.isCommutable())
  1869. return false;
  1870. switch (MI.getOpcode()) {
  1871. case RISCV::PseudoCCMOVGPR:
  1872. // Operands 4 and 5 are commutable.
  1873. return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  1874. case CASE_VFMA_SPLATS(FMADD):
  1875. case CASE_VFMA_SPLATS(FMSUB):
  1876. case CASE_VFMA_SPLATS(FMACC):
  1877. case CASE_VFMA_SPLATS(FMSAC):
  1878. case CASE_VFMA_SPLATS(FNMADD):
  1879. case CASE_VFMA_SPLATS(FNMSUB):
  1880. case CASE_VFMA_SPLATS(FNMACC):
  1881. case CASE_VFMA_SPLATS(FNMSAC):
  1882. case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
  1883. case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
  1884. case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
  1885. case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
  1886. case CASE_VFMA_OPCODE_LMULS(MADD, VX):
  1887. case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
  1888. case CASE_VFMA_OPCODE_LMULS(MACC, VX):
  1889. case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
  1890. case CASE_VFMA_OPCODE_LMULS(MACC, VV):
  1891. case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
  1892. // If the tail policy is undisturbed we can't commute.
  1893. assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
  1894. if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
  1895. return false;
  1896. // For these instructions we can only swap operand 1 and operand 3 by
  1897. // changing the opcode.
  1898. unsigned CommutableOpIdx1 = 1;
  1899. unsigned CommutableOpIdx2 = 3;
  1900. if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
  1901. CommutableOpIdx2))
  1902. return false;
  1903. return true;
  1904. }
  1905. case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
  1906. case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
  1907. case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
  1908. case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
  1909. case CASE_VFMA_OPCODE_LMULS(MADD, VV):
  1910. case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
  1911. // If the tail policy is undisturbed we can't commute.
  1912. assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
  1913. if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
  1914. return false;
  1915. // For these instructions we have more freedom. We can commute with the
  1916. // other multiplicand or with the addend/subtrahend/minuend.
  1917. // Any fixed operand must be from source 1, 2 or 3.
  1918. if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
  1919. return false;
  1920. if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
  1921. return false;
1922. // If both ops are fixed, one must be the tied source.
  1923. if (SrcOpIdx1 != CommuteAnyOperandIndex &&
  1924. SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
  1925. return false;
  1926. // Look for two different register operands assumed to be commutable
  1927. // regardless of the FMA opcode. The FMA opcode is adjusted later if
  1928. // needed.
  1929. if (SrcOpIdx1 == CommuteAnyOperandIndex ||
  1930. SrcOpIdx2 == CommuteAnyOperandIndex) {
  1931. // At least one of operands to be commuted is not specified and
  1932. // this method is free to choose appropriate commutable operands.
  1933. unsigned CommutableOpIdx1 = SrcOpIdx1;
  1934. if (SrcOpIdx1 == SrcOpIdx2) {
  1935. // Both of operands are not fixed. Set one of commutable
  1936. // operands to the tied source.
  1937. CommutableOpIdx1 = 1;
  1938. } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
  1939. // Only one of the operands is not fixed.
  1940. CommutableOpIdx1 = SrcOpIdx2;
  1941. }
  1942. // CommutableOpIdx1 is well defined now. Let's choose another commutable
  1943. // operand and assign its index to CommutableOpIdx2.
  1944. unsigned CommutableOpIdx2;
  1945. if (CommutableOpIdx1 != 1) {
  1946. // If we haven't already used the tied source, we must use it now.
  1947. CommutableOpIdx2 = 1;
  1948. } else {
  1949. Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
  1950. // The commuted operands should have different registers.
  1951. // Otherwise, the commute transformation does not change anything and
  1952. // is useless. We use this as a hint to make our decision.
  1953. if (Op1Reg != MI.getOperand(2).getReg())
  1954. CommutableOpIdx2 = 2;
  1955. else
  1956. CommutableOpIdx2 = 3;
  1957. }
  1958. // Assign the found pair of commutable indices to SrcOpIdx1 and
  1959. // SrcOpIdx2 to return those values.
  1960. if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
  1961. CommutableOpIdx2))
  1962. return false;
  1963. }
  1964. return true;
  1965. }
  1966. }
  1967. return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
  1968. }
  1969. #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
  1970. case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
  1971. Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
  1972. break;
  1973. #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
  1974. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
  1975. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
  1976. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
  1977. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
  1978. #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
  1979. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
  1980. CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
  1981. #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
  1982. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
  1983. CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
  1984. #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
  1985. CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
  1986. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
  1987. #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
  1988. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16) \
  1989. CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32) \
  1990. CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
  1991. MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
  1992. bool NewMI,
  1993. unsigned OpIdx1,
  1994. unsigned OpIdx2) const {
  1995. auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
  1996. if (NewMI)
  1997. return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
  1998. return MI;
  1999. };
  2000. switch (MI.getOpcode()) {
  2001. case RISCV::PseudoCCMOVGPR: {
  2002. // CCMOV can be commuted by inverting the condition.
  2003. auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
  2004. CC = RISCVCC::getOppositeBranchCondition(CC);
  2005. auto &WorkingMI = cloneIfNew(MI);
  2006. WorkingMI.getOperand(3).setImm(CC);
  2007. return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
  2008. OpIdx1, OpIdx2);
  2009. }
  2010. case CASE_VFMA_SPLATS(FMACC):
  2011. case CASE_VFMA_SPLATS(FMADD):
  2012. case CASE_VFMA_SPLATS(FMSAC):
  2013. case CASE_VFMA_SPLATS(FMSUB):
  2014. case CASE_VFMA_SPLATS(FNMACC):
  2015. case CASE_VFMA_SPLATS(FNMADD):
  2016. case CASE_VFMA_SPLATS(FNMSAC):
  2017. case CASE_VFMA_SPLATS(FNMSUB):
  2018. case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
  2019. case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
  2020. case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
  2021. case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
  2022. case CASE_VFMA_OPCODE_LMULS(MADD, VX):
  2023. case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
  2024. case CASE_VFMA_OPCODE_LMULS(MACC, VX):
  2025. case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
  2026. case CASE_VFMA_OPCODE_LMULS(MACC, VV):
  2027. case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2028. // It only makes sense to toggle these between clobbering the
2029. // addend/subtrahend/minuend and clobbering one of the multiplicands.
  2030. assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
  2031. assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
  2032. unsigned Opc;
  2033. switch (MI.getOpcode()) {
  2034. default:
  2035. llvm_unreachable("Unexpected opcode");
  2036. CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
  2037. CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
  2038. CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
  2039. CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
  2040. CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
  2041. CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
  2042. CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
  2043. CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
  2044. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
  2045. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
  2046. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
  2047. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
  2048. CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
  2049. CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
  2050. CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
  2051. CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
  2052. CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
  2053. CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
  2054. }
  2055. auto &WorkingMI = cloneIfNew(MI);
  2056. WorkingMI.setDesc(get(Opc));
  2057. return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
  2058. OpIdx1, OpIdx2);
  2059. }
  2060. case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
  2061. case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
  2062. case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
  2063. case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
  2064. case CASE_VFMA_OPCODE_LMULS(MADD, VV):
  2065. case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
  2066. assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2067. // If one of the operands is the addend, we need to change the opcode.
2068. // Otherwise we're just swapping two of the multiplicands.
  2069. if (OpIdx1 == 3 || OpIdx2 == 3) {
  2070. unsigned Opc;
  2071. switch (MI.getOpcode()) {
  2072. default:
  2073. llvm_unreachable("Unexpected opcode");
  2074. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
  2075. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
  2076. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
  2077. CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
  2078. CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
  2079. CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
  2080. }
  2081. auto &WorkingMI = cloneIfNew(MI);
  2082. WorkingMI.setDesc(get(Opc));
  2083. return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
  2084. OpIdx1, OpIdx2);
  2085. }
  2086. // Let the default code handle it.
  2087. break;
  2088. }
  2089. }
  2090. return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  2091. }
  2092. #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
  2093. #undef CASE_VFMA_CHANGE_OPCODE_LMULS
  2094. #undef CASE_VFMA_CHANGE_OPCODE_COMMON
  2095. #undef CASE_VFMA_SPLATS
  2096. #undef CASE_VFMA_OPCODE_LMULS
  2097. #undef CASE_VFMA_OPCODE_COMMON
  2098. // clang-format off
  2099. #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
  2100. RISCV::PseudoV##OP##_##LMUL##_TIED
  2101. #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
  2102. CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
  2103. case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
  2104. case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
  2105. case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
  2106. case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
  2107. #define CASE_WIDEOP_OPCODE_LMULS(OP) \
  2108. CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
  2109. case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
  2110. // clang-format on
  2111. #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
  2112. case RISCV::PseudoV##OP##_##LMUL##_TIED: \
  2113. NewOpc = RISCV::PseudoV##OP##_##LMUL; \
  2114. break;
  2115. #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
  2116. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
  2117. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
  2118. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
  2119. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
  2120. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
  2121. #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  2122. CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
  2123. CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
  2124. MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
  2125. LiveVariables *LV,
  2126. LiveIntervals *LIS) const {
  2127. switch (MI.getOpcode()) {
  2128. default:
  2129. break;
  2130. case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
  2131. case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
  2132. case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  2133. case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  2134. case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  2135. case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
  2136. // If the tail policy is undisturbed we can't convert.
  2137. assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
  2138. MI.getNumExplicitOperands() == 6);
  2139. if ((MI.getOperand(5).getImm() & 1) == 0)
  2140. return nullptr;
  2141. // clang-format off
  2142. unsigned NewOpc;
  2143. switch (MI.getOpcode()) {
  2144. default:
  2145. llvm_unreachable("Unexpected opcode");
  2146. CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
  2147. CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
  2148. CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
  2149. CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
  2150. CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
  2151. CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
  2152. }
  2153. // clang-format on
  2154. MachineBasicBlock &MBB = *MI.getParent();
  2155. MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
  2156. .add(MI.getOperand(0))
  2157. .add(MI.getOperand(1))
  2158. .add(MI.getOperand(2))
  2159. .add(MI.getOperand(3))
  2160. .add(MI.getOperand(4));
  2161. MIB.copyImplicitOps(MI);
  2162. if (LV) {
  2163. unsigned NumOps = MI.getNumOperands();
  2164. for (unsigned I = 1; I < NumOps; ++I) {
  2165. MachineOperand &Op = MI.getOperand(I);
  2166. if (Op.isReg() && Op.isKill())
  2167. LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
  2168. }
  2169. }
  2170. if (LIS) {
  2171. SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
  2172. if (MI.getOperand(0).isEarlyClobber()) {
2173. // Use operand 1 was tied to early-clobber def operand 0, so its live
2174. // interval could have ended at an early-clobber slot. Now that they are no
2175. // longer tied, we need to update it to the normal register slot.
  2176. LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
  2177. LiveRange::Segment *S = LI.getSegmentContaining(Idx);
  2178. if (S->end == Idx.getRegSlot(true))
  2179. S->end = Idx.getRegSlot();
  2180. }
  2181. }
  2182. return MIB;
  2183. }
  2184. }
  2185. return nullptr;
  2186. }
  2187. #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
  2188. #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
  2189. #undef CASE_WIDEOP_OPCODE_LMULS
  2190. #undef CASE_WIDEOP_OPCODE_COMMON
  2191. void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
  2192. MachineBasicBlock &MBB,
  2193. MachineBasicBlock::iterator II,
  2194. const DebugLoc &DL, Register DestReg,
  2195. int64_t Amount,
  2196. MachineInstr::MIFlag Flag) const {
  2197. assert(Amount > 0 && "There is no need to get VLEN scaled value.");
  2198. assert(Amount % 8 == 0 &&
  2199. "Reserve the stack by the multiple of one vector size.");
  2200. MachineRegisterInfo &MRI = MF.getRegInfo();
  2201. int64_t NumOfVReg = Amount / 8;
  2202. BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
  2203. assert(isInt<32>(NumOfVReg) &&
  2204. "Expect the number of vector registers within 32-bits.");
  2205. if (isPowerOf2_32(NumOfVReg)) {
  2206. uint32_t ShiftAmount = Log2_32(NumOfVReg);
  2207. if (ShiftAmount == 0)
  2208. return;
  2209. BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
  2210. .addReg(DestReg, RegState::Kill)
  2211. .addImm(ShiftAmount)
  2212. .setMIFlag(Flag);
  2213. } else if (STI.hasStdExtZba() &&
  2214. ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
  2215. (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
  2216. (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
  2217. // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
  2218. unsigned Opc;
  2219. uint32_t ShiftAmount;
  2220. if (NumOfVReg % 9 == 0) {
  2221. Opc = RISCV::SH3ADD;
  2222. ShiftAmount = Log2_64(NumOfVReg / 9);
  2223. } else if (NumOfVReg % 5 == 0) {
  2224. Opc = RISCV::SH2ADD;
  2225. ShiftAmount = Log2_64(NumOfVReg / 5);
  2226. } else if (NumOfVReg % 3 == 0) {
  2227. Opc = RISCV::SH1ADD;
  2228. ShiftAmount = Log2_64(NumOfVReg / 3);
  2229. } else {
  2230. llvm_unreachable("Unexpected number of vregs");
  2231. }
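// For example, Amount == 144 gives NumOfVReg == 18 == 9 * 2, so we emit
// 'slli DestReg, DestReg, 1' followed by 'sh3add DestReg, DestReg, DestReg' to
// scale VLENB by 18.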
  2232. if (ShiftAmount)
  2233. BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
  2234. .addReg(DestReg, RegState::Kill)
  2235. .addImm(ShiftAmount)
  2236. .setMIFlag(Flag);
  2237. BuildMI(MBB, II, DL, get(Opc), DestReg)
  2238. .addReg(DestReg, RegState::Kill)
  2239. .addReg(DestReg)
  2240. .setMIFlag(Flag);
  2241. } else if (isPowerOf2_32(NumOfVReg - 1)) {
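// NumOfVReg == 2^k + 1: compute VLENB << k, then add the original VLENB.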
  2242. Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  2243. uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
  2244. BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
  2245. .addReg(DestReg)
  2246. .addImm(ShiftAmount)
  2247. .setMIFlag(Flag);
  2248. BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
  2249. .addReg(ScaledRegister, RegState::Kill)
  2250. .addReg(DestReg, RegState::Kill)
  2251. .setMIFlag(Flag);
  2252. } else if (isPowerOf2_32(NumOfVReg + 1)) {
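// NumOfVReg == 2^k - 1: compute VLENB << k, then subtract the original VLENB.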
  2253. Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  2254. uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
  2255. BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
  2256. .addReg(DestReg)
  2257. .addImm(ShiftAmount)
  2258. .setMIFlag(Flag);
  2259. BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
  2260. .addReg(ScaledRegister, RegState::Kill)
  2261. .addReg(DestReg, RegState::Kill)
  2262. .setMIFlag(Flag);
  2263. } else {
  2264. Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  2265. movImm(MBB, II, DL, N, NumOfVReg, Flag);
  2266. if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
  2267. MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
  2268. MF.getFunction(),
  2269. "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
  2270. "offset."});
  2271. BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
  2272. .addReg(DestReg, RegState::Kill)
  2273. .addReg(N, RegState::Kill)
  2274. .setMIFlag(Flag);
  2275. }
  2276. }
  2277. // Checks if all users only demand the lower \p OrigBits of the original
  2278. // instruction's result.
  2279. // TODO: handle multiple interdependent transformations
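// For example, with OrigBits == 32 this returns true when every transitive user
// only reads the low 32 bits of the result, such as a chain ending in SW or in
// a *W arithmetic instruction.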
  2280. bool RISCVInstrInfo::hasAllNBitUsers(const MachineInstr &OrigMI,
  2281. const MachineRegisterInfo &MRI,
  2282. unsigned OrigBits) const {
  2283. SmallSet<std::pair<const MachineInstr *, unsigned>, 4> Visited;
  2284. SmallVector<std::pair<const MachineInstr *, unsigned>, 4> Worklist;
  2285. Worklist.push_back(std::make_pair(&OrigMI, OrigBits));
  2286. while (!Worklist.empty()) {
  2287. auto P = Worklist.pop_back_val();
  2288. const MachineInstr *MI = P.first;
  2289. unsigned Bits = P.second;
  2290. if (!Visited.insert(P).second)
  2291. continue;
  2292. // Only handle instructions with one def.
  2293. if (MI->getNumExplicitDefs() != 1)
  2294. return false;
  2295. for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) {
  2296. const MachineInstr *UserMI = UserOp.getParent();
  2297. unsigned OpIdx = UserMI->getOperandNo(&UserOp);
  2298. switch (UserMI->getOpcode()) {
  2299. default:
  2300. return false;
  2301. case RISCV::ADDIW:
  2302. case RISCV::ADDW:
  2303. case RISCV::DIVUW:
  2304. case RISCV::DIVW:
  2305. case RISCV::MULW:
  2306. case RISCV::REMUW:
  2307. case RISCV::REMW:
  2308. case RISCV::SLLIW:
  2309. case RISCV::SLLW:
  2310. case RISCV::SRAIW:
  2311. case RISCV::SRAW:
  2312. case RISCV::SRLIW:
  2313. case RISCV::SRLW:
  2314. case RISCV::SUBW:
  2315. case RISCV::ROLW:
  2316. case RISCV::RORW:
  2317. case RISCV::RORIW:
  2318. case RISCV::CLZW:
  2319. case RISCV::CTZW:
  2320. case RISCV::CPOPW:
  2321. case RISCV::SLLI_UW:
  2322. case RISCV::FMV_W_X:
  2323. case RISCV::FCVT_H_W:
  2324. case RISCV::FCVT_H_WU:
  2325. case RISCV::FCVT_S_W:
  2326. case RISCV::FCVT_S_WU:
  2327. case RISCV::FCVT_D_W:
  2328. case RISCV::FCVT_D_WU:
  2329. if (Bits >= 32)
  2330. break;
  2331. return false;
  2332. case RISCV::SEXT_B:
  2333. case RISCV::PACKH:
  2334. if (Bits >= 8)
  2335. break;
  2336. return false;
  2337. case RISCV::SEXT_H:
  2338. case RISCV::FMV_H_X:
  2339. case RISCV::ZEXT_H_RV32:
  2340. case RISCV::ZEXT_H_RV64:
  2341. case RISCV::PACKW:
  2342. if (Bits >= 16)
  2343. break;
  2344. return false;
  2345. case RISCV::PACK:
  2346. if (Bits >= (STI.getXLen() / 2))
  2347. break;
  2348. return false;
  2349. case RISCV::SRLI: {
  2350. // If we are shifting right by less than Bits, and users don't demand
  2351. // any bits that were shifted into [Bits-1:0], then we can consider this
  2352. // as an N-Bit user.
  2353. unsigned ShAmt = UserMI->getOperand(2).getImm();
  2354. if (Bits > ShAmt) {
  2355. Worklist.push_back(std::make_pair(UserMI, Bits - ShAmt));
  2356. break;
  2357. }
  2358. return false;
  2359. }
2360. // These overwrite higher input bits; otherwise the lower word of the output
2361. // depends only on the lower word of the input, so check their uses read only the low word.
  2362. case RISCV::SLLI:
  2363. if (Bits >= (STI.getXLen() - UserMI->getOperand(2).getImm()))
  2364. break;
  2365. Worklist.push_back(std::make_pair(UserMI, Bits));
  2366. break;
  2367. case RISCV::ANDI: {
  2368. uint64_t Imm = UserMI->getOperand(2).getImm();
  2369. if (Bits >= (unsigned)llvm::bit_width(Imm))
  2370. break;
  2371. Worklist.push_back(std::make_pair(UserMI, Bits));
  2372. break;
  2373. }
  2374. case RISCV::ORI: {
  2375. uint64_t Imm = UserMI->getOperand(2).getImm();
  2376. if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
  2377. break;
  2378. Worklist.push_back(std::make_pair(UserMI, Bits));
  2379. break;
  2380. }
  2381. case RISCV::SLL:
  2382. case RISCV::BSET:
  2383. case RISCV::BCLR:
  2384. case RISCV::BINV:
  2385. // Operand 2 is the shift amount which uses log2(xlen) bits.
  2386. if (OpIdx == 2) {
  2387. if (Bits >= Log2_32(STI.getXLen()))
  2388. break;
  2389. return false;
  2390. }
  2391. Worklist.push_back(std::make_pair(UserMI, Bits));
  2392. break;
  2393. case RISCV::SRA:
  2394. case RISCV::SRL:
  2395. case RISCV::ROL:
  2396. case RISCV::ROR:
2397. // Operand 2 is the shift amount, which uses log2(xlen) bits.
  2398. if (OpIdx == 2 && Bits >= Log2_32(STI.getXLen()))
  2399. break;
  2400. return false;
  2401. case RISCV::ADD_UW:
  2402. case RISCV::SH1ADD_UW:
  2403. case RISCV::SH2ADD_UW:
  2404. case RISCV::SH3ADD_UW:
  2405. // Operand 1 is implicitly zero extended.
  2406. if (OpIdx == 1 && Bits >= 32)
  2407. break;
  2408. Worklist.push_back(std::make_pair(UserMI, Bits));
  2409. break;
  2410. case RISCV::BEXTI:
  2411. if (UserMI->getOperand(2).getImm() >= Bits)
  2412. return false;
  2413. break;
  2414. case RISCV::SB:
  2415. // The first argument is the value to store.
  2416. if (OpIdx == 0 && Bits >= 8)
  2417. break;
  2418. return false;
  2419. case RISCV::SH:
  2420. // The first argument is the value to store.
  2421. if (OpIdx == 0 && Bits >= 16)
  2422. break;
  2423. return false;
  2424. case RISCV::SW:
  2425. // The first argument is the value to store.
  2426. if (OpIdx == 0 && Bits >= 32)
  2427. break;
  2428. return false;
2429. // For these operations, the lower word of the output depends only on the
2430. // lower word of the input, so we check that all uses only read the lower word.
  2431. case RISCV::COPY:
  2432. case RISCV::PHI:
  2433. case RISCV::ADD:
  2434. case RISCV::ADDI:
  2435. case RISCV::AND:
  2436. case RISCV::MUL:
  2437. case RISCV::OR:
  2438. case RISCV::SUB:
  2439. case RISCV::XOR:
  2440. case RISCV::XORI:
  2441. case RISCV::ANDN:
  2442. case RISCV::BREV8:
  2443. case RISCV::CLMUL:
  2444. case RISCV::ORC_B:
  2445. case RISCV::ORN:
  2446. case RISCV::SH1ADD:
  2447. case RISCV::SH2ADD:
  2448. case RISCV::SH3ADD:
  2449. case RISCV::XNOR:
  2450. case RISCV::BSETI:
  2451. case RISCV::BCLRI:
  2452. case RISCV::BINVI:
  2453. Worklist.push_back(std::make_pair(UserMI, Bits));
  2454. break;
  2455. case RISCV::PseudoCCMOVGPR:
  2456. // Either operand 4 or operand 5 is returned by this instruction. If
  2457. // only the lower word of the result is used, then only the lower word
2458. // of operands 4 and 5 is used.
  2459. if (OpIdx != 4 && OpIdx != 5)
  2460. return false;
  2461. Worklist.push_back(std::make_pair(UserMI, Bits));
  2462. break;
  2463. case RISCV::VT_MASKC:
  2464. case RISCV::VT_MASKCN:
  2465. if (OpIdx != 1)
  2466. return false;
  2467. Worklist.push_back(std::make_pair(UserMI, Bits));
  2468. break;
  2469. }
  2470. }
  2471. }
  2472. return true;
  2473. }
  2474. // Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
  2475. bool RISCV::isSEXT_W(const MachineInstr &MI) {
  2476. return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
  2477. MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
  2478. }
  2479. // Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
  2480. bool RISCV::isZEXT_W(const MachineInstr &MI) {
  2481. return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
  2482. MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
  2483. }
  2484. // Returns true if this is the zext.b pattern, andi rd, rs1, 255.
  2485. bool RISCV::isZEXT_B(const MachineInstr &MI) {
  2486. return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
  2487. MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
  2488. }
  2489. static bool isRVVWholeLoadStore(unsigned Opcode) {
  2490. switch (Opcode) {
  2491. default:
  2492. return false;
  2493. case RISCV::VS1R_V:
  2494. case RISCV::VS2R_V:
  2495. case RISCV::VS4R_V:
  2496. case RISCV::VS8R_V:
  2497. case RISCV::VL1RE8_V:
  2498. case RISCV::VL2RE8_V:
  2499. case RISCV::VL4RE8_V:
  2500. case RISCV::VL8RE8_V:
  2501. case RISCV::VL1RE16_V:
  2502. case RISCV::VL2RE16_V:
  2503. case RISCV::VL4RE16_V:
  2504. case RISCV::VL8RE16_V:
  2505. case RISCV::VL1RE32_V:
  2506. case RISCV::VL2RE32_V:
  2507. case RISCV::VL4RE32_V:
  2508. case RISCV::VL8RE32_V:
  2509. case RISCV::VL1RE64_V:
  2510. case RISCV::VL2RE64_V:
  2511. case RISCV::VL4RE64_V:
  2512. case RISCV::VL8RE64_V:
  2513. return true;
  2514. }
  2515. }
  2516. bool RISCV::isRVVSpill(const MachineInstr &MI) {
  2517. // RVV lacks any support for immediate addressing for stack addresses, so be
  2518. // conservative.
  2519. unsigned Opcode = MI.getOpcode();
  2520. if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
  2521. !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
  2522. return false;
  2523. return true;
  2524. }
  2525. std::optional<std::pair<unsigned, unsigned>>
  2526. RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  2527. switch (Opcode) {
  2528. default:
  2529. return std::nullopt;
  2530. case RISCV::PseudoVSPILL2_M1:
  2531. case RISCV::PseudoVRELOAD2_M1:
  2532. return std::make_pair(2u, 1u);
  2533. case RISCV::PseudoVSPILL2_M2:
  2534. case RISCV::PseudoVRELOAD2_M2:
  2535. return std::make_pair(2u, 2u);
  2536. case RISCV::PseudoVSPILL2_M4:
  2537. case RISCV::PseudoVRELOAD2_M4:
  2538. return std::make_pair(2u, 4u);
  2539. case RISCV::PseudoVSPILL3_M1:
  2540. case RISCV::PseudoVRELOAD3_M1:
  2541. return std::make_pair(3u, 1u);
  2542. case RISCV::PseudoVSPILL3_M2:
  2543. case RISCV::PseudoVRELOAD3_M2:
  2544. return std::make_pair(3u, 2u);
  2545. case RISCV::PseudoVSPILL4_M1:
  2546. case RISCV::PseudoVRELOAD4_M1:
  2547. return std::make_pair(4u, 1u);
  2548. case RISCV::PseudoVSPILL4_M2:
  2549. case RISCV::PseudoVRELOAD4_M2:
  2550. return std::make_pair(4u, 2u);
  2551. case RISCV::PseudoVSPILL5_M1:
  2552. case RISCV::PseudoVRELOAD5_M1:
  2553. return std::make_pair(5u, 1u);
  2554. case RISCV::PseudoVSPILL6_M1:
  2555. case RISCV::PseudoVRELOAD6_M1:
  2556. return std::make_pair(6u, 1u);
  2557. case RISCV::PseudoVSPILL7_M1:
  2558. case RISCV::PseudoVRELOAD7_M1:
  2559. return std::make_pair(7u, 1u);
  2560. case RISCV::PseudoVSPILL8_M1:
  2561. case RISCV::PseudoVRELOAD8_M1:
  2562. return std::make_pair(8u, 1u);
  2563. }
  2564. }
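// Fault-only-first loads define both the loaded value and a new VL, so they have
// two explicit defs and modify the VL register.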
  2565. bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  2566. return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
  2567. !MI.isInlineAsm();
  2568. }
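// Returns true if both instructions have an frm operand and those operands hold
// the same rounding mode.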
  2569. bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  2570. int16_t MI1FrmOpIdx =
  2571. RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  2572. int16_t MI2FrmOpIdx =
  2573. RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  2574. if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
  2575. return false;
  2576. MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  2577. MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  2578. return FrmOp1.getImm() == FrmOp2.getImm();
  2579. }