
//===- RISCVInstrInfoVSDPatterns.td - RVV SDNode patterns --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension, version
/// 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td.
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//
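
// rvv_vnot matches a bitwise NOT of a mask vector, written as an XOR against
// the all-ones mask produced by riscv_vmset_vl.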
def rvv_vnot : PatFrag<(ops node:$in),
                       (xor node:$in, (riscv_vmset_vl (XLenVT srcvalue)))>;
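
// The load/store multiclasses below select plain unit-stride vector load and
// store SDNodes onto the unmasked VLE/VSE pseudos or the whole-register
// VL<n>RE<sew>/VS<n>R instructions, appending each type's AVL and SEW operands
// where the pseudo expects them.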
multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int log2sew,
                                 LMULInfo vlmul,
                                 OutPatFrag avl,
                                 VReg reg_class,
                                 int sew = !shl(1, log2sew)>
{
  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1, avl, log2sew)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
}

multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                        int log2sew,
                                        LMULInfo vlmul,
                                        VReg reg_class,
                                        int sew = !shl(1, log2sew)>
{
  defvar load_instr =
    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
  defvar store_instr =
    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");
  // Load
  def : Pat<(type (load GPR:$rs1)),
            (load_instr GPR:$rs1)>;
  // Store
  def : Pat<(store type:$rs2, GPR:$rs1),
            (store_instr reg_class:$rs2, GPR:$rs1)>;
}

multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
{
  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
  // Load
  def : Pat<(m.Mask (load GPR:$rs1)),
            (load_instr GPR:$rs1, m.AVL, m.Log2SEW)>;
  // Store
  def : Pat<(store m.Mask:$rs2, GPR:$rs1),
            (store_instr VR:$rs2, GPR:$rs1, m.AVL, m.Log2SEW)>;
}
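
// Binary SDNode helper classes: _VV matches a vector-vector operation, while
// _XI matches the second operand as a splatted scalar or immediate through the
// supplied ComplexPattern (SplatPat*), selecting the _VX or _VI pseudo form.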
class VPatBinarySDNode_VV<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType op_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg op_reg_class> :
  Pat<(result_type (vop
                    (op_type op_reg_class:$rs1),
                    (op_type op_reg_class:$rs2))),
      (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
        op_reg_class:$rs1,
        op_reg_class:$rs2,
        avl, sew)>;

class VPatBinarySDNode_XI<SDNode vop,
                          string instruction_name,
                          string suffix,
                          ValueType result_type,
                          ValueType vop_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          ComplexPattern SplatPatKind,
                          DAGOperand xop_kind> :
  Pat<(result_type (vop
                    (vop_type vop_reg_class:$rs1),
                    (vop_type (SplatPatKind xop_kind:$rs2)))),
      (!cast<Instruction>(instruction_name#_#suffix#_# vlmul.MX)
        vop_reg_class:$rs1,
        xop_kind:$rs2,
        avl, sew)>;

multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass>;
    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              SplatPat, GPR>;
  }
}

multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
                                     Operand ImmType = simm5>
    : VPatBinarySDNode_VV_VX<vop, instruction_name> {
  foreach vti = AllIntegerVectors in {
    def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass,
                              !cast<ComplexPattern>(SplatPat#_#ImmType),
                              ImmType>;
  }
}
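
// For example, VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD"> (used below)
// produces patterns selecting a vector add onto PseudoVADD_VV_*,
// PseudoVADD_VX_* and PseudoVADD_VI_* for every integer vector type; ImmType
// defaults to simm5 and is overridden to uimm5 for the shift instructions.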
class VPatBinarySDNode_VF<SDNode vop,
                          string instruction_name,
                          ValueType result_type,
                          ValueType vop_type,
                          ValueType xop_type,
                          int sew,
                          LMULInfo vlmul,
                          OutPatFrag avl,
                          VReg vop_reg_class,
                          DAGOperand xop_kind> :
  Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
                        (vop_type (SplatFPOp xop_kind:$rs2)))),
      (!cast<Instruction>(instruction_name#"_"#vlmul.MX)
        vop_reg_class:$rs1,
        (xop_type xop_kind:$rs2),
        avl, sew)>;

multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
  foreach vti = AllFloatVectors in {
    def : VPatBinarySDNode_VV<vop, instruction_name,
                              vti.Vector, vti.Vector, vti.Log2SEW,
                              vti.LMul, vti.AVL, vti.RegClass>;
    def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
                              vti.Vector, vti.Vector, vti.Scalar,
                              vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
                              vti.ScalarRegClass>;
  }
}

multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in
    def : Pat<(fvti.Vector (vop (fvti.Vector (SplatFPOp fvti.Scalar:$rs2)),
                                (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                fvti.RegClass:$rs1,
                (fvti.Scalar fvti.ScalarRegClass:$rs2),
                fvti.AVL, fvti.Log2SEW)>;
}
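
// Integer compare helpers. The _Swappable variants also match the setcc with
// its operands commuted by using the inverse condition code, so the same
// pseudo (e.g. vmslt) covers the corresponding greater-than form as well.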
multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
                                     CondCode cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector vti.RegClass:$rs2), cc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VV_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_VV<instruction_name, cc> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs2),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL,
                           vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI<
    string instruction_name,
    CondCode cc,
    string kind,
    ComplexPattern SplatPatKind,
    DAGOperand xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_XI_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc,
                                               string kind,
                                               ComplexPattern SplatPatKind,
                                               DAGOperand xop_kind>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, kind, SplatPatKind,
                                xop_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
    def : Pat<(vti.Mask (setcc (vti.Vector (SplatPatKind xop_kind:$rs2)),
                               (vti.Vector vti.RegClass:$rs1), invcc)),
              (instruction vti.RegClass:$rs1, xop_kind:$rs2, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatIntegerSetCCSDNode_VX_Swappable<string instruction_name,
                                               CondCode cc, CondCode invcc>
    : VPatIntegerSetCCSDNode_XI_Swappable<instruction_name, cc, invcc, "VX",
                                          SplatPat, GPR>;

multiclass VPatIntegerSetCCSDNode_VI<string instruction_name, CondCode cc>
    : VPatIntegerSetCCSDNode_XI<instruction_name, cc, "VI", SplatPat_simm5, simm5>;

multiclass VPatIntegerSetCCSDNode_VIPlus1<string instruction_name, CondCode cc,
                                          ComplexPattern splatpat_kind> {
  foreach vti = AllIntegerVectors in {
    defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
                               (vti.Vector (splatpat_kind simm5:$rs2)),
                               cc)),
              (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
                           vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
                                      string inst_name,
                                      string swapped_op_inst_name> {
  foreach fvti = AllFloatVectors in {
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (fvti.Vector fvti.RegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
                fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
                                (SplatFPOp fvti.ScalarRegClass:$rs2),
                                cc)),
              (!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                fvti.AVL, fvti.Log2SEW)>;
    def : Pat<(fvti.Mask (setcc (SplatFPOp fvti.ScalarRegClass:$rs2),
                                (fvti.Vector fvti.RegClass:$rs1),
                                cc)),
              (!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
                fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
                              list <VTypeInfoToFraction> fraction_list> {
  foreach vtiTofti = fraction_list in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    foreach op = ops in
      def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
                (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                  fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvti = AllFloatVectors in {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;
    def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar ivti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
  }
}

multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatNConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
  foreach fvtiToFWti = AllWidenableFloatVectors in {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
    def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
  }
}

multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;
    def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
              (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}
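
// Widening binary helpers. The _VV_VX multiclasses match operations whose
// operands are single-use extensions (extop1/extop2) of a narrower vector or
// of a splatted GPR and select the widening VV/VX pseudo; the _WV_WX
// multiclasses match operations with one operand already wide, using the
// "_TIED" .wv pseudo in which the wide source is tied to the destination.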
multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                  (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                  (wti.Vector (extop2 (vti.Vector (SplatPat GPR:$rs1))))),
              (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                vti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
                                       string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (extop (vti.Vector vti.RegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                TAIL_AGNOSTIC)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (extop (vti.Vector (SplatPat GPR:$rs1))))),
              (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                wti.RegClass:$rs2, GPR:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
                                             string instruction_name> {
  defm : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>;
  defm : VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;
}

multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs1))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
        wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
        vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string instruction_name> {
  foreach vtiToWti = AllWidenableIntVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<
      (add (wti.Vector wti.RegClass:$rd),
           (mul_oneuse (wti.Vector (extop1 (vti.Vector (SplatPat GPR:$rs1)))),
                       (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs2))))),
      (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
        wti.RegClass:$rd, GPR:$rs1, vti.RegClass:$rs2,
        vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC
      )>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector vti.RegClass:$rs1),
                                (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                vti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector vti.RegClass:$rs2),
                                (vti.Mask true_mask), (XLenVT srcvalue))),
                  (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                vti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinaryFPSDNode_WV_WF<SDNode op, string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector vti.RegClass:$rs1),
                                (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_TIED")
                wti.RegClass:$rs2, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW,
                TAIL_AGNOSTIC)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (riscv_fpextend_vl_oneuse
                                (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)),
                                (vti.Mask true_mask), (XLenVT srcvalue)))),
              (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
    def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                  (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
              (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rs2, vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW)>;
  }
}

multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF<SDNode op, string instruction_name> {
  defm : VPatWidenBinaryFPSDNode_VV_VF<op, instruction_name>;
  defm : VPatWidenBinaryFPSDNode_WV_WF<op, instruction_name>;
}

multiclass VPatWidenFPMulAccSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp
                                 (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs2),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPNegMulAccSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs1),
                                       (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)),
                   (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs2),
                                       (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPMulSacSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                 (vti.Vector vti.RegClass:$rs1),
                                 (vti.Mask true_mask), (XLenVT srcvalue))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   (fneg wti.RegClass:$rd)),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatWidenFPNegMulSacSDNode_VV_VF<string instruction_name> {
  foreach vtiToWti = AllWidenableFloatVectors in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    def : Pat<(fma (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs1),
                                       (vti.Mask true_mask), (XLenVT srcvalue)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1))),
                   (fneg (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs2),
                                       (vti.Mask true_mask), (XLenVT srcvalue)))),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    def : Pat<(fma (fneg (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
                   (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                             (vti.Mask true_mask), (XLenVT srcvalue)),
                   wti.RegClass:$rd),
              (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
  foreach vti = AllIntegerVectors in {
    defvar suffix = vti.LMul.MX;
    // NOTE: We choose VMADD because it has the most commuting freedom. So it
    // works best with how TwoAddressInstructionPass tries commuting.
    def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                              (mul_oneuse vti.RegClass:$rs1, vti.RegClass:$rd))),
              (!cast<Instruction>(instruction_name#"_VV_"# suffix)
                vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
    // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally
    // commutable.
    def : Pat<(vti.Vector (op vti.RegClass:$rs2,
                              (mul_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd))),
              (!cast<Instruction>(instruction_name#"_VX_" # suffix)
                vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {

// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
                          FractionalGroupFloatVectors) in
  defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                               vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
  defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                      vti.RegClass>;
foreach mti = AllMasks in
  defm : VPatUSLoadStoreMaskSDNode<mti>;

// 11. Vector Integer Arithmetic Instructions

// 11.1. Vector Single-Width Integer Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<add, "PseudoVADD">;
defm : VPatBinarySDNode_VV_VX<sub, "PseudoVSUB">;
// Handle VRSUB specially since it's the only integer binary op with reversed
// pattern operands.
foreach vti = AllIntegerVectors in {
  def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
              vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                 (vti.Vector vti.RegClass:$rs1)),
            (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
              vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, anyext_oneuse, "PseudoVWADDU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, sext_oneuse, "PseudoVWSUB">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, zext_oneuse, "PseudoVWSUBU">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<sub, anyext_oneuse, "PseudoVWSUBU">;

// 11.3. Vector Integer Extension
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF2",
                          AllFractionableVF2IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF4",
                          AllFractionableVF4IntVectors>;
defm : VPatExtendSDNode_V<[zext, anyext], "PseudoVZEXT", "VF8",
                          AllFractionableVF8IntVectors>;
defm : VPatExtendSDNode_V<[sext], "PseudoVSEXT", "VF8",
                          AllFractionableVF8IntVectors>;

// 11.5. Vector Bitwise Logical Instructions
defm : VPatBinarySDNode_VV_VX_VI<and, "PseudoVAND">;
defm : VPatBinarySDNode_VV_VX_VI<or, "PseudoVOR">;
defm : VPatBinarySDNode_VV_VX_VI<xor, "PseudoVXOR">;

// 11.6. Vector Single-Width Bit Shift Instructions
defm : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
defm : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;

foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
                 (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
              vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
}

// 11.8. Vector Integer Comparison Instructions
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VV<"PseudoVMSNE", SETNE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VV_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLT", SETLT, SETGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLTU", SETULT, SETUGT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSLEU", SETULE, SETUGE>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGT", SETGT, SETLT>;
defm : VPatIntegerSetCCSDNode_VX_Swappable<"PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction.
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSEQ", SETEQ>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSNE", SETNE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLE", SETLE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSLEU", SETULE>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGT", SETGT>;
defm : VPatIntegerSetCCSDNode_VI<"PseudoVMSGTU", SETUGT>;
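// Compares against a splatted constant that only have the inverse immediate
// form are matched via the *_VIPlus1 helpers: the splat constant is
// decremented with DecImm, so e.g. x < c is emitted as vmsle.vi with c-1 and
// x >= c as vmsgt.vi with c-1.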
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLE", SETLT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSLEU", SETULT,
                                      SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGT", SETGE,
                                      SplatPat_simm5_plus1>;
defm : VPatIntegerSetCCSDNode_VIPlus1<"PseudoVMSGTU", SETUGE,
                                      SplatPat_simm5_plus1_nonzero>;

// 11.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
defm : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
defm : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
defm : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;

// 11.10. Vector Single-Width Integer Multiply Instructions
defm : VPatBinarySDNode_VV_VX<mul, "PseudoVMUL">;
defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">;
defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">;

// 11.11. Vector Integer Divide Instructions
defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">;
defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;

// 11.12. Vector Widening Integer Multiply Instructions
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                   "PseudoVWMUL">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
                                   "PseudoVWMULU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
                                   "PseudoVWMULSU">;
defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
                                   "PseudoVWMULSU">;

// 11.13. Vector Single-Width Integer Multiply-Add Instructions
defm : VPatMultiplyAddSDNode_VV_VX<add, "PseudoVMADD">;
defm : VPatMultiplyAddSDNode_VV_VX<sub, "PseudoVNMSUB">;

// 11.14. Vector Widening Integer Multiply-Add Instructions
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, sext_oneuse, "PseudoVWMACC">;
defm : VPatWidenMulAddSDNode_VV<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, zext_oneuse, "PseudoVWMACCU">;
defm : VPatWidenMulAddSDNode_VV<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<sext_oneuse, zext_oneuse, "PseudoVWMACCSU">;
defm : VPatWidenMulAddSDNode_VX<zext_oneuse, sext_oneuse, "PseudoVWMACCUS">;

// 11.15. Vector Integer Merge Instructions
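// vselect picks $rs1 where the mask bit is set and $rs2 elsewhere, which
// matches vmerge's vd[i] = v0.mask[i] ? vs1[i] : vs2[i]; hence $rs2 is passed
// as the first (merge/false) operand of the pseudo.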
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (vselect (vti.Mask V0), vti.RegClass:$rs1,
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
              vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask V0),
              vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat XLenVT:$rs1),
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
              vti.RegClass:$rs2, GPR:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (vselect (vti.Mask V0), (SplatPat_simm5 simm5:$rs1),
                                 vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
              vti.RegClass:$rs2, simm5:$rs1, (vti.Mask V0), vti.AVL, vti.Log2SEW)>;
}

// 12. Vector Fixed-Point Arithmetic Instructions

// 12.1. Vector Single-Width Saturating Add and Subtract
defm : VPatBinarySDNode_VV_VX_VI<saddsat, "PseudoVSADD">;
defm : VPatBinarySDNode_VV_VX_VI<uaddsat, "PseudoVSADDU">;
defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;

// 15. Vector Mask Instructions

// 15.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
            (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (and VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (or VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (rvv_vnot (xor VR:$rs1, VR:$rs2))),
            (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
            (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
              VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
  // Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
  def : Pat<(mti.Mask (rvv_vnot VR:$rs)),
            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
              VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}

} // Predicates = [HasVInstructions]

// 13. Vector Floating-Point Instructions
let Predicates = [HasVInstructionsAnyF] in {

// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;

// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fadd, "PseudoVFWADD">;
defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fsub, "PseudoVFWSUB">;

// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
defm : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;

// 13.5. Vector Widening Floating-Point Multiply Instructions
defm : VPatWidenBinaryFPSDNode_VV_VF<fmul, "PseudoVFWMUL">;

// 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
foreach fvti = AllFloatVectors in {
  // NOTE: We choose VFMADD because it has the most commuting freedom. So it
  // works best with how TwoAddressInstructionPass tries commuting.
  defvar suffix = fvti.LMul.MX;
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
              fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
              fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
              fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
                              fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
              fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  // The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
  // commutable.
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (SplatFPOp fvti.ScalarRegClass:$rs1),
                              (fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  // The splat might be negated.
  def : Pat<(fvti.Vector (fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
  def : Pat<(fvti.Vector (fma (fneg (SplatFPOp fvti.ScalarRegClass:$rs1)),
                              fvti.RegClass:$rd, fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
              fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
              fvti.AVL, fvti.Log2SEW, TAIL_AGNOSTIC)>;
}

// 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
defm : VPatWidenFPMulAccSDNode_VV_VF<"PseudoVFWMACC">;
defm : VPatWidenFPNegMulAccSDNode_VV_VF<"PseudoVFWNMACC">;
defm : VPatWidenFPMulSacSDNode_VV_VF<"PseudoVFWMSAC">;
defm : VPatWidenFPNegMulSacSDNode_VV_VF<"PseudoVFWNMSAC">;

foreach vti = AllFloatVectors in {
  // 13.8. Vector Floating-Point Square-Root Instruction
  def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
              vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;

  // 13.12. Vector Floating-Point Sign-Injection Instructions
  def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
              vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
  // Handle fneg with VFSGNJN using the same input for both operands.
  def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
              vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector vti.RegClass:$rs2))),
            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
              vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
              vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg vti.RegClass:$rs2)))),
            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
              vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                   (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
            (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
              vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
}

// 13.11. Vector Floating-Point MIN/MAX Instructions
defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;

// 13.13. Vector Floating-Point Compare Instructions
defm : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETUNE, "PseudoVMFNE", "PseudoVMFNE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLT, "PseudoVMFLT", "PseudoVMFGT">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETLE, "PseudoVMFLE", "PseudoVMFGE">;
defm : VPatFPSetCCSDNode_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = AllFloatVectors in {
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0), fvti.RegClass:$rs1,
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
              fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask V0),
              fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp fvti.ScalarRegClass:$rs1),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
              fvti.RegClass:$rs2,
              (fvti.Scalar fvti.ScalarRegClass:$rs1),
              (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (vselect (fvti.Mask V0),
                                  (SplatFPOp (fvti.Scalar fpimm0)),
                                  fvti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
              fvti.RegClass:$rs2, 0, (fvti.Mask V0), fvti.AVL, fvti.Log2SEW)>;
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V<fp_to_sint, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2ISDNode_V<fp_to_uint, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPSDNode_V<sint_to_fp, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPSDNode_V<uint_to_fp, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2ISDNode_V<fp_to_sint, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2ISDNode_V<fp_to_uint, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPSDNode_V<sint_to_fp, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPSDNode_V<uint_to_fp, "PseudoVFWCVT_F_XU_V">;

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2ISDNode_V<fp_to_sint, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2ISDNode_V<fp_to_uint, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPSDNode_V<sint_to_fp, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPSDNode_V<uint_to_fp, "PseudoVFNCVT_F_XU_W">;
foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
              fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}

} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Vector Splats
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in {
foreach fvti = AllFloatVectors in {
  def : Pat<(fvti.Vector (SplatFPOp fvti.ScalarRegClass:$rs1)),
            (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
              (fvti.Scalar fvti.ScalarRegClass:$rs1),
              fvti.AVL, fvti.Log2SEW)>;
  def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
              0, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasVInstructionsAnyF]

//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructionsAnyF] in
foreach vti = AllFloatVectors in {
  defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
                                                      vti.ScalarSuffix,
                                                      "_S_", vti.LMul.MX));
  // Only pattern-match extract-element operations where the index is 0. Any
  // other index will have been custom-lowered to slide the vector correctly
  // into place.
  def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
            (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}