//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//
  12. // Common base class for GICombineRule and GICombineGroup.
  13. class GICombine {
  14. // See GICombineGroup. We only declare it here to make the tablegen pass
  15. // simpler.
  16. list<GICombine> Rules = ?;
  17. }
  18. // A group of combine rules that can be added to a GICombiner or another group.
  19. class GICombineGroup<list<GICombine> rules> : GICombine {
  20. // The rules contained in this group. The rules in a group are flattened into
  21. // a single list and sorted into whatever order is most efficient. However,
  22. // they will never be re-ordered such that behaviour differs from the
  23. // specified order. It is therefore possible to use the order of rules in this
  24. // list to describe priorities.
  25. let Rules = rules;
  26. }
  27. class GICombinerHelperArg<string type, string name> {
  28. string Type = type;
  29. string Name = name;
  30. }
  31. // Declares a combiner helper class
  32. class GICombinerHelper<string classname, list<GICombine> rules>
  33. : GICombineGroup<rules> {
  34. // The class name to use in the generated output.
  35. string Classname = classname;
  36. // The name of a run-time compiler option that will be generated to disable
  37. // specific rules within this combiner.
  38. string DisableRuleOption = ?;
  39. // The state class to inherit from (if any). The generated helper will inherit
  40. // from this class and will forward arguments to its constructors.
  41. string StateClass = "";
  42. // Any additional arguments that should be appended to the tryCombine*().
  43. list<GICombinerHelperArg> AdditionalArguments =
  44. [GICombinerHelperArg<"CombinerHelper &", "Helper">];
  45. }
  46. class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  47. /// Defines the external interface of the match rule. This includes:
  48. /// * The names of the root nodes (requires at least one)
  49. /// See GIDefKind for details.
  50. dag Defs = defs;
  51. /// Defines the things which must be true for the pattern to match
  52. /// See GIMatchKind for details.
  53. dag Match = match;
  54. /// Defines the things which happen after the decision is made to apply a
  55. /// combine rule.
  56. /// See GIApplyKind for details.
  57. dag Apply = apply;
  58. }
  59. /// The operator at the root of a GICombineRule.Defs dag.
  60. def defs;
  61. /// All arguments of the defs operator must be subclasses of GIDefKind or
  62. /// sub-dags whose operator is GIDefKindWithArgs.
  63. class GIDefKind;
  64. class GIDefKindWithArgs;
  65. /// Declare a root node. There must be at least one of these in every combine
  66. /// rule.
  67. /// TODO: The plan is to elide `root` definitions and determine it from the DAG
  68. /// itself with an overide for situations where the usual determination
  69. /// is incorrect.
  70. def root : GIDefKind;
  71. /// Declares data that is passed from the match stage to the apply stage.
  72. class GIDefMatchData<string type> : GIDefKind {
  73. /// A C++ type name indicating the storage type.
  74. string Type = type;
  75. }
  76. def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
  77. def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
  78. def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
  79. /// The operator at the root of a GICombineRule.Match dag.
  80. def match;
  81. /// All arguments of the match operator must be either:
  82. /// * A subclass of GIMatchKind
  83. /// * A subclass of GIMatchKindWithArgs
  84. /// * A subclass of Instruction
  85. /// * A MIR code block (deprecated)
  86. /// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
  87. /// in their definitions below.
  88. /// For the Instruction case, these are collected into a DAG where operand names
  89. /// that occur multiple times introduce edges.
  90. class GIMatchKind;
  91. class GIMatchKindWithArgs;
  92. /// In lieu of having proper macro support. Trivial one-off opcode checks can be
  93. /// performed with this.
  94. def wip_match_opcode : GIMatchKindWithArgs;
  95. /// The operator at the root of a GICombineRule.Apply dag.
  96. def apply;
  97. /// All arguments of the apply operator must be subclasses of GIApplyKind, or
  98. /// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
  99. /// (deprecated).
  100. class GIApplyKind;
  101. class GIApplyKindWithArgs;
  102. def register_matchinfo: GIDefMatchData<"Register">;
  103. def int64_matchinfo: GIDefMatchData<"int64_t">;
  104. def apint_matchinfo : GIDefMatchData<"APInt">;
  105. def build_fn_matchinfo :
  106. GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
  107. def copy_prop : GICombineRule<
  108. (defs root:$d),
  109. (match (COPY $d, $s):$mi,
  110. [{ return Helper.matchCombineCopy(*${mi}); }]),
  111. (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
  112. def extending_loads : GICombineRule<
  113. (defs root:$root, extending_load_matchdata:$matchinfo),
  114. (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
  115. [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  116. (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
  117. def load_and_mask : GICombineRule<
  118. (defs root:$root, build_fn_matchinfo:$matchinfo),
  119. (match (wip_match_opcode G_AND):$root,
  120. [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  121. (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
  122. def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;
  123. def sext_trunc_sextload : GICombineRule<
  124. (defs root:$d),
  125. (match (wip_match_opcode G_SEXT_INREG):$d,
  126. [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  127. (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;
  128. def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
  129. def sext_inreg_of_load : GICombineRule<
  130. (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  131. (match (wip_match_opcode G_SEXT_INREG):$root,
  132. [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  133. (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;
  134. def combine_indexed_load_store : GICombineRule<
  135. (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  136. (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
  137. [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  138. (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;
  139. def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
  140. def opt_brcond_by_inverting_cond : GICombineRule<
  141. (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  142. (match (wip_match_opcode G_BR):$root,
  143. [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  144. (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;
  145. def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
  146. def ptr_add_immed_chain : GICombineRule<
  147. (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  148. (match (wip_match_opcode G_PTR_ADD):$d,
  149. [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  150. (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;
  151. // Fold shift (shift base x), y -> shift base, (x+y), if shifts are same
  152. def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
  153. def shift_immed_chain : GICombineRule<
  154. (defs root:$d, shift_immed_matchdata:$matchinfo),
  155. (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
  156. [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  157. (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;
  158. // Transform shift (logic (shift X, C0), Y), C1
  159. // -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
  160. def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
  161. def shift_of_shifted_logic_chain : GICombineRule<
  162. (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  163. (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
  164. [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  165. (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;
  166. def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
  167. def mul_to_shl : GICombineRule<
  168. (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  169. (match (G_MUL $d, $op1, $op2):$mi,
  170. [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  171. (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;
  172. // shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
  173. def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
  174. def reduce_shl_of_extend : GICombineRule<
  175. (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  176. (match (G_SHL $dst, $src0, $src1):$mi,
  177. [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  178. (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
  179. def narrow_binop_feeding_and : GICombineRule<
  180. (defs root:$root, build_fn_matchinfo:$matchinfo),
  181. (match (wip_match_opcode G_AND):$root,
  182. [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  183. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  184. // [us]itofp(undef) = 0, because the result value is bounded.
  185. def undef_to_fp_zero : GICombineRule<
  186. (defs root:$root),
  187. (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
  188. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  189. (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;
  190. def undef_to_int_zero: GICombineRule<
  191. (defs root:$root),
  192. (match (wip_match_opcode G_AND, G_MUL):$root,
  193. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  194. (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
  195. def undef_to_negative_one: GICombineRule<
  196. (defs root:$root),
  197. (match (wip_match_opcode G_OR):$root,
  198. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  199. (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;
  200. def binop_left_undef_to_zero: GICombineRule<
  201. (defs root:$root),
  202. (match (wip_match_opcode G_SHL):$root,
  203. [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  204. (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
  205. // Instructions where if any source operand is undef, the instruction can be
  206. // replaced with undef.
  207. def propagate_undef_any_op: GICombineRule<
  208. (defs root:$root),
  209. (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
  210. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  211. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  212. // Instructions where if all source operands are undef, the instruction can be
  213. // replaced with undef.
  214. def propagate_undef_all_ops: GICombineRule<
  215. (defs root:$root),
  216. (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
  217. [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  218. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  219. // Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
  220. def propagate_undef_shuffle_mask: GICombineRule<
  221. (defs root:$root),
  222. (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
  223. [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  224. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  225. // Fold (cond ? x : x) -> x
  226. def select_same_val: GICombineRule<
  227. (defs root:$root),
  228. (match (wip_match_opcode G_SELECT):$root,
  229. [{ return Helper.matchSelectSameVal(*${root}); }]),
  230. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  231. >;
  232. // Fold (undef ? x : y) -> y
  233. def select_undef_cmp: GICombineRule<
  234. (defs root:$root),
  235. (match (wip_match_opcode G_SELECT):$root,
  236. [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  237. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  238. >;
  239. // Fold (true ? x : y) -> x
  240. // Fold (false ? x : y) -> y
  241. def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
  242. def select_constant_cmp: GICombineRule<
  243. (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  244. (match (wip_match_opcode G_SELECT):$root,
  245. [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  246. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
  247. >;
  248. // Fold x op 0 -> x
  249. def right_identity_zero: GICombineRule<
  250. (defs root:$root),
  251. (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
  252. G_PTR_ADD, G_ROTL, G_ROTR):$root,
  253. [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  254. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  255. >;
  256. // Fold x op 1 -> x
  257. def right_identity_one: GICombineRule<
  258. (defs root:$root),
  259. (match (wip_match_opcode G_MUL):$root,
  260. [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  261. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  262. >;
  263. // Fold (x op x) - > x
  264. def binop_same_val: GICombineRule<
  265. (defs root:$root),
  266. (match (wip_match_opcode G_AND, G_OR):$root,
  267. [{ return Helper.matchBinOpSameVal(*${root}); }]),
  268. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  269. >;
  270. // Fold (0 op x) - > 0
  271. def binop_left_to_zero: GICombineRule<
  272. (defs root:$root),
  273. (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
  274. [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  275. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  276. >;
  277. def urem_pow2_to_mask : GICombineRule<
  278. (defs root:$root),
  279. (match (wip_match_opcode G_UREM):$root,
  280. [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  281. (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
  282. >;
  283. // Transform d = [su]div(x, y) and r = [su]rem(x, y) - > d, r = [su]divrem(x, y)
  284. def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
  285. def div_rem_to_divrem : GICombineRule<
  286. (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  287. (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
  288. [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  289. (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
  290. >;
  291. // Fold (x op 0) - > 0
  292. def binop_right_to_zero: GICombineRule<
  293. (defs root:$root),
  294. (match (wip_match_opcode G_MUL):$root,
  295. [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  296. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  297. >;
  298. // Erase stores of undef values.
  299. def erase_undef_store : GICombineRule<
  300. (defs root:$root),
  301. (match (wip_match_opcode G_STORE):$root,
  302. [{ return Helper.matchUndefStore(*${root}); }]),
  303. (apply [{ return Helper.eraseInst(*${root}); }])
  304. >;
  305. def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
  306. def simplify_add_to_sub: GICombineRule <
  307. (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  308. (match (wip_match_opcode G_ADD):$root,
  309. [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  310. (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
  311. >;
  312. // Fold fp_op(cst) to the constant result of the floating point operation.
  313. def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
  314. def constant_fp_op: GICombineRule <
  315. (defs root:$root, constant_fp_op_matchinfo:$info),
  316. (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
  317. [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  318. (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
  319. >;
  320. // Fold int2ptr(ptr2int(x)) -> x
  321. def p2i_to_i2p: GICombineRule<
  322. (defs root:$root, register_matchinfo:$info),
  323. (match (wip_match_opcode G_INTTOPTR):$root,
  324. [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  325. (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
  326. >;
  327. // Fold ptr2int(int2ptr(x)) -> x
  328. def i2p_to_p2i: GICombineRule<
  329. (defs root:$root, register_matchinfo:$info),
  330. (match (wip_match_opcode G_PTRTOINT):$root,
  331. [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
  332. (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
  333. >;
  334. // Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
  335. def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
  336. def add_p2i_to_ptradd : GICombineRule<
  337. (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  338. (match (wip_match_opcode G_ADD):$root,
  339. [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  340. (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
  341. >;
  342. // Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
  343. def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
  344. def const_ptradd_to_i2p: GICombineRule<
  345. (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  346. (match (wip_match_opcode G_PTR_ADD):$root,
  347. [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  348. (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
  349. >;
  350. // Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  351. def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  352. (defs root:$root, instruction_steps_matchdata:$info),
  353. (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
  354. [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  355. (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
  356. >;
  357. // Fold ashr (shl x, C), C -> sext_inreg (C)
  358. def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
  359. def shl_ashr_to_sext_inreg : GICombineRule<
  360. (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  361. (match (wip_match_opcode G_ASHR): $root,
  362. [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  363. (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
  364. >;
  365. // Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  366. def overlapping_and: GICombineRule <
  367. (defs root:$root, build_fn_matchinfo:$info),
  368. (match (wip_match_opcode G_AND):$root,
  369. [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  370. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
  371. >;
  372. // Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
  373. def redundant_and: GICombineRule <
  374. (defs root:$root, register_matchinfo:$matchinfo),
  375. (match (wip_match_opcode G_AND):$root,
  376. [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  377. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  378. >;
  379. // Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
  380. def redundant_or: GICombineRule <
  381. (defs root:$root, register_matchinfo:$matchinfo),
  382. (match (wip_match_opcode G_OR):$root,
  383. [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  384. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  385. >;
  386. // If the input is already sign extended, just drop the extension.
  387. // sext_inreg x, K ->
  388. // if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
  389. def redundant_sext_inreg: GICombineRule <
  390. (defs root:$root),
  391. (match (wip_match_opcode G_SEXT_INREG):$root,
  392. [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  393. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  394. >;
  395. // Fold (anyext (trunc x)) -> x if the source type is same as
  396. // the destination type.
  397. def anyext_trunc_fold: GICombineRule <
  398. (defs root:$root, register_matchinfo:$matchinfo),
  399. (match (wip_match_opcode G_ANYEXT):$root,
  400. [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  401. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  402. >;
  403. // Fold (zext (trunc x)) -> x if the source type is same as the destination type
  404. // and truncated bits are known to be zero.
  405. def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
  406. def zext_trunc_fold: GICombineRule <
  407. (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  408. (match (wip_match_opcode G_ZEXT):$root,
  409. [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  410. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  411. >;
  412. // Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
  413. def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
  414. def ext_ext_fold: GICombineRule <
  415. (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  416. (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
  417. [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  418. (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
  419. >;
  420. def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
  421. def not_cmp_fold : GICombineRule<
  422. (defs root:$d, not_cmp_fold_matchinfo:$info),
  423. (match (wip_match_opcode G_XOR): $d,
  424. [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  425. (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
  426. >;
  427. // Fold (fneg (fneg x)) -> x.
  428. def fneg_fneg_fold: GICombineRule <
  429. (defs root:$root, register_matchinfo:$matchinfo),
  430. (match (wip_match_opcode G_FNEG):$root,
  431. [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
  432. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  433. >;
  434. // Fold (unmerge(merge x, y, z)) -> z, y, z.
  435. def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
  436. def unmerge_merge : GICombineRule<
  437. (defs root:$d, unmerge_merge_matchinfo:$info),
  438. (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  439. [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  440. (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
  441. >;
  442. // Fold merge(unmerge).
  443. def merge_unmerge : GICombineRule<
  444. (defs root:$d, register_matchinfo:$matchinfo),
  445. (match (wip_match_opcode G_MERGE_VALUES):$d,
  446. [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  447. (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
  448. >;
  449. // Fold (fabs (fabs x)) -> (fabs x).
  450. def fabs_fabs_fold: GICombineRule<
  451. (defs root:$root, register_matchinfo:$matchinfo),
  452. (match (wip_match_opcode G_FABS):$root,
  453. [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
  454. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  455. >;
  456. // Fold (fabs (fneg x)) -> (fabs x).
  457. def fabs_fneg_fold: GICombineRule <
  458. (defs root:$root, build_fn_matchinfo:$matchinfo),
  459. (match (wip_match_opcode G_FABS):$root,
  460. [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  461. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  462. // Fold (unmerge cst) -> cst1, cst2, ...
  463. def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
  464. def unmerge_cst : GICombineRule<
  465. (defs root:$d, unmerge_cst_matchinfo:$info),
  466. (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  467. [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  468. (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
  469. >;
  470. // Fold (unmerge undef) -> undef, undef, ...
  471. def unmerge_undef : GICombineRule<
  472. (defs root:$root, build_fn_matchinfo:$info),
  473. (match (wip_match_opcode G_UNMERGE_VALUES): $root,
  474. [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  475. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
  476. >;
  477. // Transform x,y<dead> = unmerge z -> x = trunc z.
  478. def unmerge_dead_to_trunc : GICombineRule<
  479. (defs root:$d),
  480. (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  481. [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  482. (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
  483. >;
  484. // Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
  485. def unmerge_zext_to_zext : GICombineRule<
  486. (defs root:$d),
  487. (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  488. [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  489. (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
  490. >;
  491. // Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
  492. def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
  493. def trunc_ext_fold: GICombineRule <
  494. (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  495. (match (wip_match_opcode G_TRUNC):$root,
  496. [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  497. (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
  498. >;
  499. // Fold trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits().
  500. def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
  501. def trunc_shl: GICombineRule <
  502. (defs root:$root, trunc_shl_matchinfo:$matchinfo),
  503. (match (wip_match_opcode G_TRUNC):$root,
  504. [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
  505. (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
  506. >;
  507. // Transform (mul x, -1) -> (sub 0, x)
  508. def mul_by_neg_one: GICombineRule <
  509. (defs root:$root),
  510. (match (wip_match_opcode G_MUL):$root,
  511. [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  512. (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
  513. >;
  514. // Fold (xor (and x, y), y) -> (and (not x), y)
  515. def xor_of_and_with_same_reg_matchinfo :
  516. GIDefMatchData<"std::pair<Register, Register>">;
  517. def xor_of_and_with_same_reg: GICombineRule <
  518. (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  519. (match (wip_match_opcode G_XOR):$root,
  520. [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  521. (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
  522. >;
  523. // Transform (ptr_add 0, x) -> (int_to_ptr x)
  524. def ptr_add_with_zero: GICombineRule<
  525. (defs root:$root),
  526. (match (wip_match_opcode G_PTR_ADD):$root,
  527. [{ return Helper.matchPtrAddZero(*${root}); }]),
  528. (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
  529. def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
  530. def combine_insert_vec_elts_build_vector : GICombineRule<
  531. (defs root:$root, regs_small_vec:$info),
  532. (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
  533. [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  534. (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;
  535. def load_or_combine : GICombineRule<
  536. (defs root:$root, build_fn_matchinfo:$info),
  537. (match (wip_match_opcode G_OR):$root,
  538. [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  539. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  540. def truncstore_merge_matcdata : GIDefMatchData<"MergeTruncStoresInfo">;
  541. def truncstore_merge : GICombineRule<
  542. (defs root:$root, truncstore_merge_matcdata:$info),
  543. (match (wip_match_opcode G_STORE):$root,
  544. [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  545. (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;
  546. def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
  547. def extend_through_phis : GICombineRule<
  548. (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  549. (match (wip_match_opcode G_PHI):$root,
  550. [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  551. (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;
  552. // Currently only the one combine above.
  553. def insert_vec_elt_combines : GICombineGroup<
  554. [combine_insert_vec_elts_build_vector]>;
  555. def extract_vec_elt_build_vec : GICombineRule<
  556. (defs root:$root, register_matchinfo:$matchinfo),
  557. (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
  558. [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  559. (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;
  560. // Fold away full elt extracts from a build_vector.
  561. def extract_all_elts_from_build_vector_matchinfo :
  562. GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
  563. def extract_all_elts_from_build_vector : GICombineRule<
  564. (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  565. (match (wip_match_opcode G_BUILD_VECTOR):$root,
  566. [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  567. (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;
  568. def extract_vec_elt_combines : GICombineGroup<[
  569. extract_vec_elt_build_vec,
  570. extract_all_elts_from_build_vector]>;
  571. def funnel_shift_from_or_shift : GICombineRule<
  572. (defs root:$root, build_fn_matchinfo:$info),
  573. (match (wip_match_opcode G_OR):$root,
  574. [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  575. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
  576. >;
  577. def funnel_shift_to_rotate : GICombineRule<
  578. (defs root:$root),
  579. (match (wip_match_opcode G_FSHL, G_FSHR):$root,
  580. [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  581. (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
  582. >;
  583. def rotate_out_of_range : GICombineRule<
  584. (defs root:$root),
  585. (match (wip_match_opcode G_ROTR, G_ROTL):$root,
  586. [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  587. (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
  588. >;
  589. def icmp_to_true_false_known_bits : GICombineRule<
  590. (defs root:$d, int64_matchinfo:$matchinfo),
  591. (match (wip_match_opcode G_ICMP):$d,
  592. [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  593. (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
  594. def icmp_to_lhs_known_bits : GICombineRule<
  595. (defs root:$root, build_fn_matchinfo:$info),
  596. (match (wip_match_opcode G_ICMP):$root,
  597. [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  598. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  599. def and_or_disjoint_mask : GICombineRule<
  600. (defs root:$root, build_fn_matchinfo:$info),
  601. (match (wip_match_opcode G_AND):$root,
  602. [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  603. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;
  604. def bitfield_extract_from_and : GICombineRule<
  605. (defs root:$root, build_fn_matchinfo:$info),
  606. (match (wip_match_opcode G_AND):$root,
  607. [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  608. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  609. def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
  610. funnel_shift_to_rotate]>;
  611. def bitfield_extract_from_sext_inreg : GICombineRule<
  612. (defs root:$root, build_fn_matchinfo:$info),
  613. (match (wip_match_opcode G_SEXT_INREG):$root,
  614. [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  615. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  616. def bitfield_extract_from_shr : GICombineRule<
  617. (defs root:$root, build_fn_matchinfo:$info),
  618. (match (wip_match_opcode G_ASHR, G_LSHR):$root,
  619. [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  620. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  621. def bitfield_extract_from_shr_and : GICombineRule<
  622. (defs root:$root, build_fn_matchinfo:$info),
  623. (match (wip_match_opcode G_ASHR, G_LSHR):$root,
  624. [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  625. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  626. def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
  627. bitfield_extract_from_and,
  628. bitfield_extract_from_shr,
  629. bitfield_extract_from_shr_and]>;
  630. def udiv_by_const : GICombineRule<
  631. (defs root:$root),
  632. (match (wip_match_opcode G_UDIV):$root,
  633. [{ return Helper.matchUDivByConst(*${root}); }]),
  634. (apply [{ Helper.applyUDivByConst(*${root}); }])>;
  635. def intdiv_combines : GICombineGroup<[udiv_by_const]>;
  636. def reassoc_ptradd : GICombineRule<
  637. (defs root:$root, build_fn_matchinfo:$matchinfo),
  638. (match (wip_match_opcode G_PTR_ADD):$root,
  639. [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  640. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  641. def reassocs : GICombineGroup<[reassoc_ptradd]>;
  642. // Constant fold operations.
  643. def constant_fold : GICombineRule<
  644. (defs root:$d, apint_matchinfo:$matchinfo),
  645. (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
  646. [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  647. (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
  648. def mulo_by_2: GICombineRule<
  649. (defs root:$root, build_fn_matchinfo:$matchinfo),
  650. (match (wip_match_opcode G_UMULO, G_SMULO):$root,
  651. [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  652. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  653. def mulh_to_lshr : GICombineRule<
  654. (defs root:$root),
  655. (match (wip_match_opcode G_UMULH):$root,
  656. [{ return Helper.matchUMulHToLShr(*${root}); }]),
  657. (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;
  658. def mulh_combines : GICombineGroup<[mulh_to_lshr]>;
  659. def redundant_neg_operands: GICombineRule<
  660. (defs root:$root, build_fn_matchinfo:$matchinfo),
  661. (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
  662. [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  663. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  664. // Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
  665. // (fadd x, (fmul y, z)) -> (fmad y, z, x)
  666. // Transform (fadd (fmul x, y), z) -> (fma x, y, z)
  667. // (fadd (fmul x, y), z) -> (fmad x, y, z)
  668. def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  669. (defs root:$root, build_fn_matchinfo:$info),
  670. (match (wip_match_opcode G_FADD):$root,
  671. [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
  672. ${info}); }]),
  673. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  674. // Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  675. // -> (fmad (fpext x), (fpext y), z)
  676. // Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
  677. // -> (fmad (fpext y), (fpext z), x)
  678. def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  679. (defs root:$root, build_fn_matchinfo:$info),
  680. (match (wip_match_opcode G_FADD):$root,
  681. [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
  682. ${info}); }]),
  683. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  684. // Transform (fadd (fma x, y, (fmul z, u)), v) -> (fma x, y, (fma z, u, v))
  685. // (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
  686. // Transform (fadd v, (fma x, y, (fmul z, u))) -> (fma x, y, (fma z, u, v))
  687. // (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
  688. def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  689. (defs root:$root, build_fn_matchinfo:$info),
  690. (match (wip_match_opcode G_FADD):$root,
  691. [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
  692. ${info}); }]),
  693. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  694. // Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
  695. // (fma x, y, (fma (fpext u), (fpext v), z))
  696. def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  697. (defs root:$root, build_fn_matchinfo:$info),
  698. (match (wip_match_opcode G_FADD):$root,
  699. [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
  700. *${root}, ${info}); }]),
  701. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  702. // Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
  703. // -> (fmad x, y, -z)
  704. def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  705. (defs root:$root, build_fn_matchinfo:$info),
  706. (match (wip_match_opcode G_FSUB):$root,
  707. [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
  708. ${info}); }]),
  709. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  710. // Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
  711. // (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
  712. def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  713. (defs root:$root, build_fn_matchinfo:$info),
  714. (match (wip_match_opcode G_FSUB):$root,
  715. [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
  716. ${info}); }]),
  717. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  718. // Transform (fsub (fpext (fmul x, y)), z) ->
  719. // (fma (fpext x), (fpext y), (fneg z))
  720. def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  721. (defs root:$root, build_fn_matchinfo:$info),
  722. (match (wip_match_opcode G_FSUB):$root,
  723. [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
  724. ${info}); }]),
  725. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  726. // Transform (fsub (fneg (fpext (fmul x, y))), z) ->
  727. // (fneg (fma (fpext x), (fpext y), z))
  728. def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  729. (defs root:$root, build_fn_matchinfo:$info),
  730. (match (wip_match_opcode G_FSUB):$root,
  731. [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
  732. *${root}, ${info}); }]),
  733. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
  734. // FIXME: These should use the custom predicate feature once it lands.
  735. def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
  736. undef_to_negative_one,
  737. binop_left_undef_to_zero,
  738. propagate_undef_any_op,
  739. propagate_undef_all_ops,
  740. propagate_undef_shuffle_mask,
  741. erase_undef_store,
  742. unmerge_undef]>;
  743. def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
  744. binop_same_val, binop_left_to_zero,
  745. binop_right_to_zero, p2i_to_i2p,
  746. i2p_to_p2i, anyext_trunc_fold,
  747. fneg_fneg_fold, right_identity_one]>;
  748. def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
  749. overlapping_and, mulo_by_2]>;
  750. def known_bits_simplifications : GICombineGroup<[
  751. redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  752. zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits]>;
  753. def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
  754. narrow_binop_feeding_and]>;
  755. def phi_combines : GICombineGroup<[extend_through_phis]>;
  756. def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>;
  757. def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
  758. mul_by_neg_one]>;
  759. def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  760. combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  761. combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  762. combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  763. combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
  764. def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
  765. extract_vec_elt_combines, combines_for_extload,
  766. combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
  767. simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
  768. reassocs, ptr_add_immed_chain,
  769. shl_ashr_to_sext_inreg, sext_inreg_of_load,
  770. width_reduction_combines, select_combines,
  771. known_bits_simplifications, ext_ext_fold,
  772. not_cmp_fold, opt_brcond_by_inverting_cond,
  773. unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
  774. unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
  775. const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
  776. shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
  777. truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
  778. form_bitfield_extract, constant_fold, fabs_fneg_fold,
  779. intdiv_combines, mulh_combines, redundant_neg_operands,
  780. and_or_disjoint_mask, fma_combines]>;
  781. // A combine group used to for prelegalizer combiners at -O0. The combines in
  782. // this group have been selected based on experiments to balance code size and
  783. // compile time performance.
  784. def optnone_combines : GICombineGroup<[trivial_combines,
  785. ptr_add_immed_chain, combines_for_extload,
  786. not_cmp_fold, opt_brcond_by_inverting_cond]>;