//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//
  12. // Common base class for GICombineRule and GICombineGroup.
  13. class GICombine {
  14. // See GICombineGroup. We only declare it here to make the tablegen pass
  15. // simpler.
  16. list<GICombine> Rules = ?;
  17. }
  18. // A group of combine rules that can be added to a GICombiner or another group.
  19. class GICombineGroup<list<GICombine> rules> : GICombine {
  20. // The rules contained in this group. The rules in a group are flattened into
  21. // a single list and sorted into whatever order is most efficient. However,
  22. // they will never be re-ordered such that behaviour differs from the
  23. // specified order. It is therefore possible to use the order of rules in this
  24. // list to describe priorities.
  25. let Rules = rules;
  26. }
  27. class GICombinerHelperArg<string type, string name> {
  28. string Type = type;
  29. string Name = name;
  30. }
  31. // Declares a combiner helper class
  32. class GICombinerHelper<string classname, list<GICombine> rules>
  33. : GICombineGroup<rules> {
  34. // The class name to use in the generated output.
  35. string Classname = classname;
  36. // The name of a run-time compiler option that will be generated to disable
  37. // specific rules within this combiner.
  38. string DisableRuleOption = ?;
  39. // The state class to inherit from (if any). The generated helper will inherit
  40. // from this class and will forward arguments to its constructors.
  41. string StateClass = "";
  42. // Any additional arguments that should be appended to the tryCombine*().
  43. list<GICombinerHelperArg> AdditionalArguments =
  44. [GICombinerHelperArg<"CombinerHelper &", "Helper">];
  45. }
  46. class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  47. /// Defines the external interface of the match rule. This includes:
  48. /// * The names of the root nodes (requires at least one)
  49. /// See GIDefKind for details.
  50. dag Defs = defs;
  51. /// Defines the things which must be true for the pattern to match
  52. /// See GIMatchKind for details.
  53. dag Match = match;
  54. /// Defines the things which happen after the decision is made to apply a
  55. /// combine rule.
  56. /// See GIApplyKind for details.
  57. dag Apply = apply;
  58. /// Defines the predicates that are checked before the match function
  59. /// is called. Targets can use this to, for instance, check Subtarget
  60. /// features.
  61. list<Predicate> Predicates = [];
  62. }
  63. /// The operator at the root of a GICombineRule.Defs dag.
  64. def defs;
  65. /// All arguments of the defs operator must be subclasses of GIDefKind or
  66. /// sub-dags whose operator is GIDefKindWithArgs.
  67. class GIDefKind;
  68. class GIDefKindWithArgs;
  69. /// Declare a root node. There must be at least one of these in every combine
  70. /// rule.
  71. /// TODO: The plan is to elide `root` definitions and determine it from the DAG
  72. /// itself with an overide for situations where the usual determination
  73. /// is incorrect.
  74. def root : GIDefKind;
  75. /// Declares data that is passed from the match stage to the apply stage.
  76. class GIDefMatchData<string type> : GIDefKind {
  77. /// A C++ type name indicating the storage type.
  78. string Type = type;
  79. }
  80. def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
  81. def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
  82. def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
  83. /// The operator at the root of a GICombineRule.Match dag.
  84. def match;
  85. /// All arguments of the match operator must be either:
  86. /// * A subclass of GIMatchKind
  87. /// * A subclass of GIMatchKindWithArgs
  88. /// * A subclass of Instruction
  89. /// * A MIR code block (deprecated)
  90. /// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
  91. /// in their definitions below.
  92. /// For the Instruction case, these are collected into a DAG where operand names
  93. /// that occur multiple times introduce edges.
  94. class GIMatchKind;
  95. class GIMatchKindWithArgs;
  96. /// In lieu of having proper macro support. Trivial one-off opcode checks can be
  97. /// performed with this.
  98. def wip_match_opcode : GIMatchKindWithArgs;
  99. /// The operator at the root of a GICombineRule.Apply dag.
  100. def apply;
  101. /// All arguments of the apply operator must be subclasses of GIApplyKind, or
  102. /// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
  103. /// (deprecated).
  104. class GIApplyKind;
  105. class GIApplyKindWithArgs;
  106. def register_matchinfo: GIDefMatchData<"Register">;
  107. def int64_matchinfo: GIDefMatchData<"int64_t">;
  108. def apint_matchinfo : GIDefMatchData<"APInt">;
  109. def build_fn_matchinfo :
  110. GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
  111. def unsigned_matchinfo: GIDefMatchData<"unsigned">;
  112. def copy_prop : GICombineRule<
  113. (defs root:$d),
  114. (match (COPY $d, $s):$mi,
  115. [{ return Helper.matchCombineCopy(*${mi}); }]),
  116. (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
  117. // idempotent operations
  118. // Fold (freeze (freeze x)) -> (freeze x).
  119. // Fold (fabs (fabs x)) -> (fabs x).
  120. // Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
  121. def idempotent_prop : GICombineRule<
  122. (defs root:$mi),
  123. (match (wip_match_opcode G_FREEZE, G_FABS, G_FCANONICALIZE):$mi,
  124. [{ return MRI.getVRegDef(${mi}->getOperand(1).getReg())->getOpcode() ==
  125. ${mi}->getOpcode(); }]),
  126. (apply [{ Helper.replaceSingleDefInstWithOperand(*${mi}, 1); }])>;
  127. def extending_loads : GICombineRule<
  128. (defs root:$root, extending_load_matchdata:$matchinfo),
  129. (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
  130. [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  131. (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
  132. def load_and_mask : GICombineRule<
  133. (defs root:$root, build_fn_matchinfo:$matchinfo),
  134. (match (wip_match_opcode G_AND):$root,
  135. [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  136. (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
  137. def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;
  138. def sext_trunc_sextload : GICombineRule<
  139. (defs root:$d),
  140. (match (wip_match_opcode G_SEXT_INREG):$d,
  141. [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  142. (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;
  143. def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
  144. def sext_inreg_of_load : GICombineRule<
  145. (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  146. (match (wip_match_opcode G_SEXT_INREG):$root,
  147. [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  148. (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;
  149. def sext_inreg_to_zext_inreg : GICombineRule<
  150. (defs root:$dst),
  151. (match
  152. (G_SEXT_INREG $dst, $src, $imm):$root,
  153. [{
  154. unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
  155. return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(),
  156. APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
  157. (apply [{
  158. Helper.getBuilder().setInstrAndDebugLoc(*${root});
  159. Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
  160. ${root}->eraseFromParent();
  161. return true;
  162. }])
  163. >;
  164. def combine_indexed_load_store : GICombineRule<
  165. (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  166. (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
  167. [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  168. (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;
  169. def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
  170. def opt_brcond_by_inverting_cond : GICombineRule<
  171. (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  172. (match (wip_match_opcode G_BR):$root,
  173. [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  174. (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;
  175. def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
  176. def ptr_add_immed_chain : GICombineRule<
  177. (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  178. (match (wip_match_opcode G_PTR_ADD):$d,
  179. [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  180. (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;
  181. // Fold shift (shift base x), y -> shift base, (x+y), if shifts are same
  182. def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
  183. def shift_immed_chain : GICombineRule<
  184. (defs root:$d, shift_immed_matchdata:$matchinfo),
  185. (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
  186. [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  187. (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;
  188. // Transform shift (logic (shift X, C0), Y), C1
  189. // -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
  190. def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
  191. def shift_of_shifted_logic_chain : GICombineRule<
  192. (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  193. (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
  194. [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  195. (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;
  196. def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
  197. def mul_to_shl : GICombineRule<
  198. (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  199. (match (G_MUL $d, $op1, $op2):$mi,
  200. [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  201. (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;
  202. // shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
  203. def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
  204. def reduce_shl_of_extend : GICombineRule<
  205. (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  206. (match (G_SHL $dst, $src0, $src1):$mi,
  207. [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  208. (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
  209. def narrow_binop_feeding_and : GICombineRule<
  210. (defs root:$root, build_fn_matchinfo:$matchinfo),
  211. (match (wip_match_opcode G_AND):$root,
  212. [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  213. (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
  214. // [us]itofp(undef) = 0, because the result value is bounded.
  215. def undef_to_fp_zero : GICombineRule<
  216. (defs root:$root),
  217. (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
  218. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  219. (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;
  220. def undef_to_int_zero: GICombineRule<
  221. (defs root:$root),
  222. (match (wip_match_opcode G_AND, G_MUL):$root,
  223. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  224. (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
  225. def undef_to_negative_one: GICombineRule<
  226. (defs root:$root),
  227. (match (wip_match_opcode G_OR):$root,
  228. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  229. (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;
  230. def binop_left_undef_to_zero: GICombineRule<
  231. (defs root:$root),
  232. (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
  233. [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  234. (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
  235. def binop_right_undef_to_undef: GICombineRule<
  236. (defs root:$root),
  237. (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
  238. [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  239. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  240. def unary_undef_to_zero: GICombineRule<
  241. (defs root:$root),
  242. (match (wip_match_opcode G_ABS):$root,
  243. [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  244. (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
  245. // Instructions where if any source operand is undef, the instruction can be
  246. // replaced with undef.
  247. def propagate_undef_any_op: GICombineRule<
  248. (defs root:$root),
  249. (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
  250. [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  251. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  252. // Instructions where if all source operands are undef, the instruction can be
  253. // replaced with undef.
  254. def propagate_undef_all_ops: GICombineRule<
  255. (defs root:$root),
  256. (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
  257. [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  258. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  259. // Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
  260. def propagate_undef_shuffle_mask: GICombineRule<
  261. (defs root:$root),
  262. (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
  263. [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  264. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  265. // Replace an insert/extract element of an out of bounds index with undef.
  266. def insert_extract_vec_elt_out_of_bounds : GICombineRule<
  267. (defs root:$root),
  268. (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
  269. [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
  270. (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
  271. // Fold (cond ? x : x) -> x
  272. def select_same_val: GICombineRule<
  273. (defs root:$root),
  274. (match (wip_match_opcode G_SELECT):$root,
  275. [{ return Helper.matchSelectSameVal(*${root}); }]),
  276. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  277. >;
  278. // Fold (undef ? x : y) -> y
  279. def select_undef_cmp: GICombineRule<
  280. (defs root:$root),
  281. (match (wip_match_opcode G_SELECT):$root,
  282. [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  283. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  284. >;
  285. // Fold (true ? x : y) -> x
  286. // Fold (false ? x : y) -> y
  287. def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
  288. def select_constant_cmp: GICombineRule<
  289. (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  290. (match (wip_match_opcode G_SELECT):$root,
  291. [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  292. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
  293. >;
  294. def select_to_logical : GICombineRule<
  295. (defs root:$root, build_fn_matchinfo:$matchinfo),
  296. (match (wip_match_opcode G_SELECT):$root,
  297. [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  298. (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
  299. >;
  300. // Fold (C op x) -> (x op C)
  301. // TODO: handle more isCommutable opcodes
  302. // TODO: handle compares (currently not marked as isCommutable)
  303. def commute_constant_to_rhs : GICombineRule<
  304. (defs root:$root),
  305. (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR):$root, [{
  306. return getIConstantVRegVal(${root}->getOperand(1).getReg(), MRI).has_value();
  307. }]),
  308. (apply [{
  309. Observer.changingInstr(*${root});
  310. Register LHSReg = ${root}->getOperand(1).getReg();
  311. Register RHSReg = ${root}->getOperand(2).getReg();
  312. ${root}->getOperand(1).setReg(RHSReg);
  313. ${root}->getOperand(2).setReg(LHSReg);
  314. Observer.changedInstr(*${root});
  315. }])
  316. >;
  317. // Fold x op 0 -> x
  318. def right_identity_zero: GICombineRule<
  319. (defs root:$root),
  320. (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
  321. G_PTR_ADD, G_ROTL, G_ROTR):$root,
  322. [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  323. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  324. >;
  325. // Fold x op 1 -> x
  326. def right_identity_one: GICombineRule<
  327. (defs root:$root),
  328. (match (wip_match_opcode G_MUL):$root,
  329. [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  330. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  331. >;
  332. // Fold (x op x) - > x
  333. def binop_same_val: GICombineRule<
  334. (defs root:$root),
  335. (match (wip_match_opcode G_AND, G_OR):$root,
  336. [{ return Helper.matchBinOpSameVal(*${root}); }]),
  337. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  338. >;
  339. // Fold (0 op x) - > 0
  340. def binop_left_to_zero: GICombineRule<
  341. (defs root:$root),
  342. (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
  343. [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  344. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  345. >;
  346. def urem_pow2_to_mask : GICombineRule<
  347. (defs root:$root),
  348. (match (wip_match_opcode G_UREM):$root,
  349. [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  350. (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
  351. >;
  352. // Push a binary operator through a select on constants.
  353. //
  354. // binop (select cond, K0, K1), K2 ->
  355. // select cond, (binop K0, K2), (binop K1, K2)
  356. // Every binary operator that has constant folding. We currently do
  357. // not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
  358. // G_FMINNUM_IEEE.
  359. def fold_binop_into_select : GICombineRule<
  360. (defs root:$root, unsigned_matchinfo:$select_op_no),
  361. (match (wip_match_opcode
  362. G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
  363. G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
  364. G_SMIN, G_SMAX, G_UMIN, G_UMAX,
  365. G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
  366. G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
  367. [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  368. (apply [{ return Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
  369. >;
  370. // Transform d = [su]div(x, y) and r = [su]rem(x, y) - > d, r = [su]divrem(x, y)
  371. def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
  372. def div_rem_to_divrem : GICombineRule<
  373. (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  374. (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
  375. [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  376. (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
  377. >;
  378. // Fold (x op 0) - > 0
  379. def binop_right_to_zero: GICombineRule<
  380. (defs root:$root),
  381. (match (wip_match_opcode G_MUL):$root,
  382. [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  383. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
  384. >;
  385. // Erase stores of undef values.
  386. def erase_undef_store : GICombineRule<
  387. (defs root:$root),
  388. (match (wip_match_opcode G_STORE):$root,
  389. [{ return Helper.matchUndefStore(*${root}); }]),
  390. (apply [{ return Helper.eraseInst(*${root}); }])
  391. >;
  392. def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
  393. def simplify_add_to_sub: GICombineRule <
  394. (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  395. (match (wip_match_opcode G_ADD):$root,
  396. [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  397. (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
  398. >;
  399. // Fold fp_op(cst) to the constant result of the floating point operation.
  400. def constant_fp_op_matchinfo: GIDefMatchData<"std::optional<APFloat>">;
  401. def constant_fp_op: GICombineRule <
  402. (defs root:$root, constant_fp_op_matchinfo:$info),
  403. (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
  404. [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  405. (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
  406. >;
  407. // Fold int2ptr(ptr2int(x)) -> x
  408. def p2i_to_i2p: GICombineRule<
  409. (defs root:$root, register_matchinfo:$info),
  410. (match (wip_match_opcode G_INTTOPTR):$root,
  411. [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  412. (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
  413. >;
  414. // Fold ptr2int(int2ptr(x)) -> x
  415. def i2p_to_p2i: GICombineRule<
  416. (defs root:$dst, register_matchinfo:$info),
  417. (match (G_INTTOPTR $t, $ptr),
  418. (G_PTRTOINT $dst, $t):$mi,
  419. [{ ${info} = ${ptr}.getReg(); }]),
  420. (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
  421. >;
  422. // Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
  423. def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
  424. def add_p2i_to_ptradd : GICombineRule<
  425. (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  426. (match (wip_match_opcode G_ADD):$root,
  427. [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  428. (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
  429. >;
  430. // Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
  431. def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
  432. def const_ptradd_to_i2p: GICombineRule<
  433. (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  434. (match (wip_match_opcode G_PTR_ADD):$root,
  435. [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  436. (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
  437. >;
  438. // Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  439. def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  440. (defs root:$root, instruction_steps_matchdata:$info),
  441. (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
  442. [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  443. (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
  444. >;
  445. // Fold ashr (shl x, C), C -> sext_inreg (C)
  446. def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
  447. def shl_ashr_to_sext_inreg : GICombineRule<
  448. (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  449. (match (wip_match_opcode G_ASHR): $root,
  450. [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  451. (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
  452. >;
  453. // Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  454. def overlapping_and: GICombineRule <
  455. (defs root:$root, build_fn_matchinfo:$info),
  456. (match (wip_match_opcode G_AND):$root,
  457. [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  458. (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
  459. >;
  460. // Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
  461. def redundant_and: GICombineRule <
  462. (defs root:$root, register_matchinfo:$matchinfo),
  463. (match (wip_match_opcode G_AND):$root,
  464. [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  465. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  466. >;
  467. // Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
  468. def redundant_or: GICombineRule <
  469. (defs root:$root, register_matchinfo:$matchinfo),
  470. (match (wip_match_opcode G_OR):$root,
  471. [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  472. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  473. >;
  474. // If the input is already sign extended, just drop the extension.
  475. // sext_inreg x, K ->
  476. // if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
  477. def redundant_sext_inreg: GICombineRule <
  478. (defs root:$root),
  479. (match (wip_match_opcode G_SEXT_INREG):$root,
  480. [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  481. (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
  482. >;
  483. // Fold (anyext (trunc x)) -> x if the source type is same as
  484. // the destination type.
  485. def anyext_trunc_fold: GICombineRule <
  486. (defs root:$root, register_matchinfo:$matchinfo),
  487. (match (wip_match_opcode G_ANYEXT):$root,
  488. [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  489. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  490. >;
  491. // Fold (zext (trunc x)) -> x if the source type is same as the destination type
  492. // and truncated bits are known to be zero.
  493. def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
  494. def zext_trunc_fold: GICombineRule <
  495. (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  496. (match (wip_match_opcode G_ZEXT):$root,
  497. [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  498. (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
  499. >;
  500. // Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
  501. def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
  502. def ext_ext_fold: GICombineRule <
  503. (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  504. (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
  505. [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  506. (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
  507. >;
  508. def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
  509. def not_cmp_fold : GICombineRule<
  510. (defs root:$d, not_cmp_fold_matchinfo:$info),
  511. (match (wip_match_opcode G_XOR): $d,
  512. [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  513. (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
  514. >;
  515. // Fold (fneg (fneg x)) -> x.
  516. def fneg_fneg_fold: GICombineRule <
  517. (defs root:$dst, register_matchinfo:$matchinfo),
  518. (match (G_FNEG $t, $src),
  519. (G_FNEG $dst, $t):$mi,
  520. [{ ${matchinfo} = ${src}.getReg(); }]),
  521. (apply [{ return Helper.replaceSingleDefInstWithReg(*${mi}, ${matchinfo}); }])
  522. >;
  523. // Fold (unmerge(merge x, y, z)) -> z, y, z.
  524. def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
  525. def unmerge_merge : GICombineRule<
  526. (defs root:$d, unmerge_merge_matchinfo:$info),
  527. (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  528. [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  529. (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
  530. >;
  531. // Fold merge(unmerge).
  532. def merge_unmerge : GICombineRule<
  533. (defs root:$d, register_matchinfo:$matchinfo),
  534. (match (wip_match_opcode G_MERGE_VALUES):$d,
  535. [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  536. (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
  537. >;
// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
         [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
         [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
         [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;
// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Under certain conditions, transform:
//  trunc (shl x, K)     -> shl (trunc x), K
//  trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
def trunc_shift: GICombineRule <
  (defs root:$root, trunc_shift_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
>;
// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;

// Combine a chain of G_INSERT_VECTOR_ELT instructions into a build_vector;
// see Helper.matchCombineInsertVecElts for the exact conditions.
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
         [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

// Combine an OR tree of (shifted) narrow loads into a single wider load;
// see Helper.matchLoadOrCombine for the legality checks.
def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Merge adjacent truncating stores into a single wider store;
// see Helper.matchTruncStoreMerge.
def truncstore_merge_matcdata : GIDefMatchData<"MergeTruncStoresInfo">;
def truncstore_merge : GICombineRule<
  (defs root:$root, truncstore_merge_matcdata:$info),
  (match (wip_match_opcode G_STORE):$root,
         [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;

// Push an extend of a G_PHI result through to the phi's incoming values;
// see Helper.matchExtendThroughPhis.
def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
         [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only the one insert_vector_elt combine
// (combine_insert_vec_elts_build_vector).
def insert_vec_elt_combines : GICombineGroup<
  [combine_insert_vec_elts_build_vector]>;
// Fold an extract_vector_elt of a build_vector to the corresponding scalar
// source; see Helper.matchExtractVecEltBuildVec.
def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
         [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
    GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
         [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;
// Form a funnel shift from an OR of two shifts;
// see Helper.matchOrShiftToFunnelShift.
def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Convert G_FSHL/G_FSHR to a rotate when the pattern allows
// (presumably when both shifted inputs are the same register — see
// Helper.matchFunnelShiftToRotate).
def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
         [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

// Normalize a rotate whose amount is out of range;
// see Helper.matchRotateOutOfRange.
def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
         [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;
// Replace a G_ICMP whose result is fully determined by known-bits analysis
// with the constant true/false value.
def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

// Simplify a G_ICMP using known bits of its LHS;
// see Helper.matchICmpToLHSKnownBits.
def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Drop a redundant binary op inside an equality compare;
// see Helper.matchRedundantBinOpInEquality.
def redundant_binop_in_equality : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Simplify a G_AND whose operands use disjoint masks;
// see Helper.matchAndOrDisjointMask.
def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

// Form a bitfield extract from an and/shift pattern;
// see Helper.matchBitfieldExtractFromAnd.
def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

// Form a bitfield extract from a G_SEXT_INREG;
// see Helper.matchBitfieldExtractFromSExtInReg.
def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Form a bitfield extract from an arithmetic/logical shift right;
// see Helper.matchBitfieldExtractFromShr.
def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
         [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Form a bitfield extract from a shift right combined with an and;
// see Helper.matchBitfieldExtractFromShrAnd.
def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
         [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;
// Rewrite an unsigned divide by a constant into a cheaper sequence;
// see Helper.matchUDivByConst / applyUDivByConst.
def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
         [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

// Rewrite a signed divide by a constant into a cheaper sequence;
// see Helper.matchSDivByConst / applySDivByConst.
def sdiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV):$root,
         [{ return Helper.matchSDivByConst(*${root}); }]),
  (apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;

// Reassociate chains of G_PTR_ADD; see Helper.matchReassocPtrAdd.
def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;
// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
         [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

// Simplify a multiply-with-overflow by 2; see Helper.matchMulOBy2.
def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold a multiply-with-overflow by 0; see Helper.matchMulOBy0.
def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Fold an add-with-overflow of 0; see Helper.matchAddOBy0.
def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
         [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Transform (uadde x, y, 0) -> (uaddo x, y)
//           (sadde x, y, 0) -> (saddo x, y)
//           (usube x, y, 0) -> (usubo x, y)
//           (ssube x, y, 0) -> (ssubo x, y)
def adde_to_addo: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
         [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Turn a G_UMULH into a logical shift right when profitable;
// see Helper.matchUMulHToLShr.
def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;
// Fold redundant fneg on the operands of FP arithmetic;
// see Helper.matchRedundantNegOperands.
def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
         [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fsub +-0.0, X) -> (fneg X)
def fsub_to_fneg: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;
// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                      *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                      *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
// Replace a floating-point min/max with one of its operands when the other
// is known to be NaN; see Helper.matchCombineFMinMaxNaN for which operand.
def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
         [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD):$root,
         [{ return Helper.matchAddSubSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root},
                                                      ${matchinfo}); }])>;

// Fold a build_vector that merely reassembles a source register back to that
// source; see Helper.matchBuildVectorIdentityFold.
def buildvector_identity_fold : GICombineRule<
  (defs root:$build_vector, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
         [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;

// Fold a trunc of a build_vector to a source register;
// see Helper.matchTruncBuildVectorFold.
def trunc_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
         [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

// Fold a trunc of an lshr of a build_vector to a source register;
// see Helper.matchTruncLshrBuildVectorFold.
def trunc_lshr_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
         [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;
// Transform:
//   (x + y) - y -> x
//   (x + y) - x -> y
//   x - (y + x) -> 0 - y
//   x - (x + z) -> 0 - z
def sub_add_reg: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SUB):$root,
         [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Fold (bitcast (bitcast x)) -> x when the outer destination type equals
// x's type (checked via MRI.getType below).
def bitcast_bitcast_fold : GICombineRule<
  (defs root:$dst),
  (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
         [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;

// Turn a G_SELECT into a min/max operation when it implements one;
// see Helper.matchSimplifySelectToMinMax.
def select_to_minmax: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SELECT):$root,
         [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     unary_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef,
                                     insert_extract_vec_elt_out_of_bounds]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg, buildvector_identity_fold,
                                        trunc_buildvector_fold,
                                        trunc_lshr_buildvector_fold,
                                        bitcast_bitcast_fold]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, adde_to_addo,
                                     combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits,
  sext_inreg_to_zext_inreg]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one, idempotent_prop]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
    form_bitfield_extract, constant_fold, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines, fold_binop_into_select,
    sub_add_reg, select_to_minmax, redundant_binop_in_equality,
    fsub_to_fneg, commute_constant_to_rhs]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;