//=- AArch64Combine.td - Define AArch64 Combine Rules --------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares AArch64-specific combine rules and combiners for GlobalISel.
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"
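
// Fold a G_FCONSTANT whose bit pattern is cheaper to materialize as an
// integer move into the equivalent G_CONSTANT. Illustrative MIR (the values
// are made up, not taken from a test):
//   %cst:_(s32) = G_FCONSTANT float 1.0
//  ==>
//   %cst:_(s32) = G_CONSTANT i32 1065353216 ; 0x3F800000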
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;
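
// Fold away a G_TRUNC feeding a G_ICMP when known-bits analysis shows that
// the dropped bits cannot affect the comparison, letting the compare read the
// wider source register directly; $matchinfo carries that source register.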
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE: fold a constant
// G_PTR_ADD offset into the G_GLOBAL_VALUE itself, so the addend can be
// encoded in the ADRP/ADD sequence used to address the global.
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
         [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ return applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;
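
// Pre-legalization combiner for optimized builds: runs every generic combine
// plus the AArch64-specific rules defined above.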
def AArch64PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenPreLegalizerCombinerHelper", [all_combines,
                                           fconstant_to_constant,
                                           icmp_redundant_trunc,
                                           fold_global_offset]> {
  let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
  let StateClass = "AArch64PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}
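
// Reduced pre-legalization combiner for -O0 / optnone functions: only the
// generic combines deemed worthwhile without optimizations enabled.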
def AArch64O0PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenO0PreLegalizerCombinerHelper", [optnone_combines]> {
  let DisableRuleOption = "aarch64O0prelegalizercombiner-disable-rule";
  let StateClass = "AArch64O0PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;
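
// The rules below recognize G_SHUFFLE_VECTOR masks that map onto a single
// AArch64 permute instruction (REV, ZIP, UZP, DUP, TRN, EXT) and replace the
// shuffle with the corresponding target pseudo. For example, a ZIP1 mask
// interleaves the low halves of the two inputs (illustrative MIR):
//   %z:_(<4 x s32>) = G_SHUFFLE_VECTOR %a(<4 x s32>), %b, shufflemask(0, 4, 1, 5)
//  ==>
//   %z:_(<4 x s32>) = G_ZIP1 %a, %b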
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def dup : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def ext : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;
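
// Recognize a G_SHUFFLE_VECTOR that only inserts a single element of one
// input into a lane of the other and replace it with a lane-to-lane INS; the
// matchdata records the registers and lane indices involved.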
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins : GICombineRule<
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ return applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;
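
// Turn a vector G_ASHR/G_LSHR by a splat constant that fits the immediate
// form of the instruction into the G_VASHR/G_VLSHR pseudos; the shift amount
// is carried in $matchinfo.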
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
         [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;
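
// Match a G_SHUFFLE_VECTOR whose mask broadcasts one input lane and replace
// it with a lane-duplicate (G_DUPLANE*) pseudo. Illustrative MIR:
//   %d:_(<4 x s32>) = G_SHUFFLE_VECTOR %v(<4 x s32>), %undef, shufflemask(1, 1, 1, 1)
//  ==>
//   %d:_(<4 x s32>) = G_DUPLANE32 %v, 1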
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule<
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;
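
// Adjust a G_ICMP's constant operand and predicate together so that the
// constant fits AArch64's 12-bit (optionally shifted) compare immediate,
// e.g. rewriting an unsigned "x < 4097" (4097 is not encodable) as
// "x <= 4096" (4096 encodes as a shifted immediate).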
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule<
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;
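
// Commute the operands of a G_ICMP (swapping the predicate accordingly) when
// the swapped form is expected to select better, for example by allowing a
// shifted operand to fold into the compare.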
def swap_icmp_operands : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;
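
// Match an extract of element 0 from an integer or floating-point add whose
// operands form a pairwise sum, and replace it with a pairwise-add pseudo
// (e.g. selecting towards ADDP/FADDP); the matchdata carries the opcode,
// type, and source register to use.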
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
         [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;
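
// Rewrite a G_MUL by certain constants into a cheaper shift/add/sub sequence
// (e.g. x * 7 => (x << 3) - x), mirroring the equivalent DAG combine; the
// matchdata is a callback that builds the replacement instructions.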
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
         [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;
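
// Replace a G_BUILD_VECTOR whose sources are all the same scalar with a
// G_DUP of that scalar, so it selects to a single DUP instruction.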
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
         [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ return applyBuildVectorToDup(*${root}, MRI, B); }])
>;

def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;
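
// Lower a vector G_FCMP into AArch64-legal compare operations. The rewrite
// is performed in the match step (which is why it takes the builder B), so
// the apply step is intentionally empty.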
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
         [{ return lowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{}])>;
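
// Fold an explicit G_TRUNC feeding a G_STORE into a single truncating store;
// $matchinfo records the wide source register. Illustrative MIR:
//   %t:_(s8) = G_TRUNC %x(s32)
//   G_STORE %t(s8), %p(p0)
//  ==>
//   G_STORE %x(s32), %p(p0) :: (store (s8))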
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
         [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;
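
// Fold a G_MERGE_VALUES whose high part is known to be zero into a G_ZEXT of
// the low part, e.g. merging %lo(s32) with zero into a 64-bit zero-extension
// of %lo.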
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
         [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;
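
// Mutate G_ANYEXT into G_ZEXT where the stronger guarantee is free: 32-bit
// operations on AArch64 implicitly zero the upper 32 bits of the destination
// register, so zero-extending often costs nothing and enables further folds.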
def mutate_anyext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_ANYEXT):$d,
         [{ return matchMutateAnyExtToZExt(*${d}, MRI); }]),
  (apply [{ applyMutateAnyExtToZExt(*${d}, MRI, B, Observer); }])
>;
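
// Split a 128-bit store of zero into two 64-bit zero stores, which can then
// be selected as a single "stp xzr, xzr" instead of materializing a zero
// vector register.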
def split_store_zero_128 : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_STORE):$d,
         [{ return matchSplitStoreZero128(*${d}, MRI); }]),
  (apply [{ applySplitStoreZero128(*${d}, MRI, B, Observer); }])
>;
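
// Lower a vector G_SEXT_INREG into a pair of vector shifts (shift left, then
// arithmetic shift right), since there is no native vector sext-in-reg
// operation.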
def vector_sext_inreg_to_shift : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return matchVectorSextInReg(*${d}, MRI); }]),
  (apply [{ applyVectorSextInReg(*${d}, MRI, B, Observer); }])
>;

// Post-legalization combines which should happen at all optimization levels
// (e.g. ones that facilitate matching for the selector, such as lowering
// G_SHUFFLE_VECTOR to target-specific pseudos).
def AArch64PostLegalizerLoweringHelper
    : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore,
                        vector_sext_inreg_to_shift]> {
  let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
}

// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombinerHelper
    : GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
                       [copy_prop, combines_for_extload,
                        sext_trunc_sextload, mutate_anyext_to_zext,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold, identity_combines,
                        ptr_add_immed_chain, overlapping_and,
                        split_store_zero_128, undef_combines,
                        select_to_minmax]> {
  let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
}