X86InstrFMA.td

//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
// For all FMA opcodes declared in fma3p_rm_* and fma3s_rm_* multiclasses
// defined below, both the register and memory variants are commutable.
// For the register form the commutable operands are 1, 2 and 3.
// For the memory variant the folded operand must be operand 3. Thus,
// in that case, only operands 1 and 2 can be swapped.
// Commuting some of the operands may require an opcode change.
// FMA*213*:
//     operands 1 and 2 (memory & register forms): *213* --> *213*(no changes);
//     operands 1 and 3 (register forms only):     *213* --> *231*;
//     operands 2 and 3 (register forms only):     *213* --> *132*.
// FMA*132*:
//     operands 1 and 2 (memory & register forms): *132* --> *231*;
//     operands 1 and 3 (register forms only):     *132* --> *132*(no changes);
//     operands 2 and 3 (register forms only):     *132* --> *213*.
// FMA*231*:
//     operands 1 and 2 (memory & register forms): *231* --> *132*;
//     operands 1 and 3 (register forms only):     *231* --> *213*;
//     operands 2 and 3 (register forms only):     *231* --> *231*(no changes).
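//
// As an illustrative sketch of the table above (hypothetical register
// operands, Intel operand order; only the computed value is shown, and the
// tied destination follows operand 1):
//     FMA*213* reg1, reg2, reg3;                         // reg2 * reg1 + reg3
//     commute 1 and 3  -->  FMA*231* reg3, reg2, reg1;   // reg2 * reg1 + reg3
//     commute 2 and 3  -->  FMA*132* reg1, reg3, reg2;   // reg1 * reg2 + reg3
// For a memory form only operands 1 and 2 may be exchanged, e.g.:
//     FMA*132* reg1, reg2, [mem];                        // reg1 * [mem] + reg2
//     commute 1 and 2  -->  FMA*231* reg2, reg1, [mem];  // reg1 * [mem] + reg2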

multiclass fma3p_rm_213<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
          Sched<[sched]>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
                                      (MemFrag addr:$src3))))]>,
          Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_231<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>, Sched<[sched]>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, (MemFrag addr:$src3),
                                      RC:$src1)))]>,
          Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3p_rm_132<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDPatternOperator Op, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>, Sched<[sched]>;

  // The pattern is written in 312 order so that the load appears in a
  // different position than in the 213 and 231 patterns; this helps
  // tablegen's duplicate pattern detection.
  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op (MemFrag addr:$src3), RC:$src1,
                                      RC:$src2)))]>,
          Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", hasSideEffects = 0, isCommutable = 1,
    Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy, string Suff,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDPatternOperator Op, ValueType OpTy128, ValueType OpTy256,
                       X86SchedWriteWidths sched> {
  defm NAME#213#Suff : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#231#Suff : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;
  defm NAME#132#Suff : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op, sched.XMM>;

  defm NAME#213#Suff#Y : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                         VEX_L;
  defm NAME#231#Suff#Y : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                         VEX_L;
  defm NAME#132#Suff#Y : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op, sched.YMM>,
                         VEX_L;
}

// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", "PS",
                               loadv4f32, loadv8f32, any_fma, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86any_Fmsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmaddsub, v4f32, v8f32,
                               SchedWriteFMA>;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsubadd, v4f32, v8f32,
                               SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", "PD",
                               loadv2f64, loadv4f64, any_fma, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86any_Fmsub, v2f64,
                               v4f64, SchedWriteFMA>, VEX_W;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmaddsub,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsubadd,
                               v2f64, v4f64, SchedWriteFMA>, VEX_W;
}

// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", "PS", loadv4f32,
                             loadv8f32, X86any_Fnmadd, v4f32, v8f32, SchedWriteFMA>;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", "PS", loadv4f32,
                             loadv8f32, X86any_Fnmsub, v4f32, v8f32, SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", "PD", loadv2f64,
                             loadv4f64, X86any_Fnmadd, v2f64, v4f64, SchedWriteFMA>, VEX_W;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", "PD", loadv2f64,
                             loadv4f64, X86any_Fnmsub, v2f64, v4f64, SchedWriteFMA>, VEX_W;
}

// All source register operands of FMA opcodes defined in the fma3s_rm_*
// multiclasses can be commuted. In many cases such a commute transformation
// requires an opcode adjustment; for example, commuting operands 1 and 2 of
// the FMA*132 form requires an opcode change to FMA*231:
//     FMA*132* reg1, reg2, reg3;   // reg1 * reg3 + reg2
//     -->
//     FMA*231* reg2, reg1, reg3;   // reg1 * reg3 + reg2
// Please see the more detailed comment at the very beginning of the FMA3
// section above.
multiclass fma3s_rm_213<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode,
                        X86FoldableSchedWrite sched> {
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst, (OpNode RC:$src2, RC:$src1, RC:$src3))]>,
           Sched<[sched]>;

  let mayLoad = 1 in
  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                   (OpNode RC:$src2, RC:$src1, (load addr:$src3)))]>,
           Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3s_rm_231<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                []>, Sched<[sched]>;

  let mayLoad = 1 in
  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                   (OpNode RC:$src2, (load addr:$src3), RC:$src1))]>,
           Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

multiclass fma3s_rm_132<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let hasSideEffects = 0 in
  def r : FMA3S<opc, MRMSrcReg, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, RC:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                []>, Sched<[sched]>;

  // The pattern is written in 312 order so that the load appears in a
  // different position than in the 213 and 231 patterns; this helps
  // tablegen's duplicate pattern detection.
  let mayLoad = 1 in
  def m : FMA3S<opc, MRMSrcMem, (outs RC:$dst),
                (ins RC:$src1, RC:$src2, x86memop:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                [(set RC:$dst,
                   (OpNode (load addr:$src3), RC:$src1, RC:$src2))]>,
           Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

let Constraints = "$src1 = $dst", isCommutable = 1, isCodeGenOnly = 1,
    hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy, string Suff,
                       SDPatternOperator OpNode, RegisterClass RC,
                       X86MemOperand x86memop, X86FoldableSchedWrite sched> {
  defm NAME#213#Suff : fma3s_rm_213<opc213, !strconcat(OpStr, "213", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#231#Suff : fma3s_rm_231<opc231, !strconcat(OpStr, "231", PackTy),
                                    x86memop, RC, OpNode, sched>;
  defm NAME#132#Suff : fma3s_rm_132<opc132, !strconcat(OpStr, "132", PackTy),
                                    x86memop, RC, OpNode, sched>;
}

// These FMA*_Int instructions are defined specially for use when the scalar
// FMA intrinsics are lowered to machine instructions, and in that sense they
// are similar to the existing ADD*_Int, SUB*_Int, MUL*_Int, etc. instructions.
//
// All of the FMA*_Int opcodes are defined as commutable here.
// Commuting the 2nd and 3rd source register operands of FMAs is quite trivial
// and the corresponding optimizations have been developed.
// Commuting the 1st operand of FMA*_Int requires some additional analysis:
// the commute optimization is legal only if all users of the FMA*_Int result
// read only its lowest element. Even though such analysis may not be
// implemented yet, we allow the routines that do the actual commute
// transformation to decide whether a particular instruction is commutable.
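//
// A hypothetical illustration of why commuting operand 1 needs that analysis:
//     VFMADD213SS xmm1, xmm2, xmm3
// computes xmm1[31:0] = xmm2[31:0] * xmm1[31:0] + xmm3[31:0] and passes
// xmm1[127:32] through unchanged from operand 1. Exchanging operand 1 with
// operand 2 or 3 therefore changes which register supplies those upper bits,
// so the transformation preserves the full result only when every user of the
// instruction reads just the lowest element.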

let Constraints = "$src1 = $dst", isCommutable = 1, hasSideEffects = 0,
    Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,
                        Operand memopr, RegisterClass RC,
                        X86FoldableSchedWrite sched> {
  def r_Int : FMA3S_Int<opc, MRMSrcReg, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, RC:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched]>;

  let mayLoad = 1 in
  def m_Int : FMA3S_Int<opc, MRMSrcMem, (outs RC:$dst),
                        (ins RC:$src1, RC:$src2, memopr:$src3),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        []>, Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
}

// The FMA 213 form is created for lowering of scalar FMA intrinsics
// to machine instructions.
// The FMA 132 form can trivially be obtained by commuting the 2nd and 3rd
// operands of the FMA 213 form.
// The FMA 231 form can be obtained only by commuting the 1st operand of the
// 213 or 132 forms, which is possible only after special analysis of all uses
// of the initial instruction. Such analysis does not exist yet, so the 231
// form of the FMA*_Int instructions is introduced under the optimistic
// assumption that such analysis will be implemented eventually.
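//
// A hypothetical sketch of the trivial 213 <-> 132 commute mentioned above
// (values only; the upper bits come from operand 1 in both forms):
//     VFMADD213SS xmm1, xmm2, xmm3  // xmm1[31:0] = xmm2[31:0]*xmm1[31:0] + xmm3[31:0]
//     <=>
//     VFMADD132SS xmm1, xmm3, xmm2  // xmm1[31:0] = xmm1[31:0]*xmm2[31:0] + xmm3[31:0]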

multiclass fma3s_int_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                           string OpStr, string PackTy, string Suff,
                           RegisterClass RC, Operand memop,
                           X86FoldableSchedWrite sched> {
  defm NAME#132#Suff : fma3s_rm_int<opc132, !strconcat(OpStr, "132", PackTy),
                                    memop, RC, sched>;
  defm NAME#213#Suff : fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
                                    memop, RC, sched>;
  defm NAME#231#Suff : fma3s_rm_int<opc231, !strconcat(OpStr, "231", PackTy),
                                    memop, RC, sched>;
}

multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, SDPatternOperator OpNode, X86FoldableSchedWrite sched> {
  let ExeDomain = SSEPackedSingle in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", OpNode,
                          FR32, f32mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "ss", "SS",
                              VR128, ssmem, sched>;

  let ExeDomain = SSEPackedDouble in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "SD", OpNode,
                          FR64, f64mem, sched>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", "SD",
                              VR128, sdmem, sched>, VEX_W;
}

defm VFMADD  : fma3s<0x99, 0xA9, 0xB9, "vfmadd", any_fma,
                     SchedWriteFMA.Scl>, VEX_LIG;
defm VFMSUB  : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86any_Fmsub,
                     SchedWriteFMA.Scl>, VEX_LIG;
defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86any_Fnmadd,
                     SchedWriteFMA.Scl>, VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86any_Fnmsub,
                     SchedWriteFMA.Scl>, VEX_LIG;

multiclass scalar_fma_patterns<SDPatternOperator Op, string Prefix, string Suffix,
                               SDNode Move, ValueType VT, ValueType EltVT,
                               RegisterClass RC, PatFrag mem_frag> {
  let Predicates = [HasFMA, NoAVX512] in {
    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                    (Op RC:$src2,
                        (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                        RC:$src3))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"r_Int")
                VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                    (Op RC:$src2, RC:$src3,
                        (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"r_Int")
                VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                    (Op RC:$src2,
                        (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                        (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Prefix#"213"#Suffix#"m_Int")
                VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                    (Op (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
                        (mem_frag addr:$src3), RC:$src2))))),
              (!cast<Instruction>(Prefix#"132"#Suffix#"m_Int")
                VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                addr:$src3)>;

    def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
                    (Op RC:$src2, (mem_frag addr:$src3),
                        (EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
              (!cast<Instruction>(Prefix#"231"#Suffix#"m_Int")
                VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                addr:$src3)>;
  }
}

defm : scalar_fma_patterns<any_fma,       "VFMADD",  "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fmsub,  "VFMSUB",  "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fnmadd, "VFNMADD", "SS", X86Movss, v4f32, f32, FR32, loadf32>;
defm : scalar_fma_patterns<X86any_Fnmsub, "VFNMSUB", "SS", X86Movss, v4f32, f32, FR32, loadf32>;

defm : scalar_fma_patterns<any_fma,       "VFMADD",  "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fmsub,  "VFMSUB",  "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fnmadd, "VFNMADD", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;
defm : scalar_fma_patterns<X86any_Fnmsub, "VFNMSUB", "SD", X86Movsd, v2f64, f64, FR64, loadf64>;

//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
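
// FMA4 encodes the destination as a separate operand, so these instructions
// are non-destructive and the rr forms below are marked isCommutable for the
// two multiplicands without any opcode change. A hypothetical sketch (values
// only): vfmaddss xmm1, xmm2, xmm3, xmm4 computes xmm1 = xmm2 * xmm3 + xmm4;
// the rm forms fold a load into the last source operand and the mr forms into
// the preceding one.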

let Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDPatternOperator OpNode,
                 PatFrag mem_frag, X86FoldableSchedWrite sched> {
  let isCommutable = 1 in
  def rr : FMA4S<opc, MRMSrcRegOp4, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, RC:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst,
                    (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG,
             Sched<[sched]>;

  def rm : FMA4S<opc, MRMSrcMemOp4, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, x86memop:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                                        (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG,
             Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;

  def mr : FMA4S<opc, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2, RC:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set RC:$dst,
                    (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG,
             Sched<[sched.Folded, sched.ReadAfterFold,
                    // x86memop:$src2
                    ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                    ReadDefault,
                    // RC:$src3
                    sched.ReadAfterFold]>;

  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4S<opc, MRMSrcReg, (outs RC:$dst),
                     (ins RC:$src1, RC:$src2, RC:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_LIG, FoldGenData<NAME#rr>, Sched<[sched]>;
}

multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     X86FoldableSchedWrite sched> {
  let isCodeGenOnly = 1, hasSideEffects = 0,
      Uses = [MXCSR], mayRaiseFPException = 1 in {
    def rr_Int : FMA4S_Int<opc, MRMSrcRegOp4, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, VR128:$src3),
                           !strconcat(OpcodeStr,
                                      "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                           []>, VEX_W, VEX_LIG, Sched<[sched]>;

    let mayLoad = 1 in
    def rm_Int : FMA4S_Int<opc, MRMSrcMemOp4, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, memop:$src3),
                           !strconcat(OpcodeStr,
                                      "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                           []>, VEX_W, VEX_LIG,
                 Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;

    let mayLoad = 1 in
    def mr_Int : FMA4S_Int<opc, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, memop:$src2, VR128:$src3),
                           !strconcat(OpcodeStr,
                                      "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                           []>,
                 VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold,
                                 // memop:$src2
                                 ReadDefault, ReadDefault, ReadDefault,
                                 ReadDefault, ReadDefault,
                                 // VR128::$src3
                                 sched.ReadAfterFold]>;

    def rr_Int_REV : FMA4S_Int<opc, MRMSrcReg, (outs VR128:$dst),
                               (ins VR128:$src1, VR128:$src2, VR128:$src3),
                               !strconcat(OpcodeStr,
                                          "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                               []>, VEX_LIG, FoldGenData<NAME#rr_Int>, Sched<[sched]>;
  } // isCodeGenOnly = 1
}

let Uses = [MXCSR], mayRaiseFPException = 1 in
multiclass fma4p<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256,
                 X86SchedWriteWidths sched> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcRegOp4, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src2, VR128:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                   (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
            VEX_W, Sched<[sched.XMM]>;

  def rm : FMA4<opc, MRMSrcMemOp4, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src2, f128mem:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                                          (ld_frag128 addr:$src3)))]>, VEX_W,
            Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold, sched.XMM.ReadAfterFold]>;

  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, f128mem:$src2, VR128:$src3),
                !strconcat(OpcodeStr,
                           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                [(set VR128:$dst,
                   (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>,
            Sched<[sched.XMM.Folded, sched.XMM.ReadAfterFold,
                   // f128mem:$src2
                   ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                   ReadDefault,
                   // VR128::$src3
                   sched.XMM.ReadAfterFold]>;

  let isCommutable = 1 in
  def Yrr : FMA4<opc, MRMSrcRegOp4, (outs VR256:$dst),
                 (ins VR256:$src1, VR256:$src2, VR256:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst,
                    (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
             VEX_W, VEX_L, Sched<[sched.YMM]>;

  def Yrm : FMA4<opc, MRMSrcMemOp4, (outs VR256:$dst),
                 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                                           (ld_frag256 addr:$src3)))]>, VEX_W, VEX_L,
             Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold, sched.YMM.ReadAfterFold]>;

  def Ymr : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
                 (ins VR256:$src1, f256mem:$src2, VR256:$src3),
                 !strconcat(OpcodeStr,
                            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                 [(set VR256:$dst, (OpNode VR256:$src1,
                                           (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L,
             Sched<[sched.YMM.Folded, sched.YMM.ReadAfterFold,
                    // f256mem:$src2
                    ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                    ReadDefault,
                    // VR256::$src3
                    sched.YMM.ReadAfterFold]>;

  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
    def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2, VR128:$src3),
                      !strconcat(OpcodeStr,
                                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                 Sched<[sched.XMM]>, FoldGenData<NAME#rr>;
    def Yrr_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
                       (ins VR256:$src1, VR256:$src2, VR256:$src3),
                       !strconcat(OpcodeStr,
                                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                  VEX_L, Sched<[sched.YMM]>, FoldGenData<NAME#Yrr>;
  } // isCodeGenOnly = 1
}

let ExeDomain = SSEPackedSingle in {
  // Scalar Instructions
  defm VFMADDSS4  : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, any_fma, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6A, "vfmaddss", ssmem, SchedWriteFMA.Scl>;
  defm VFMSUBSS4  : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86any_Fmsub, loadf32,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6E, "vfmsubss", ssmem, SchedWriteFMA.Scl>;
  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                          X86any_Fnmadd, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7A, "vfnmaddss", ssmem, SchedWriteFMA.Scl>;
  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                          X86any_Fnmsub, loadf32, SchedWriteFMA.Scl>,
                    fma4s_int<0x7E, "vfnmsubss", ssmem, SchedWriteFMA.Scl>;

  // Packed Instructions
  defm VFMADDPS4    : fma4p<0x68, "vfmaddps", any_fma, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBPS4    : fma4p<0x6C, "vfmsubps", X86any_Fmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMADDPS4   : fma4p<0x78, "vfnmaddps", X86any_Fnmadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFNMSUBPS4   : fma4p<0x7C, "vfnmsubps", X86any_Fnmsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32, SchedWriteFMA>;
}

let ExeDomain = SSEPackedDouble in {
  // Scalar Instructions
  defm VFMADDSD4  : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, any_fma, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6B, "vfmaddsd", sdmem, SchedWriteFMA.Scl>;
  defm VFMSUBSD4  : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86any_Fmsub, loadf64,
                          SchedWriteFMA.Scl>,
                    fma4s_int<0x6F, "vfmsubsd", sdmem, SchedWriteFMA.Scl>;
  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                          X86any_Fnmadd, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7B, "vfnmaddsd", sdmem, SchedWriteFMA.Scl>;
  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                          X86any_Fnmsub, loadf64, SchedWriteFMA.Scl>,
                    fma4s_int<0x7F, "vfnmsubsd", sdmem, SchedWriteFMA.Scl>;

  // Packed Instructions
  defm VFMADDPD4    : fma4p<0x69, "vfmaddpd", any_fma, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBPD4    : fma4p<0x6D, "vfmsubpd", X86any_Fmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMADDPD4   : fma4p<0x79, "vfnmaddpd", X86any_Fnmadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFNMSUBPD4   : fma4p<0x7D, "vfnmsubpd", X86any_Fnmsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64, SchedWriteFMA>;
}

multiclass scalar_fma4_patterns<SDPatternOperator Op, string Name,
                                ValueType VT, RegisterClass RC,
                                PatFrag mem_frag> {
  let Predicates = [HasFMA4] in {
    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                    (Op RC:$src1, RC:$src2, RC:$src3))))),
              (!cast<Instruction>(Name#"rr_Int")
                (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
                (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
                (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                    (Op RC:$src1, RC:$src2,
                        (mem_frag addr:$src3)))))),
              (!cast<Instruction>(Name#"rm_Int")
                (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
                (VT (COPY_TO_REGCLASS RC:$src2, VR128)), addr:$src3)>;

    def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
                    (Op RC:$src1, (mem_frag addr:$src2),
                        RC:$src3))))),
              (!cast<Instruction>(Name#"mr_Int")
                (VT (COPY_TO_REGCLASS RC:$src1, VR128)), addr:$src2,
                (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
  }
}

defm : scalar_fma4_patterns<any_fma,       "VFMADDSS4",  v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fmsub,  "VFMSUBSS4",  v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fnmadd, "VFNMADDSS4", v4f32, FR32, loadf32>;
defm : scalar_fma4_patterns<X86any_Fnmsub, "VFNMSUBSS4", v4f32, FR32, loadf32>;

defm : scalar_fma4_patterns<any_fma,       "VFMADDSD4",  v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fmsub,  "VFMSUBSD4",  v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fnmadd, "VFNMADDSD4", v2f64, FR64, loadf64>;
defm : scalar_fma4_patterns<X86any_Fnmsub, "VFNMSUBSD4", v2f64, FR64, loadf64>;