X86SelectionDAGInfo.cpp 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322
  1. //===-- X86SelectionDAGInfo.cpp - X86 SelectionDAG Info -------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the X86SelectionDAGInfo class.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "X86SelectionDAGInfo.h"
  13. #include "X86ISelLowering.h"
  14. #include "X86InstrInfo.h"
  15. #include "X86RegisterInfo.h"
  16. #include "X86Subtarget.h"
  17. #include "llvm/CodeGen/MachineFrameInfo.h"
  18. #include "llvm/CodeGen/SelectionDAG.h"
  19. #include "llvm/CodeGen/TargetLowering.h"
  20. #include "llvm/IR/DerivedTypes.h"
  21. using namespace llvm;
  22. #define DEBUG_TYPE "x86-selectiondag-info"
  23. static cl::opt<bool>
  24. UseFSRMForMemcpy("x86-use-fsrm-for-memcpy", cl::Hidden, cl::init(false),
  25. cl::desc("Use fast short rep mov in memcpy lowering"));
  26. bool X86SelectionDAGInfo::isBaseRegConflictPossible(
  27. SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
  28. // We cannot use TRI->hasBasePointer() until *after* we select all basic
  29. // blocks. Legalization may introduce new stack temporaries with large
  30. // alignment requirements. Fall back to generic code if there are any
  31. // dynamic stack adjustments (hopefully rare) and the base pointer would
  32. // conflict if we had to use it.
  33. MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  34. if (!MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
  35. return false;
  36. const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
  37. DAG.getSubtarget().getRegisterInfo());
  38. return llvm::is_contained(ClobberSet, TRI->getBaseRegister());
  39. }
/// Lower memset for x86: either hand the job to a bzero libcall (when the
/// value is a known zero and the target provides one), give up so generic
/// code emits a memset call, or emit an inline glued REP STOS{B,W,D,Q}
/// followed by a short trailing memset for the bytes the wide stores miss.
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
    SDValue Size, Align Alignment, bool isVolatile,
    MachinePointerInfo DstPtrInfo) const {
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  const X86Subtarget &Subtarget =
      DAG.getMachineFunction().getSubtarget<X86Subtarget>();

#ifndef NDEBUG
  // Debug-only sanity check: REP STOS implicitly uses (R|E)AX, (R|E)CX and
  // (R|E)DI; none of them may double as the function's frame base register.
  const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
                                  X86::ECX, X86::EAX, X86::EDI};
  assert(!isBaseRegConflictPossible(DAG, ClobberSet));
#endif

  // If to a segment-relative address space, use the default lowering.
  // (Address spaces >= 256 are the x86 segment-relative ones.)
  if (DstPtrInfo.getAddrSpace() >= 256)
    return SDValue();

  // If not DWORD aligned or size is more than the threshold, call the library.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
  if (Alignment < Align(4) || !ConstantSize ||
      ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold()) {
    // Check to see if there is a specialized entry-point for memory zeroing.
    ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Val);

    if (const char *bzeroName =
            (ValC && ValC->isZero())
                ? DAG.getTargetLoweringInfo().getLibcallName(RTLIB::BZERO)
                : nullptr) {
      // Build and lower a call to bzero(Dst, Size); only the chain of the
      // call matters since bzero returns nothing.
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
      Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
      TargetLowering::ArgListTy Args;
      TargetLowering::ArgListEntry Entry;
      Entry.Node = Dst;
      Entry.Ty = IntPtrTy;
      Args.push_back(Entry);
      Entry.Node = Size;
      Args.push_back(Entry);

      TargetLowering::CallLoweringInfo CLI(DAG);
      CLI.setDebugLoc(dl)
          .setChain(Chain)
          .setLibCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
                        DAG.getExternalSymbol(bzeroName, IntPtr),
                        std::move(Args))
          .setDiscardResult();

      std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
      return CallResult.second;
    }

    // Otherwise have the target-independent code call memset.
    return SDValue();
  }

  uint64_t SizeVal = ConstantSize->getZExtValue();
  SDValue InFlag;      // glue linking the register copies to the REP_STOS node
  EVT AVT;             // element type of each store (i8/i16/i32/i64)
  SDValue Count;       // number of AVT-sized elements to store
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Val);
  unsigned BytesLeft = 0; // tail bytes not covered by the wide stores
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getZExtValue() & 255;

    // If the value is a constant, then we can potentially use larger sets:
    // splat the byte across the chosen store unit.
    if (Alignment > Align(2)) {
      // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      // NOTE(review): Align compares by value, so `> Align(8)` requires
      // 16-byte alignment before taking the 64-bit path, while the comment
      // below says QWORD aligned — confirm `>= Align(8)` wasn't intended.
      if (Subtarget.is64Bit() && Alignment > Align(8)) { // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
    } else if (Alignment == Align(2)) {
      // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
    } else {
      // Byte aligned: every byte is one element, count equals the size.
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = DAG.getIntPtrConstant(SizeVal, dl);
    }

    // For multi-byte elements, convert the byte size into an element count
    // and remember the remainder for the trailing memset below.
    if (AVT.bitsGT(MVT::i8)) {
      unsigned UBytes = AVT.getSizeInBits() / 8;
      Count = DAG.getIntPtrConstant(SizeVal / UBytes, dl);
      BytesLeft = SizeVal % UBytes;
    }

    Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, dl, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    // Non-constant value: fall back to byte stores with the value in AL.
    AVT = MVT::i8;
    Count = DAG.getIntPtrConstant(SizeVal, dl);
    Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Val, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Feed the implicit REP STOS operands (count in (R|E)CX, destination in
  // (R|E)DI) through a glued copy chain so nothing is scheduled in between.
  bool Use64BitRegs = Subtarget.isTarget64BitLP64();
  Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RDI : X86::EDI,
                           Dst, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
  Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);

  if (BytesLeft) {
    // Handle the last 1 - 7 bytes: emit a second, narrower memset for the
    // tail starting at Dst + (SizeVal - BytesLeft).
    unsigned Offset = SizeVal - BytesLeft;
    EVT AddrVT = Dst.getValueType();
    EVT SizeVT = Size.getValueType();

    Chain =
        DAG.getMemset(Chain, dl,
                      DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
                                  DAG.getConstant(Offset, dl, AddrVT)),
                      Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
                      isVolatile, false, DstPtrInfo.getWithOffset(Offset));
  }

  // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
  return Chain;
}
  161. /// Emit a single REP MOVS{B,W,D,Q} instruction.
  162. static SDValue emitRepmovs(const X86Subtarget &Subtarget, SelectionDAG &DAG,
  163. const SDLoc &dl, SDValue Chain, SDValue Dst,
  164. SDValue Src, SDValue Size, MVT AVT) {
  165. const bool Use64BitRegs = Subtarget.isTarget64BitLP64();
  166. const unsigned CX = Use64BitRegs ? X86::RCX : X86::ECX;
  167. const unsigned DI = Use64BitRegs ? X86::RDI : X86::EDI;
  168. const unsigned SI = Use64BitRegs ? X86::RSI : X86::ESI;
  169. SDValue InFlag;
  170. Chain = DAG.getCopyToReg(Chain, dl, CX, Size, InFlag);
  171. InFlag = Chain.getValue(1);
  172. Chain = DAG.getCopyToReg(Chain, dl, DI, Dst, InFlag);
  173. InFlag = Chain.getValue(1);
  174. Chain = DAG.getCopyToReg(Chain, dl, SI, Src, InFlag);
  175. InFlag = Chain.getValue(1);
  176. SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  177. SDValue Ops[] = {Chain, DAG.getValueType(AVT), InFlag};
  178. return DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops);
  179. }
  180. /// Emit a single REP MOVSB instruction for a particular constant size.
  181. static SDValue emitRepmovsB(const X86Subtarget &Subtarget, SelectionDAG &DAG,
  182. const SDLoc &dl, SDValue Chain, SDValue Dst,
  183. SDValue Src, uint64_t Size) {
  184. return emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src,
  185. DAG.getIntPtrConstant(Size, dl), MVT::i8);
  186. }
  187. /// Returns the best type to use with repmovs depending on alignment.
  188. static MVT getOptimalRepmovsType(const X86Subtarget &Subtarget,
  189. uint64_t Align) {
  190. assert((Align != 0) && "Align is normalized");
  191. assert(isPowerOf2_64(Align) && "Align is a power of 2");
  192. switch (Align) {
  193. case 1:
  194. return MVT::i8;
  195. case 2:
  196. return MVT::i16;
  197. case 4:
  198. return MVT::i32;
  199. default:
  200. return Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
  201. }
  202. }
/// Returns a REP MOVS instruction, possibly with a few load/stores to implement
/// a constant size memory copy. In some cases where we know REP MOVS is
/// inefficient we return an empty SDValue so the calling code can either
/// generate a load/store sequence or call the runtime memcpy function.
///
/// \param Size   constant copy length in bytes.
/// \param Align  guaranteed alignment of both pointers, in bytes.
/// \returns the chain of the emitted sequence, or an empty SDValue to
///          request the default lowering.
static SDValue emitConstantSizeRepmov(
    SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &dl,
    SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, EVT SizeVT,
    unsigned Align, bool isVolatile, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) {

  /// TODO: Revisit next line: big copy with ERMSB on march >= haswell are very
  /// efficient.
  if (!AlwaysInline && Size > Subtarget.getMaxInlineSizeThreshold())
    return SDValue();

  /// If we have enhanced repmovs we use it.
  // With ERMSB a plain REP MOVSB covers the whole length; no tail handling
  // is needed.
  if (Subtarget.hasERMSB())
    return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);

  assert(!Subtarget.hasERMSB() && "No efficient RepMovs");
  /// We assume runtime memcpy will do a better job for unaligned copies when
  /// ERMS is not present.
  if (!AlwaysInline && (Align & 3) != 0)
    return SDValue();

  // Split the copy into BlockCount stores of BlockBytes each, plus a
  // BytesLeft-byte remainder handled below.
  const MVT BlockType = getOptimalRepmovsType(Subtarget, Align);
  const uint64_t BlockBytes = BlockType.getSizeInBits() / 8;
  const uint64_t BlockCount = Size / BlockBytes;
  const uint64_t BytesLeft = Size % BlockBytes;
  SDValue RepMovs =
      emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src,
                  DAG.getIntPtrConstant(BlockCount, dl), BlockType);

  /// RepMov can process the whole length.
  if (BytesLeft == 0)
    return RepMovs;

  assert(BytesLeft && "We have leftover at this point");

  /// In case we optimize for size we use repmovsb even if it's less efficient
  /// so we can save the loads/stores of the leftover.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);

  // Handle the last 1 - 7 bytes.
  // The tail memcpy is chained off the incoming Chain (not off RepMovs), so
  // the two operations are independent in the DAG; a TokenFactor joins them.
  SmallVector<SDValue, 4> Results;
  Results.push_back(RepMovs);
  unsigned Offset = Size - BytesLeft;
  EVT DstVT = Dst.getValueType();
  EVT SrcVT = Src.getValueType();
  Results.push_back(DAG.getMemcpy(
      Chain, dl,
      DAG.getNode(ISD::ADD, dl, DstVT, Dst, DAG.getConstant(Offset, dl, DstVT)),
      DAG.getNode(ISD::ADD, dl, SrcVT, Src, DAG.getConstant(Offset, dl, SrcVT)),
      DAG.getConstant(BytesLeft, dl, SizeVT), llvm::Align(Align), isVolatile,
      /*AlwaysInline*/ true, /*isTailCall*/ false,
      DstPtrInfo.getWithOffset(Offset), SrcPtrInfo.getWithOffset(Offset)));
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}
  254. SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
  255. SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
  256. SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
  257. MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
  258. // If to a segment-relative address space, use the default lowering.
  259. if (DstPtrInfo.getAddrSpace() >= 256 || SrcPtrInfo.getAddrSpace() >= 256)
  260. return SDValue();
  261. // If the base registers conflict with our physical registers, use the default
  262. // lowering.
  263. const MCPhysReg ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
  264. X86::ECX, X86::ESI, X86::EDI};
  265. if (isBaseRegConflictPossible(DAG, ClobberSet))
  266. return SDValue();
  267. const X86Subtarget &Subtarget =
  268. DAG.getMachineFunction().getSubtarget<X86Subtarget>();
  269. // If enabled and available, use fast short rep mov.
  270. if (UseFSRMForMemcpy && Subtarget.hasFSRM())
  271. return emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src, Size, MVT::i8);
  272. /// Handle constant sizes,
  273. if (ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size))
  274. return emitConstantSizeRepmov(
  275. DAG, Subtarget, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
  276. Size.getValueType(), Alignment.value(), isVolatile, AlwaysInline,
  277. DstPtrInfo, SrcPtrInfo);
  278. return SDValue();
  279. }