
//===-- NVPTXTargetTransformInfo.h - NVPTX specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the NVPTX target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H

#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTXTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
  typedef BasicTTIImplBase<NVPTXTTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const NVPTXSubtarget *ST;
  const NVPTXTargetLowering *TLI;

  const NVPTXSubtarget *getST() const { return ST; }
  const NVPTXTargetLowering *getTLI() const { return TLI; }

public:
  explicit NVPTXTTIImpl(const NVPTXTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl()),
        TLI(ST->getTargetLowering()) {}

  bool hasBranchDivergence() { return true; }
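
  // Values that can differ between threads in a warp are divergent; for NVPTX
  // this includes, e.g., reads of the thread-id and lane-id intrinsics and the
  // results of atomic operations (see NVPTXTargetTransformInfo.cpp).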
  bool isSourceOfDivergence(const Value *V);
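
  // Generic (AS 0) serves as NVPTX's flat address space: a generic pointer may
  // alias any concrete space, which is what lets passes such as
  // InferAddressSpaces rewrite generic accesses into more specific ones.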
  unsigned getFlatAddressSpace() const {
    return AddressSpace::ADDRESS_SPACE_GENERIC;
  }
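
  // PTX does not allow initializers on variables in the shared, local, or
  // param address spaces, so only the remaining spaces can carry a non-undef
  // global initializer.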
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
    return AS != AddressSpace::ADDRESS_SPACE_SHARED &&
           AS != AddressSpace::ADDRESS_SPACE_LOCAL &&
           AS != AddressSpace::ADDRESS_SPACE_PARAM;
  }
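
  // Hook for NVPTX-specific InstCombine folds of NVVM intrinsics; the
  // simplifications themselves live in NVPTXTargetTransformInfo.cpp.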
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;

  // Loads and stores can be vectorized if the alignment is at least as big as
  // the load/store we want to vectorize.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const {
    return Alignment >= ChainSizeInBytes;
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const {
    return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
  }
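
  // For example, merging four adjacent 4-byte loads into one 16-byte vector
  // load is reported legal only when the chain is at least 16-byte aligned.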

  // NVPTX's virtual ISA has an unlimited supply of registers of every kind,
  // but the physical machine does not. We conservatively return 1 here, which
  // is just enough to enable the vectorizers but disables heuristics based on
  // the number of registers.
  // FIXME: Return a more reasonable number, while keeping an eye on
  // LoopVectorizer's unrolling heuristics.
  unsigned getNumberOfRegisters(bool Vector) const { return 1; }

  // Only <2 x half> should be vectorized, so always return 32 for the vector
  // register size.
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }
  unsigned getMinVectorRegisterBitWidth() const { return 32; }
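
  // PTX packs two f16 values into one 32-bit register (f16x2), which is why
  // 32 bits is both the register width and the minimum vector width reported.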

  // We don't want to prevent inlining because of target-cpu and -features
  // attributes that were added to newer versions of LLVM/Clang: there are no
  // incompatible functions in PTX, and ptxas will report an error if a
  // function uses a feature the target does not support.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return true;
  }

  // Increase the inlining cost threshold by a factor of 5, reflecting that
  // calls are particularly expensive in NVPTX.
  unsigned getInliningThresholdMultiplier() { return 5; }
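
  // NVPTX-specific arithmetic costs. For example, 64-bit integer ALU
  // operations are modeled as roughly twice the cost of their 32-bit
  // counterparts, since the generated machine code (SASS) simulates an i64
  // with two i32 registers (see NVPTXTargetTransformInfo.cpp).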
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);
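
  // Loop unrolling and peeling tuning. The NVPTX implementation starts from
  // the base preferences and, among other things, enables partial unrolling,
  // since branches are comparatively expensive on GPUs (details in the .cpp).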
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
    // Volatile loads/stores are only supported for shared and global address
    // spaces, or for generic AS that maps to them.
    if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||
          AddrSpace == llvm::ADDRESS_SPACE_GLOBAL ||
          AddrSpace == llvm::ADDRESS_SPACE_SHARED))
      return false;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::Load:
    case Instruction::Store:
      return true;
    }
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_NVPTX_NVPTXTARGETTRANSFORMINFO_H