BasicTTIImpl.h

  1. #pragma once
  2. #ifdef __GNUC__
  3. #pragma GCC diagnostic push
  4. #pragma GCC diagnostic ignored "-Wunused-parameter"
  5. #endif
  6. //===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
  7. //
  8. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  9. // See https://llvm.org/LICENSE.txt for license information.
  10. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  11. //
  12. //===----------------------------------------------------------------------===//
  13. //
  14. /// \file
  15. /// This file provides a helper that implements much of the TTI interface in
  16. /// terms of the target-independent code generator and TargetLowering
  17. /// interfaces.
  18. //
  19. //===----------------------------------------------------------------------===//
  20. #ifndef LLVM_CODEGEN_BASICTTIIMPL_H
  21. #define LLVM_CODEGEN_BASICTTIIMPL_H
  22. #include "llvm/ADT/APInt.h"
  23. #include "llvm/ADT/ArrayRef.h"
  24. #include "llvm/ADT/BitVector.h"
  25. #include "llvm/ADT/SmallPtrSet.h"
  26. #include "llvm/ADT/SmallVector.h"
  27. #include "llvm/Analysis/LoopInfo.h"
  28. #include "llvm/Analysis/OptimizationRemarkEmitter.h"
  29. #include "llvm/Analysis/TargetTransformInfo.h"
  30. #include "llvm/Analysis/TargetTransformInfoImpl.h"
  31. #include "llvm/CodeGen/ISDOpcodes.h"
  32. #include "llvm/CodeGen/TargetLowering.h"
  33. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  34. #include "llvm/CodeGen/ValueTypes.h"
  35. #include "llvm/IR/BasicBlock.h"
  36. #include "llvm/IR/Constant.h"
  37. #include "llvm/IR/Constants.h"
  38. #include "llvm/IR/DataLayout.h"
  39. #include "llvm/IR/DerivedTypes.h"
  40. #include "llvm/IR/InstrTypes.h"
  41. #include "llvm/IR/Instruction.h"
  42. #include "llvm/IR/Instructions.h"
  43. #include "llvm/IR/Intrinsics.h"
  44. #include "llvm/IR/Operator.h"
  45. #include "llvm/IR/Type.h"
  46. #include "llvm/IR/Value.h"
  47. #include "llvm/Support/Casting.h"
  48. #include "llvm/Support/CommandLine.h"
  49. #include "llvm/Support/ErrorHandling.h"
  50. #include "llvm/Support/MachineValueType.h"
  51. #include "llvm/Support/MathExtras.h"
  52. #include "llvm/Target/TargetMachine.h"
  53. #include "llvm/Target/TargetOptions.h"
  54. #include <algorithm>
  55. #include <cassert>
  56. #include <cstdint>
  57. #include <limits>
  58. #include <optional>
  59. #include <utility>
  60. namespace llvm {
  61. class Function;
  62. class GlobalValue;
  63. class LLVMContext;
  64. class ScalarEvolution;
  65. class SCEV;
  66. class TargetMachine;
  67. extern cl::opt<unsigned> PartialUnrollingThreshold;
  68. /// Base class which can be used to help build a TTI implementation.
  69. ///
  70. /// This class provides as much implementation of the TTI interface as is
  71. /// possible using the target independent parts of the code generator.
  72. ///
  73. /// In order to subclass it, your class must implement a getST() method to
  74. /// return the subtarget, and a getTLI() method to return the target lowering.
  75. /// We need these methods implemented in the derived class so that this class
  76. /// doesn't have to duplicate storage for them.
  77. template <typename T>
  78. class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
  79. private:
  80. using BaseT = TargetTransformInfoImplCRTPBase<T>;
  81. using TTI = TargetTransformInfo;
  82. /// Helper function to access this as a T.
  83. T *thisT() { return static_cast<T *>(this); }
  84. /// Estimate a cost of Broadcast as an extract and sequence of insert
  85. /// operations.
  86. InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
  87. TTI::TargetCostKind CostKind) {
  88. InstructionCost Cost = 0;
  89. // Broadcast cost is equal to the cost of extracting the zero'th element
  90. // plus the cost of inserting it into every element of the result vector.
  91. Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
  92. CostKind, 0, nullptr, nullptr);
  93. for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
  94. Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
  95. CostKind, i, nullptr, nullptr);
  96. }
  97. return Cost;
  98. }
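// Illustrative note (editorial addition, not upstream LLVM text): for a
// broadcast of <4 x float>, the model above charges one extract of lane 0
// plus one insert per result lane, i.e. roughly
//   Cost = ExtractCost(lane 0) + 4 * InsertCost(lane i)
// with the per-lane costs supplied by the target's getVectorInstrCost().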
  99. /// Estimate a cost of shuffle as a sequence of extract and insert
  100. /// operations.
  101. InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
  102. TTI::TargetCostKind CostKind) {
  103. InstructionCost Cost = 0;
  104. // Shuffle cost is equal to the cost of extracting each element from its
  105. // argument plus the cost of inserting it into the result vector.
  106. // E.g., <4 x float> with a mask of <0,5,2,7>: we need to extract from
  107. // index 0 of the first vector, index 1 of the second vector, index 2 of
  108. // the first vector, and finally index 3 of the second vector, and insert
  109. // them at indices <0,1,2,3> of the result vector.
  110. for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
  111. Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
  112. CostKind, i, nullptr, nullptr);
  113. Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
  114. CostKind, i, nullptr, nullptr);
  115. }
  116. return Cost;
  117. }
  118. /// Estimate a cost of subvector extraction as a sequence of extract and
  119. /// insert operations.
  120. InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
  121. TTI::TargetCostKind CostKind,
  122. int Index,
  123. FixedVectorType *SubVTy) {
  124. assert(VTy && SubVTy &&
  125. "Can only extract subvectors from vectors");
  126. int NumSubElts = SubVTy->getNumElements();
  127. assert((!isa<FixedVectorType>(VTy) ||
  128. (Index + NumSubElts) <=
  129. (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
  130. "SK_ExtractSubvector index out of range");
  131. InstructionCost Cost = 0;
  132. // Subvector extraction cost is equal to the cost of extracting each element
  133. // from the source vector plus the cost of inserting it into the result
  134. // subvector type.
  135. for (int i = 0; i != NumSubElts; ++i) {
  136. Cost +=
  137. thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
  138. CostKind, i + Index, nullptr, nullptr);
  139. Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
  140. CostKind, i, nullptr, nullptr);
  141. }
  142. return Cost;
  143. }
  144. /// Estimate a cost of subvector insertion as a sequence of extract and
  145. /// insert operations.
  146. InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
  147. TTI::TargetCostKind CostKind,
  148. int Index,
  149. FixedVectorType *SubVTy) {
  150. assert(VTy && SubVTy &&
  151. "Can only insert subvectors into vectors");
  152. int NumSubElts = SubVTy->getNumElements();
  153. assert((!isa<FixedVectorType>(VTy) ||
  154. (Index + NumSubElts) <=
  155. (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
  156. "SK_InsertSubvector index out of range");
  157. InstructionCost Cost = 0;
  158. // Subvector insertion cost is equal to the cost of extracting each element
  159. // from the subvector plus the cost of inserting it into the result vector
  160. // type.
  161. for (int i = 0; i != NumSubElts; ++i) {
  162. Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
  163. CostKind, i, nullptr, nullptr);
  164. Cost +=
  165. thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
  166. i + Index, nullptr, nullptr);
  167. }
  168. return Cost;
  169. }
  170. /// Local query method delegates up to T which *must* implement this!
  171. const TargetSubtargetInfo *getST() const {
  172. return static_cast<const T *>(this)->getST();
  173. }
  174. /// Local query method delegates up to T which *must* implement this!
  175. const TargetLoweringBase *getTLI() const {
  176. return static_cast<const T *>(this)->getTLI();
  177. }
  178. static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
  179. switch (M) {
  180. case TTI::MIM_Unindexed:
  181. return ISD::UNINDEXED;
  182. case TTI::MIM_PreInc:
  183. return ISD::PRE_INC;
  184. case TTI::MIM_PreDec:
  185. return ISD::PRE_DEC;
  186. case TTI::MIM_PostInc:
  187. return ISD::POST_INC;
  188. case TTI::MIM_PostDec:
  189. return ISD::POST_DEC;
  190. }
  191. llvm_unreachable("Unexpected MemIndexedMode");
  192. }
  193. InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
  194. Align Alignment,
  195. bool VariableMask,
  196. bool IsGatherScatter,
  197. TTI::TargetCostKind CostKind) {
  198. // We cannot scalarize scalable vectors, so return Invalid.
  199. if (isa<ScalableVectorType>(DataTy))
  200. return InstructionCost::getInvalid();
  201. auto *VT = cast<FixedVectorType>(DataTy);
  202. // Assume the target does not have support for gather/scatter operations
  203. // and provide a rough estimate.
  204. //
  205. // First, compute the cost of the individual memory operations.
  206. InstructionCost AddrExtractCost =
  207. IsGatherScatter
  208. ? getVectorInstrCost(Instruction::ExtractElement,
  209. FixedVectorType::get(
  210. PointerType::get(VT->getElementType(), 0),
  211. VT->getNumElements()),
  212. CostKind, -1, nullptr, nullptr)
  213. : 0;
  214. InstructionCost LoadCost =
  215. VT->getNumElements() *
  216. (AddrExtractCost +
  217. getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
  218. // Next, compute the cost of packing the result in a vector.
  219. InstructionCost PackingCost =
  220. getScalarizationOverhead(VT, Opcode != Instruction::Store,
  221. Opcode == Instruction::Store, CostKind);
  222. InstructionCost ConditionalCost = 0;
  223. if (VariableMask) {
  224. // Compute the cost of conditionally executing the memory operations with
  225. // variable masks. This includes extracting the individual conditions,
  226. // branches, and PHIs to combine the results.
  227. // NOTE: Estimating the cost of conditionally executing the memory
  228. // operations accurately is quite difficult and the current solution
  229. // provides a very rough estimate only.
  230. ConditionalCost =
  231. VT->getNumElements() *
  232. (getVectorInstrCost(
  233. Instruction::ExtractElement,
  234. FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
  235. VT->getNumElements()),
  236. CostKind, -1, nullptr, nullptr) +
  237. getCFInstrCost(Instruction::Br, CostKind) +
  238. getCFInstrCost(Instruction::PHI, CostKind));
  239. }
  240. return LoadCost + PackingCost + ConditionalCost;
  241. }
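// Rough shape of the estimate above (editorial summary, not upstream text):
// for a fixed <N x T> access the cost is
//   N * (AddrExtractCost + scalar memory op)  // address extract applies to
//                                             // gather/scatter only
//   + packing/unpacking scalarization overhead
//   + N * (i1 extract + br + phi)             // only with a variable mask
// while scalable vectors are rejected with an Invalid cost.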
  242. protected:
  243. explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
  244. : BaseT(DL) {}
  245. virtual ~BasicTTIImplBase() = default;
  246. using TargetTransformInfoImplBase::DL;
  247. public:
  248. /// \name Scalar TTI Implementations
  249. /// @{
  250. bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
  251. unsigned AddressSpace, Align Alignment,
  252. unsigned *Fast) const {
  253. EVT E = EVT::getIntegerVT(Context, BitWidth);
  254. return getTLI()->allowsMisalignedMemoryAccesses(
  255. E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  256. }
  257. bool hasBranchDivergence() { return false; }
  258. bool useGPUDivergenceAnalysis() { return false; }
  259. bool isSourceOfDivergence(const Value *V) { return false; }
  260. bool isAlwaysUniform(const Value *V) { return false; }
  261. unsigned getFlatAddressSpace() {
  262. // Return an invalid address space.
  263. return -1;
  264. }
  265. bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
  266. Intrinsic::ID IID) const {
  267. return false;
  268. }
  269. bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  270. return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
  271. }
  272. unsigned getAssumedAddrSpace(const Value *V) const {
  273. return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
  274. }
  275. bool isSingleThreaded() const {
  276. return getTLI()->getTargetMachine().Options.ThreadModel ==
  277. ThreadModel::Single;
  278. }
  279. std::pair<const Value *, unsigned>
  280. getPredicatedAddrSpace(const Value *V) const {
  281. return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
  282. }
  283. Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
  284. Value *NewV) const {
  285. return nullptr;
  286. }
  287. bool isLegalAddImmediate(int64_t imm) {
  288. return getTLI()->isLegalAddImmediate(imm);
  289. }
  290. bool isLegalICmpImmediate(int64_t imm) {
  291. return getTLI()->isLegalICmpImmediate(imm);
  292. }
  293. bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
  294. bool HasBaseReg, int64_t Scale,
  295. unsigned AddrSpace, Instruction *I = nullptr) {
  296. TargetLoweringBase::AddrMode AM;
  297. AM.BaseGV = BaseGV;
  298. AM.BaseOffs = BaseOffset;
  299. AM.HasBaseReg = HasBaseReg;
  300. AM.Scale = Scale;
  301. return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  302. }
  303. unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
  304. Type *ScalarValTy) const {
  305. auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
  306. auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
  307. EVT VT = getTLI()->getValueType(DL, SrcTy);
  308. if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
  309. getTLI()->isOperationCustom(ISD::STORE, VT))
  310. return true;
  311. EVT ValVT =
  312. getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
  313. EVT LegalizedVT =
  314. getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
  315. return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
  316. };
  317. while (VF > 2 && IsSupportedByTarget(VF))
  318. VF /= 2;
  319. return VF;
  320. }
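// Example of the halving loop above (editorial note): starting from VF = 8,
// IsSupportedByTarget checks whether a store of VF/2 = 4 elements is legal,
// custom, or a legal truncating store; while that holds, VF is halved, so
// the walk 8 -> 4 -> 2 stops either at 2 or at the first width whose
// half-width store the target cannot handle.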
  321. bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
  322. const DataLayout &DL) const {
  323. EVT VT = getTLI()->getValueType(DL, Ty);
  324. return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  325. }
  326. bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
  327. const DataLayout &DL) const {
  328. EVT VT = getTLI()->getValueType(DL, Ty);
  329. return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  330. }
  331. bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
  332. return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  333. }
  334. bool isNumRegsMajorCostOfLSR() {
  335. return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
  336. }
  337. bool isProfitableLSRChainElement(Instruction *I) {
  338. return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
  339. }
  340. InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
  341. int64_t BaseOffset, bool HasBaseReg,
  342. int64_t Scale, unsigned AddrSpace) {
  343. TargetLoweringBase::AddrMode AM;
  344. AM.BaseGV = BaseGV;
  345. AM.BaseOffs = BaseOffset;
  346. AM.HasBaseReg = HasBaseReg;
  347. AM.Scale = Scale;
  348. if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
  349. return 0;
  350. return -1;
  351. }
  352. bool isTruncateFree(Type *Ty1, Type *Ty2) {
  353. return getTLI()->isTruncateFree(Ty1, Ty2);
  354. }
  355. bool isProfitableToHoist(Instruction *I) {
  356. return getTLI()->isProfitableToHoist(I);
  357. }
  358. bool useAA() const { return getST()->useAA(); }
  359. bool isTypeLegal(Type *Ty) {
  360. EVT VT = getTLI()->getValueType(DL, Ty);
  361. return getTLI()->isTypeLegal(VT);
  362. }
  363. unsigned getRegUsageForType(Type *Ty) {
  364. EVT ETy = getTLI()->getValueType(DL, Ty);
  365. return getTLI()->getNumRegisters(Ty->getContext(), ETy);
  366. }
  367. InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
  368. ArrayRef<const Value *> Operands,
  369. TTI::TargetCostKind CostKind) {
  370. return BaseT::getGEPCost(PointeeType, Ptr, Operands, CostKind);
  371. }
  372. unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
  373. unsigned &JumpTableSize,
  374. ProfileSummaryInfo *PSI,
  375. BlockFrequencyInfo *BFI) {
  376. /// Try to find the estimated number of clusters. Note that the number of
  377. /// clusters identified in this function could be different from the actual
  378. /// numbers found in lowering. This function ignores switches that are
  379. /// lowered with a mix of jump table / bit test / BTree. This function was
  380. /// initially intended to be used when estimating the cost of a switch in
  381. /// the inline cost heuristic, but it is a generic cost model usable in other
  382. /// places (e.g., in loop unrolling).
  383. unsigned N = SI.getNumCases();
  384. const TargetLoweringBase *TLI = getTLI();
  385. const DataLayout &DL = this->getDataLayout();
  386. JumpTableSize = 0;
  387. bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
  388. // Early exit if neither a jump table nor a bit test is possible.
  389. if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
  390. return N;
  391. APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
  392. APInt MinCaseVal = MaxCaseVal;
  393. for (auto CI : SI.cases()) {
  394. const APInt &CaseVal = CI.getCaseValue()->getValue();
  395. if (CaseVal.sgt(MaxCaseVal))
  396. MaxCaseVal = CaseVal;
  397. if (CaseVal.slt(MinCaseVal))
  398. MinCaseVal = CaseVal;
  399. }
  400. // Check if suitable for a bit test
  401. if (N <= DL.getIndexSizeInBits(0u)) {
  402. SmallPtrSet<const BasicBlock *, 4> Dests;
  403. for (auto I : SI.cases())
  404. Dests.insert(I.getCaseSuccessor());
  405. if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
  406. DL))
  407. return 1;
  408. }
  409. // Check if suitable for a jump table.
  410. if (IsJTAllowed) {
  411. if (N < 2 || N < TLI->getMinimumJumpTableEntries())
  412. return N;
  413. uint64_t Range =
  414. (MaxCaseVal - MinCaseVal)
  415. .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
  416. // Check whether a range of clusters is dense enough for a jump table
  417. if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
  418. JumpTableSize = Range;
  419. return 1;
  420. }
  421. }
  422. return N;
  423. }
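// Worked example (editorial, target-dependent): a switch over cases
// {0, 1, 2, 3} with four distinct successors has N = 4 and Range = 4. If
// jump tables are allowed and isSuitableForJumpTable() accepts that density,
// the function reports a single cluster and sets JumpTableSize = 4;
// otherwise it falls back to returning N, i.e. one cluster per case.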
  424. bool shouldBuildLookupTables() {
  425. const TargetLoweringBase *TLI = getTLI();
  426. return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
  427. TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  428. }
  429. bool shouldBuildRelLookupTables() const {
  430. const TargetMachine &TM = getTLI()->getTargetMachine();
  431. // If non-PIC mode, do not generate a relative lookup table.
  432. if (!TM.isPositionIndependent())
  433. return false;
  434. /// Relative lookup table entries consist of 32-bit offsets.
  435. /// Do not generate relative lookup tables for large code models
  436. /// in 64-bit architectures where 32-bit offsets might not be enough.
  437. if (TM.getCodeModel() == CodeModel::Medium ||
  438. TM.getCodeModel() == CodeModel::Large)
  439. return false;
  440. Triple TargetTriple = TM.getTargetTriple();
  441. if (!TargetTriple.isArch64Bit())
  442. return false;
  443. // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
  444. // there.
  445. if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
  446. return false;
  447. return true;
  448. }
  449. bool haveFastSqrt(Type *Ty) {
  450. const TargetLoweringBase *TLI = getTLI();
  451. EVT VT = TLI->getValueType(DL, Ty);
  452. return TLI->isTypeLegal(VT) &&
  453. TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  454. }
  455. bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  456. return true;
  457. }
  458. InstructionCost getFPOpCost(Type *Ty) {
  459. // Check whether FADD is available, as a proxy for floating-point in
  460. // general.
  461. const TargetLoweringBase *TLI = getTLI();
  462. EVT VT = TLI->getValueType(DL, Ty);
  463. if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
  464. return TargetTransformInfo::TCC_Basic;
  465. return TargetTransformInfo::TCC_Expensive;
  466. }
  467. unsigned getInliningThresholdMultiplier() { return 1; }
  468. unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
  469. int getInlinerVectorBonusPercent() { return 150; }
  470. void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  471. TTI::UnrollingPreferences &UP,
  472. OptimizationRemarkEmitter *ORE) {
  473. // This unrolling functionality is target independent, but to provide some
  474. // motivation for its intended use, for x86:
  475. // According to the Intel 64 and IA-32 Architectures Optimization Reference
  476. // Manual, Intel Core models and later have a loop stream detector (and
  477. // associated uop queue) that can benefit from partial unrolling.
  478. // The relevant requirements are:
  479. // - The loop must have no more than 4 (8 for Nehalem and later) branches
  480. // taken, and none of them may be calls.
  481. // - The loop can have no more than 18 (28 for Nehalem and later) uops.
  482. // According to the Software Optimization Guide for AMD Family 15h
  483. // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
  484. // and loop buffer which can benefit from partial unrolling.
  485. // The relevant requirements are:
  486. // - The loop must have fewer than 16 branches
  487. // - The loop must have less than 40 uops in all executed loop branches
  488. // The number of taken branches in a loop is hard to estimate here, and
  489. // benchmarking has revealed that it is better not to be conservative when
  490. // estimating the branch count. As a result, we'll ignore the branch limits
  491. // until someone finds a case where it matters in practice.
  492. unsigned MaxOps;
  493. const TargetSubtargetInfo *ST = getST();
  494. if (PartialUnrollingThreshold.getNumOccurrences() > 0)
  495. MaxOps = PartialUnrollingThreshold;
  496. else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
  497. MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
  498. else
  499. return;
  500. // Scan the loop: don't unroll loops with calls.
  501. for (BasicBlock *BB : L->blocks()) {
  502. for (Instruction &I : *BB) {
  503. if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
  504. if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
  505. if (!thisT()->isLoweredToCall(F))
  506. continue;
  507. }
  508. if (ORE) {
  509. ORE->emit([&]() {
  510. return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
  511. L->getHeader())
  512. << "advising against unrolling the loop because it "
  513. "contains a "
  514. << ore::NV("Call", &I);
  515. });
  516. }
  517. return;
  518. }
  519. }
  520. }
  521. // Enable runtime and partial unrolling up to the specified size.
  522. // Enable using trip count upper bound to unroll loops.
  523. UP.Partial = UP.Runtime = UP.UpperBound = true;
  524. UP.PartialThreshold = MaxOps;
  525. // Avoid unrolling when optimizing for size.
  526. UP.OptSizeThreshold = 0;
  527. UP.PartialOptSizeThreshold = 0;
  528. // Set number of instructions optimized when "back edge"
  529. // becomes "fall through" to default value of 2.
  530. UP.BEInsns = 2;
  531. }
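// Practical effect (editorial note): on a subtarget whose scheduling model
// reports LoopMicroOpBufferSize == 28 and whose loop contains no calls that
// are lowered to real calls, this enables partial/runtime unrolling with
// UP.PartialThreshold = 28; an explicit PartialUnrollingThreshold setting
// takes precedence over the scheduling-model value.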
  532. void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
  533. TTI::PeelingPreferences &PP) {
  534. PP.PeelCount = 0;
  535. PP.AllowPeeling = true;
  536. PP.AllowLoopNestsPeeling = false;
  537. PP.PeelProfiledIterations = true;
  538. }
  539. bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
  540. AssumptionCache &AC,
  541. TargetLibraryInfo *LibInfo,
  542. HardwareLoopInfo &HWLoopInfo) {
  543. return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  544. }
  545. bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
  546. AssumptionCache &AC, TargetLibraryInfo *TLI,
  547. DominatorTree *DT,
  548. LoopVectorizationLegality *LVL,
  549. InterleavedAccessInfo *IAI) {
  550. return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LVL, IAI);
  551. }
  552. PredicationStyle emitGetActiveLaneMask() {
  553. return BaseT::emitGetActiveLaneMask();
  554. }
  555. std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
  556. IntrinsicInst &II) {
  557. return BaseT::instCombineIntrinsic(IC, II);
  558. }
  559. std::optional<Value *>
  560. simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
  561. APInt DemandedMask, KnownBits &Known,
  562. bool &KnownBitsComputed) {
  563. return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
  564. KnownBitsComputed);
  565. }
  566. std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
  567. InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
  568. APInt &UndefElts2, APInt &UndefElts3,
  569. std::function<void(Instruction *, unsigned, APInt, APInt &)>
  570. SimplifyAndSetOp) {
  571. return BaseT::simplifyDemandedVectorEltsIntrinsic(
  572. IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
  573. SimplifyAndSetOp);
  574. }
  575. virtual std::optional<unsigned>
  576. getCacheSize(TargetTransformInfo::CacheLevel Level) const {
  577. return std::optional<unsigned>(
  578. getST()->getCacheSize(static_cast<unsigned>(Level)));
  579. }
  580. virtual std::optional<unsigned>
  581. getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
  582. std::optional<unsigned> TargetResult =
  583. getST()->getCacheAssociativity(static_cast<unsigned>(Level));
  584. if (TargetResult)
  585. return TargetResult;
  586. return BaseT::getCacheAssociativity(Level);
  587. }
  588. virtual unsigned getCacheLineSize() const {
  589. return getST()->getCacheLineSize();
  590. }
  591. virtual unsigned getPrefetchDistance() const {
  592. return getST()->getPrefetchDistance();
  593. }
  594. virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
  595. unsigned NumStridedMemAccesses,
  596. unsigned NumPrefetches,
  597. bool HasCall) const {
  598. return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
  599. NumPrefetches, HasCall);
  600. }
  601. virtual unsigned getMaxPrefetchIterationsAhead() const {
  602. return getST()->getMaxPrefetchIterationsAhead();
  603. }
  604. virtual bool enableWritePrefetching() const {
  605. return getST()->enableWritePrefetching();
  606. }
  607. virtual bool shouldPrefetchAddressSpace(unsigned AS) const {
  608. return getST()->shouldPrefetchAddressSpace(AS);
  609. }
  610. /// @}
  611. /// \name Vector TTI Implementations
  612. /// @{
  613. TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  614. return TypeSize::getFixed(32);
  615. }
  616. std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
  617. std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
  618. /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  619. /// are set if the demanded result elements need to be inserted and/or
  620. /// extracted from vectors.
  621. InstructionCost getScalarizationOverhead(VectorType *InTy,
  622. const APInt &DemandedElts,
  623. bool Insert, bool Extract,
  624. TTI::TargetCostKind CostKind) {
  625. /// FIXME: a bitfield is not a reasonable abstraction for talking about
  626. /// which elements are needed from a scalable vector
  627. if (isa<ScalableVectorType>(InTy))
  628. return InstructionCost::getInvalid();
  629. auto *Ty = cast<FixedVectorType>(InTy);
  630. assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
  631. "Vector size mismatch");
  632. InstructionCost Cost = 0;
  633. for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
  634. if (!DemandedElts[i])
  635. continue;
  636. if (Insert)
  637. Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
  638. CostKind, i, nullptr, nullptr);
  639. if (Extract)
  640. Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
  641. CostKind, i, nullptr, nullptr);
  642. }
  643. return Cost;
  644. }
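// Illustrative example (editorial): for a <4 x i32> value with
// DemandedElts = 0b0101 and Insert == Extract == true, only lanes 0 and 2
// are counted, so the estimate is two insertelement plus two extractelement
// costs.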
  645. /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  646. InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
  647. bool Extract,
  648. TTI::TargetCostKind CostKind) {
  649. if (isa<ScalableVectorType>(InTy))
  650. return InstructionCost::getInvalid();
  651. auto *Ty = cast<FixedVectorType>(InTy);
  652. APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
  653. return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
  654. CostKind);
  655. }
  656. /// Estimate the overhead of scalarizing an instruction's unique
  657. /// non-constant operands. The (potentially vector) types to use for each
  658. /// argument are passed via Tys.
  659. InstructionCost
  660. getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
  661. ArrayRef<Type *> Tys,
  662. TTI::TargetCostKind CostKind) {
  663. assert(Args.size() == Tys.size() && "Expected matching Args and Tys");
  664. InstructionCost Cost = 0;
  665. SmallPtrSet<const Value*, 4> UniqueOperands;
  666. for (int I = 0, E = Args.size(); I != E; I++) {
  667. // Disregard things like metadata arguments.
  668. const Value *A = Args[I];
  669. Type *Ty = Tys[I];
  670. if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
  671. !Ty->isPtrOrPtrVectorTy())
  672. continue;
  673. if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
  674. if (auto *VecTy = dyn_cast<VectorType>(Ty))
  675. Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
  676. /*Extract*/ true, CostKind);
  677. }
  678. }
  679. return Cost;
  680. }
  681. /// Estimate the overhead of scalarizing the inputs and outputs of an
  682. /// instruction, with return type RetTy and arguments Args of type Tys. If
  683. /// Args are unknown (empty), then the cost associated with one argument is
  684. /// added as a heuristic.
  685. InstructionCost getScalarizationOverhead(VectorType *RetTy,
  686. ArrayRef<const Value *> Args,
  687. ArrayRef<Type *> Tys,
  688. TTI::TargetCostKind CostKind) {
  689. InstructionCost Cost = getScalarizationOverhead(
  690. RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
  691. if (!Args.empty())
  692. Cost += getOperandsScalarizationOverhead(Args, Tys, CostKind);
  693. else
  694. // When no information on arguments is provided, we add the cost
  695. // associated with one argument as a heuristic.
  696. Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
  697. /*Extract*/ true, CostKind);
  698. return Cost;
  699. }
  700. /// Estimate the cost of type-legalization and the legalized type.
  701. std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
  702. LLVMContext &C = Ty->getContext();
  703. EVT MTy = getTLI()->getValueType(DL, Ty);
  704. InstructionCost Cost = 1;
  705. // We keep legalizing the type until we find a legal kind. We assume that
  706. // the only operation that costs anything is the split. After splitting
  707. // we need to handle two types.
  708. while (true) {
  709. TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);
  710. if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
  711. // Ensure we return a sensible simple VT here, since many callers of
  712. // this function require it.
  713. MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
  714. return std::make_pair(InstructionCost::getInvalid(), VT);
  715. }
  716. if (LK.first == TargetLoweringBase::TypeLegal)
  717. return std::make_pair(Cost, MTy.getSimpleVT());
  718. if (LK.first == TargetLoweringBase::TypeSplitVector ||
  719. LK.first == TargetLoweringBase::TypeExpandInteger)
  720. Cost *= 2;
  721. // Do not loop with f128 type.
  722. if (MTy == LK.second)
  723. return std::make_pair(Cost, MTy.getSimpleVT());
  724. // Keep legalizing the type.
  725. MTy = LK.second;
  726. }
  727. }
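// Worked example (editorial, target-dependent): legalizing <8 x i64> on a
// target whose widest legal vector type is v2i64 takes two TypeSplitVector
// rounds (v8i64 -> v4i64 -> v2i64), each doubling the running cost, so the
// function returns {4, MVT::v2i64}.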
  728. unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
  729. InstructionCost getArithmeticInstrCost(
  730. unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
  731. TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
  732. TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
  733. ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
  734. const Instruction *CxtI = nullptr) {
  735. // Check if any of the operands are vector operands.
  736. const TargetLoweringBase *TLI = getTLI();
  737. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  738. assert(ISD && "Invalid opcode");
  739. // TODO: Handle more cost kinds.
  740. if (CostKind != TTI::TCK_RecipThroughput)
  741. return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
  742. Opd1Info, Opd2Info,
  743. Args, CxtI);
  744. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  745. bool IsFloat = Ty->isFPOrFPVectorTy();
  746. // Assume that floating point arithmetic operations cost twice as much as
  747. // integer operations.
  748. InstructionCost OpCost = (IsFloat ? 2 : 1);
  749. if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
  750. // The operation is legal. Assume it costs 1.
  751. // TODO: Once we have extract/insert subvector cost we need to use them.
  752. return LT.first * OpCost;
  753. }
  754. if (!TLI->isOperationExpand(ISD, LT.second)) {
  755. // If the operation is custom lowered, then assume that the code is twice
  756. // as expensive.
  757. return LT.first * 2 * OpCost;
  758. }
  759. // An 'Expand' of URem and SRem is special because it may default
  760. // to expanding the operation into a sequence of sub-operations
  761. // i.e. X % Y -> X-(X/Y)*Y.
  762. if (ISD == ISD::UREM || ISD == ISD::SREM) {
  763. bool IsSigned = ISD == ISD::SREM;
  764. if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
  765. LT.second) ||
  766. TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
  767. LT.second)) {
  768. unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
  769. InstructionCost DivCost = thisT()->getArithmeticInstrCost(
  770. DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
  771. InstructionCost MulCost =
  772. thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
  773. InstructionCost SubCost =
  774. thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
  775. return DivCost + MulCost + SubCost;
  776. }
  777. }
  778. // We cannot scalarize scalable vectors, so return Invalid.
  779. if (isa<ScalableVectorType>(Ty))
  780. return InstructionCost::getInvalid();
  781. // Else, assume that we need to scalarize this op.
  782. // TODO: If one of the types get legalized by splitting, handle this
  783. // similarly to what getCastInstrCost() does.
  784. if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
  785. InstructionCost Cost = thisT()->getArithmeticInstrCost(
  786. Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
  787. Args, CxtI);
  788. // Return the cost of multiple scalar invocations plus the cost of
  789. // inserting and extracting the values.
  790. SmallVector<Type *> Tys(Args.size(), Ty);
  791. return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
  792. VTy->getNumElements() * Cost;
  793. }
  794. // We don't know anything about this scalar instruction.
  795. return OpCost;
  796. }
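// Example of the URem/SRem special case above (editorial note): if UDIV is
// legal or custom for the legalized type but UREM is not, "x % y" is priced
// as the sum of the UDiv, Mul and Sub costs, matching the
// x - (x / y) * y expansion.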
  797. TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
  798. ArrayRef<int> Mask) const {
  799. int Limit = Mask.size() * 2;
  800. if (Mask.empty() ||
  801. // Extra check required by isSingleSourceMaskImpl function (called by
  802. // ShuffleVectorInst::isSingleSourceMask).
  803. any_of(Mask, [Limit](int I) { return I >= Limit; }))
  804. return Kind;
  805. int Index;
  806. switch (Kind) {
  807. case TTI::SK_PermuteSingleSrc:
  808. if (ShuffleVectorInst::isReverseMask(Mask))
  809. return TTI::SK_Reverse;
  810. if (ShuffleVectorInst::isZeroEltSplatMask(Mask))
  811. return TTI::SK_Broadcast;
  812. break;
  813. case TTI::SK_PermuteTwoSrc:
  814. if (ShuffleVectorInst::isSelectMask(Mask))
  815. return TTI::SK_Select;
  816. if (ShuffleVectorInst::isTransposeMask(Mask))
  817. return TTI::SK_Transpose;
  818. if (ShuffleVectorInst::isSpliceMask(Mask, Index))
  819. return TTI::SK_Splice;
  820. break;
  821. case TTI::SK_Select:
  822. case TTI::SK_Reverse:
  823. case TTI::SK_Broadcast:
  824. case TTI::SK_Transpose:
  825. case TTI::SK_InsertSubvector:
  826. case TTI::SK_ExtractSubvector:
  827. case TTI::SK_Splice:
  828. break;
  829. }
  830. return Kind;
  831. }
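// Illustrative example (editorial): a single-source shuffle with mask
// <3,2,1,0> is reclassified from SK_PermuteSingleSrc to SK_Reverse, and a
// <0,0,0,0> mask becomes SK_Broadcast, allowing getShuffleCost() and target
// overrides to use a more specific (usually cheaper) estimate.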
  832. InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
  833. ArrayRef<int> Mask,
  834. TTI::TargetCostKind CostKind, int Index,
  835. VectorType *SubTp,
  836. ArrayRef<const Value *> Args = std::nullopt) {
  837. switch (improveShuffleKindFromMask(Kind, Mask)) {
  838. case TTI::SK_Broadcast:
  839. if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  840. return getBroadcastShuffleOverhead(FVT, CostKind);
  841. return InstructionCost::getInvalid();
  842. case TTI::SK_Select:
  843. case TTI::SK_Splice:
  844. case TTI::SK_Reverse:
  845. case TTI::SK_Transpose:
  846. case TTI::SK_PermuteSingleSrc:
  847. case TTI::SK_PermuteTwoSrc:
  848. if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
  849. return getPermuteShuffleOverhead(FVT, CostKind);
  850. return InstructionCost::getInvalid();
  851. case TTI::SK_ExtractSubvector:
  852. return getExtractSubvectorOverhead(Tp, CostKind, Index,
  853. cast<FixedVectorType>(SubTp));
  854. case TTI::SK_InsertSubvector:
  855. return getInsertSubvectorOverhead(Tp, CostKind, Index,
  856. cast<FixedVectorType>(SubTp));
  857. }
  858. llvm_unreachable("Unknown TTI::ShuffleKind");
  859. }
  860. InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
  861. TTI::CastContextHint CCH,
  862. TTI::TargetCostKind CostKind,
  863. const Instruction *I = nullptr) {
  864. if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
  865. return 0;
  866. const TargetLoweringBase *TLI = getTLI();
  867. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  868. assert(ISD && "Invalid opcode");
  869. std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
  870. std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
  871. TypeSize SrcSize = SrcLT.second.getSizeInBits();
  872. TypeSize DstSize = DstLT.second.getSizeInBits();
  873. bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
  874. bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();
  875. switch (Opcode) {
  876. default:
  877. break;
  878. case Instruction::Trunc:
  879. // Check for NOOP conversions.
  880. if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
  881. return 0;
  882. [[fallthrough]];
  883. case Instruction::BitCast:
  884. // Bitcasts between types that are legalized to the same type are free, and
  885. // we assume an int to/from ptr cast of the same size is also free.
  886. if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
  887. SrcSize == DstSize)
  888. return 0;
  889. break;
  890. case Instruction::FPExt:
  891. if (I && getTLI()->isExtFree(I))
  892. return 0;
  893. break;
  894. case Instruction::ZExt:
  895. if (TLI->isZExtFree(SrcLT.second, DstLT.second))
  896. return 0;
  897. [[fallthrough]];
  898. case Instruction::SExt:
  899. if (I && getTLI()->isExtFree(I))
  900. return 0;
  901. // If this is a zext/sext of a load, return 0 if the corresponding
  902. // extending load exists on target and the result type is legal.
  903. if (CCH == TTI::CastContextHint::Normal) {
  904. EVT ExtVT = EVT::getEVT(Dst);
  905. EVT LoadVT = EVT::getEVT(Src);
  906. unsigned LType =
  907. ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
  908. if (DstLT.first == SrcLT.first &&
  909. TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
  910. return 0;
  911. }
  912. break;
  913. case Instruction::AddrSpaceCast:
  914. if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
  915. Dst->getPointerAddressSpace()))
  916. return 0;
  917. break;
  918. }
  919. auto *SrcVTy = dyn_cast<VectorType>(Src);
  920. auto *DstVTy = dyn_cast<VectorType>(Dst);
  921. // If the cast is marked as legal (or promote) then assume low cost.
  922. if (SrcLT.first == DstLT.first &&
  923. TLI->isOperationLegalOrPromote(ISD, DstLT.second))
  924. return SrcLT.first;
  925. // Handle scalar conversions.
  926. if (!SrcVTy && !DstVTy) {
  927. // Just check the op cost. If the operation is legal then assume it costs
  928. // 1.
  929. if (!TLI->isOperationExpand(ISD, DstLT.second))
  930. return 1;
  931. // Assume that illegal scalar instructions are expensive.
  932. return 4;
  933. }
  934. // Check vector-to-vector casts.
  935. if (DstVTy && SrcVTy) {
  936. // If the cast is between same-sized registers, then the check is simple.
  937. if (SrcLT.first == DstLT.first && SrcSize == DstSize) {
  938. // Assume that Zext is done using AND.
  939. if (Opcode == Instruction::ZExt)
  940. return SrcLT.first;
  941. // Assume that sext is done using SHL and SRA.
  942. if (Opcode == Instruction::SExt)
  943. return SrcLT.first * 2;
  944. // Just check the op cost. If the operation is legal then assume it costs 1
  945. // and multiply by the type-legalization overhead.
  947. if (!TLI->isOperationExpand(ISD, DstLT.second))
  948. return SrcLT.first * 1;
  949. }
  950. // If we are legalizing by splitting, query the concrete TTI for the cost
  951. // of casting the original vector twice. We also need to factor in the
  952. // cost of the split itself. Count that as 1, to be consistent with
  953. // getTypeLegalizationCost().
  954. bool SplitSrc =
  955. TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
  956. TargetLowering::TypeSplitVector;
  957. bool SplitDst =
  958. TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
  959. TargetLowering::TypeSplitVector;
  960. if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
  961. DstVTy->getElementCount().isVector()) {
  962. Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
  963. Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
  964. T *TTI = static_cast<T *>(this);
  965. // If both types need to be split then the split is free.
  966. InstructionCost SplitCost =
  967. (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
  968. return SplitCost +
  969. (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
  970. CostKind, I));
  971. }
  972. // Scalarization cost is Invalid, can't assume any num elements.
  973. if (isa<ScalableVectorType>(DstVTy))
  974. return InstructionCost::getInvalid();
  975. // In other cases where the source or destination are illegal, assume
  976. // the operation will get scalarized.
  977. unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
  978. InstructionCost Cost = thisT()->getCastInstrCost(
  979. Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
  980. // Return the cost of multiple scalar invocations plus the cost of
  981. // inserting and extracting the values.
  982. return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
  983. CostKind) +
  984. Num * Cost;
  985. }
  986. // We already handled vector-to-vector and scalar-to-scalar conversions.
  987. // This is where we handle bitcast between vectors and scalars. We need to
  988. // assume that the conversion is scalarized in one way or another.
  990. if (Opcode == Instruction::BitCast) {
  991. // Illegal bitcasts are done by storing and loading from a stack slot.
  992. return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
  993. /*Extract*/ true, CostKind)
  994. : 0) +
  995. (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
  996. /*Extract*/ false, CostKind)
  997. : 0);
  998. }
  999. llvm_unreachable("Unhandled cast");
  1000. }
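// Summary of the branching above (editorial note): casts that legalize to
// the same number of same-sized registers cost SrcLT.first (ZExt modeled as
// an AND, SExt as SHL+SRA); split-legalized vector casts recurse on the
// half-width types plus a possible split cost; everything else is priced as
// full scalarization, and scalable destinations return an Invalid cost.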
  1001. InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
  1002. VectorType *VecTy, unsigned Index) {
  1003. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  1004. return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
  1005. CostKind, Index, nullptr, nullptr) +
  1006. thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
  1007. TTI::CastContextHint::None, CostKind);
  1008. }
  1009. InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
  1010. const Instruction *I = nullptr) {
  1011. return BaseT::getCFInstrCost(Opcode, CostKind, I);
  1012. }
  1013. InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
  1014. CmpInst::Predicate VecPred,
  1015. TTI::TargetCostKind CostKind,
  1016. const Instruction *I = nullptr) {
  1017. const TargetLoweringBase *TLI = getTLI();
  1018. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  1019. assert(ISD && "Invalid opcode");
  1020. // TODO: Handle other cost kinds.
  1021. if (CostKind != TTI::TCK_RecipThroughput)
  1022. return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
  1023. I);
  1024. // Selects on vectors are actually vector selects.
  1025. if (ISD == ISD::SELECT) {
  1026. assert(CondTy && "CondTy must exist");
  1027. if (CondTy->isVectorTy())
  1028. ISD = ISD::VSELECT;
  1029. }
  1030. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  1031. if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
  1032. !TLI->isOperationExpand(ISD, LT.second)) {
  1033. // The operation is legal. Assume it costs 1. Multiply
  1034. // by the type-legalization overhead.
  1035. return LT.first * 1;
  1036. }
  1037. // Otherwise, assume that the operation is scalarized.
  1038. // TODO: If one of the types get legalized by splitting, handle this
  1039. // similarly to what getCastInstrCost() does.
  1040. if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
  1041. if (isa<ScalableVectorType>(ValTy))
  1042. return InstructionCost::getInvalid();
  1043. unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
  1044. if (CondTy)
  1045. CondTy = CondTy->getScalarType();
  1046. InstructionCost Cost = thisT()->getCmpSelInstrCost(
  1047. Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
  1048. // Return the cost of multiple scalar invocations plus the cost of
  1049. // inserting and extracting the values.
  1050. return getScalarizationOverhead(ValVTy, /*Insert*/ true,
  1051. /*Extract*/ false, CostKind) +
  1052. Num * Cost;
  1053. }
  1054. // Unknown scalar opcode.
  1055. return 1;
  1056. }
  1057. InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
  1058. TTI::TargetCostKind CostKind,
  1059. unsigned Index, Value *Op0, Value *Op1) {
  1060. return getRegUsageForType(Val->getScalarType());
  1061. }
  1062. InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
  1063. TTI::TargetCostKind CostKind,
  1064. unsigned Index) {
  1065. Value *Op0 = nullptr;
  1066. Value *Op1 = nullptr;
  1067. if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
  1068. Op0 = IE->getOperand(0);
  1069. Op1 = IE->getOperand(1);
  1070. }
  1071. return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
  1072. Op1);
  1073. }
  1074. InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
  1075. int VF,
  1076. const APInt &DemandedDstElts,
  1077. TTI::TargetCostKind CostKind) {
  1078. assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
  1079. "Unexpected size of DemandedDstElts.");
  1080. InstructionCost Cost;
  1081. auto *SrcVT = FixedVectorType::get(EltTy, VF);
  1082. auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);
  1083. // The Mask shuffling cost is the cost of extracting all the elements of the
  1084. // Mask and inserting each of them Factor times into the wide vector:
  1085. //
  1086. // E.g. an interleaved group with factor 3:
  1087. // %mask = icmp ult <8 x i32> %vec1, %vec2
  1088. // %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
  1089. // <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
1090. // The cost is estimated as extracting all mask elements from the <8xi1>
1091. // mask vector and inserting them Factor times into the <24xi1> shuffled
1092. // mask vector.
  1093. APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
  1094. Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
  1095. /*Insert*/ false,
  1096. /*Extract*/ true, CostKind);
  1097. Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
  1098. /*Insert*/ true,
  1099. /*Extract*/ false, CostKind);
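// Illustrative example (assuming a hypothetical target where each element
// extract and insert costs 1): for EltTy = i1, VF = 8, ReplicationFactor = 3
// with all destination lanes demanded, DemandedSrcElts covers all 8 source
// lanes, so the estimate is 8 extracts from the <8 x i1> source plus 24
// inserts into the <24 x i1> replicated vector, i.e. roughly 8 + 24 = 32.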
  1100. return Cost;
  1101. }
  1102. InstructionCost
  1103. getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
  1104. unsigned AddressSpace, TTI::TargetCostKind CostKind,
  1105. TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
  1106. const Instruction *I = nullptr) {
  1107. assert(!Src->isVoidTy() && "Invalid type");
1108. // Assume types that aren't handled by the lowering, such as structs, are expensive.
  1109. if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
  1110. return 4;
  1111. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
  1112. // Assuming that all loads of legal types cost 1.
  1113. InstructionCost Cost = LT.first;
  1114. if (CostKind != TTI::TCK_RecipThroughput)
  1115. return Cost;
  1116. const DataLayout &DL = this->getDataLayout();
  1117. if (Src->isVectorTy() &&
  1118. // In practice it's not currently possible to have a change in lane
  1119. // length for extending loads or truncating stores so both types should
  1120. // have the same scalable property.
  1121. TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
  1122. LT.second.getSizeInBits())) {
1123. // This is a vector load that legalizes to a larger type than the vector
1124. // itself. Unless the corresponding extending load or truncating store is
1125. // legal, this will scalarize.
  1126. TargetLowering::LegalizeAction LA = TargetLowering::Expand;
  1127. EVT MemVT = getTLI()->getValueType(DL, Src);
  1128. if (Opcode == Instruction::Store)
  1129. LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
  1130. else
  1131. LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
  1132. if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
  1133. // This is a vector load/store for some illegal type that is scalarized.
  1134. // We must account for the cost of building or decomposing the vector.
  1135. Cost += getScalarizationOverhead(
  1136. cast<VectorType>(Src), Opcode != Instruction::Store,
  1137. Opcode == Instruction::Store, CostKind);
  1138. }
  1139. }
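// Illustrative example (assuming a hypothetical target whose smallest legal
// vector containing <4 x i8> is v4i32 and which has no legal or custom
// truncating store from v4i32 to <4 x i8>): a store of <4 x i8> is charged
// the legalized cost LT.first plus the overhead of extracting the 4 elements,
// e.g. 1 + 4 = 5 if each element extract costs 1.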
  1140. return Cost;
  1141. }
  1142. InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
  1143. Align Alignment, unsigned AddressSpace,
  1144. TTI::TargetCostKind CostKind) {
  1145. return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
  1146. CostKind);
  1147. }
  1148. InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
  1149. const Value *Ptr, bool VariableMask,
  1150. Align Alignment,
  1151. TTI::TargetCostKind CostKind,
  1152. const Instruction *I = nullptr) {
  1153. return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
  1154. true, CostKind);
  1155. }
  1156. InstructionCost getInterleavedMemoryOpCost(
  1157. unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
  1158. Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
  1159. bool UseMaskForCond = false, bool UseMaskForGaps = false) {
  1160. // We cannot scalarize scalable vectors, so return Invalid.
  1161. if (isa<ScalableVectorType>(VecTy))
  1162. return InstructionCost::getInvalid();
  1163. auto *VT = cast<FixedVectorType>(VecTy);
  1164. unsigned NumElts = VT->getNumElements();
  1165. assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  1166. unsigned NumSubElts = NumElts / Factor;
  1167. auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);
1168. // First, the cost of the load/store operation.
  1169. InstructionCost Cost;
  1170. if (UseMaskForCond || UseMaskForGaps)
  1171. Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
  1172. AddressSpace, CostKind);
  1173. else
  1174. Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
  1175. CostKind);
  1176. // Legalize the vector type, and get the legalized and unlegalized type
  1177. // sizes.
  1178. MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
  1179. unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
  1180. unsigned VecTyLTSize = VecTyLT.getStoreSize();
  1181. // Scale the cost of the memory operation by the fraction of legalized
  1182. // instructions that will actually be used. We shouldn't account for the
  1183. // cost of dead instructions since they will be removed.
  1184. //
  1185. // E.g., An interleaved load of factor 8:
  1186. // %vec = load <16 x i64>, <16 x i64>* %ptr
  1187. // %v0 = shufflevector %vec, undef, <0, 8>
  1188. //
  1189. // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
  1190. // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
  1191. // type). The other loads are unused.
  1192. //
  1193. // TODO: Note that legalization can turn masked loads/stores into unmasked
  1194. // (legalized) loads/stores. This can be reflected in the cost.
  1195. if (Cost.isValid() && VecTySize > VecTyLTSize) {
  1196. // The number of loads of a legal type it will take to represent a load
  1197. // of the unlegalized vector type.
  1198. unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);
  1199. // The number of elements of the unlegalized type that correspond to a
  1200. // single legal instruction.
  1201. unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);
  1202. // Determine which legal instructions will be used.
  1203. BitVector UsedInsts(NumLegalInsts, false);
  1204. for (unsigned Index : Indices)
  1205. for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
  1206. UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
  1207. // Scale the cost of the load by the fraction of legal instructions that
  1208. // will be used.
  1209. Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
  1210. }
1211. // Then add the cost of the interleave operation.
  1212. assert(Indices.size() <= Factor &&
  1213. "Interleaved memory op has too many members");
  1214. const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
  1215. const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);
  1216. APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
  1217. for (unsigned Index : Indices) {
  1218. assert(Index < Factor && "Invalid index for interleaved memory op");
  1219. for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
  1220. DemandedLoadStoreElts.setBit(Index + Elm * Factor);
  1221. }
  1222. if (Opcode == Instruction::Load) {
1223. // The interleave cost is similar to extracting the sub-vectors' elements
1224. // from the wide vector and inserting them into the sub-vectors.
  1225. //
  1226. // E.g. An interleaved load of factor 2 (with one member of index 0):
  1227. // %vec = load <8 x i32>, <8 x i32>* %ptr
  1228. // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
1229. // The cost is estimated as extracting the elements at 0, 2, 4, 6 from the
1230. // <8 x i32> vector and inserting them into a <4 x i32> vector.
  1231. InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
  1232. SubVT, DemandedAllSubElts,
  1233. /*Insert*/ true, /*Extract*/ false, CostKind);
  1234. Cost += Indices.size() * InsSubCost;
  1235. Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
  1236. /*Insert*/ false,
  1237. /*Extract*/ true, CostKind);
  1238. } else {
1239. // The interleave cost is modeled as extracting elements from the sub
1240. // vectors and inserting them into the wide vector.
  1241. //
  1242. // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
  1243. // (using VF=4):
  1244. // %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
  1245. // %gaps.mask = <true, true, false, true, true, false,
  1246. // true, true, false, true, true, false>
  1247. // call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
  1248. // i32 Align, <12 x i1> %gaps.mask
1249. // The cost is estimated as extracting all elements (of the actual
1250. // members, excluding gaps) from both <4 x i32> vectors and inserting them
1251. // into the <12 x i32> vector.
  1252. InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
  1253. SubVT, DemandedAllSubElts,
  1254. /*Insert*/ false, /*Extract*/ true, CostKind);
  1255. Cost += ExtSubCost * Indices.size();
  1256. Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
  1257. /*Insert*/ true,
  1258. /*Extract*/ false, CostKind);
  1259. }
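// Illustrative example (unmasked load, assuming unit per-element insert and
// extract costs): an interleaved load of factor 8 from <16 x i64> with a
// single member at index 0 legalizes to eight v2i64 loads, of which only two
// are used, so the memory cost of 8 above is scaled to
// divideCeil(2 * 8, 8) = 2. The interleave part then adds 2 inserts into the
// <2 x i64> sub-vector plus 2 extracts from the wide vector, for a total of
// roughly 2 + 2 + 2 = 6.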
  1260. if (!UseMaskForCond)
  1261. return Cost;
  1262. Type *I8Type = Type::getInt8Ty(VT->getContext());
  1263. Cost += thisT()->getReplicationShuffleCost(
  1264. I8Type, Factor, NumSubElts,
  1265. UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
  1266. CostKind);
1267. // The Gaps mask is invariant and created outside the loop; therefore, the
1268. // cost of creating it is not accounted for here. However, if we have both
1269. // a MaskForGaps and some other mask that guards the execution of the
1270. // memory access, we need to account for the cost of And-ing the two masks
1271. // inside the loop.
  1272. if (UseMaskForGaps) {
  1273. auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
  1274. Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
  1275. CostKind);
  1276. }
  1277. return Cost;
  1278. }
  1279. /// Get intrinsic cost based on arguments.
  1280. InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
  1281. TTI::TargetCostKind CostKind) {
  1282. // Check for generically free intrinsics.
  1283. if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
  1284. return 0;
  1285. // Assume that target intrinsics are cheap.
  1286. Intrinsic::ID IID = ICA.getID();
  1287. if (Function::isTargetIntrinsic(IID))
  1288. return TargetTransformInfo::TCC_Basic;
  1289. if (ICA.isTypeBasedOnly())
  1290. return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
  1291. Type *RetTy = ICA.getReturnType();
  1292. ElementCount RetVF =
  1293. (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
  1294. : ElementCount::getFixed(1));
  1295. const IntrinsicInst *I = ICA.getInst();
  1296. const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
  1297. FastMathFlags FMF = ICA.getFlags();
  1298. switch (IID) {
  1299. default:
  1300. break;
  1301. case Intrinsic::powi:
  1302. if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
  1303. bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
  1304. if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
  1305. ShouldOptForSize)) {
  1306. // The cost is modeled on the expansion performed by ExpandPowI in
  1307. // SelectionDAGBuilder.
  1308. APInt Exponent = RHSC->getValue().abs();
  1309. unsigned ActiveBits = Exponent.getActiveBits();
  1310. unsigned PopCount = Exponent.countPopulation();
  1311. InstructionCost Cost = (ActiveBits + PopCount - 2) *
  1312. thisT()->getArithmeticInstrCost(
  1313. Instruction::FMul, RetTy, CostKind);
  1314. if (RHSC->getSExtValue() < 0)
  1315. Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
  1316. CostKind);
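// Illustrative example: for powi(x, 11) the exponent is 0b1011, so
// ActiveBits = 4 and PopCount = 3, giving (4 + 3 - 2) = 5 fmuls from the
// square-and-multiply expansion; powi(x, -11) additionally charges one fdiv
// for the final reciprocal. The actual totals scale with the target's fmul
// and fdiv costs queried above.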
  1317. return Cost;
  1318. }
  1319. }
  1320. break;
  1321. case Intrinsic::cttz:
  1322. // FIXME: If necessary, this should go in target-specific overrides.
  1323. if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
  1324. return TargetTransformInfo::TCC_Basic;
  1325. break;
  1326. case Intrinsic::ctlz:
  1327. // FIXME: If necessary, this should go in target-specific overrides.
  1328. if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
  1329. return TargetTransformInfo::TCC_Basic;
  1330. break;
  1331. case Intrinsic::memcpy:
  1332. return thisT()->getMemcpyCost(ICA.getInst());
  1333. case Intrinsic::masked_scatter: {
  1334. const Value *Mask = Args[3];
  1335. bool VarMask = !isa<Constant>(Mask);
  1336. Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
  1337. return thisT()->getGatherScatterOpCost(Instruction::Store,
  1338. ICA.getArgTypes()[0], Args[1],
  1339. VarMask, Alignment, CostKind, I);
  1340. }
  1341. case Intrinsic::masked_gather: {
  1342. const Value *Mask = Args[2];
  1343. bool VarMask = !isa<Constant>(Mask);
  1344. Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
  1345. return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
  1346. VarMask, Alignment, CostKind, I);
  1347. }
  1348. case Intrinsic::experimental_stepvector: {
  1349. if (isa<ScalableVectorType>(RetTy))
  1350. return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  1351. // The cost of materialising a constant integer vector.
  1352. return TargetTransformInfo::TCC_Basic;
  1353. }
  1354. case Intrinsic::vector_extract: {
  1355. // FIXME: Handle case where a scalable vector is extracted from a scalable
  1356. // vector
  1357. if (isa<ScalableVectorType>(RetTy))
  1358. return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  1359. unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
  1360. return thisT()->getShuffleCost(
  1361. TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
  1362. std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
  1363. }
  1364. case Intrinsic::vector_insert: {
  1365. // FIXME: Handle case where a scalable vector is inserted into a scalable
  1366. // vector
  1367. if (isa<ScalableVectorType>(Args[1]->getType()))
  1368. return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  1369. unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
  1370. return thisT()->getShuffleCost(
  1371. TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
  1372. std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
  1373. }
  1374. case Intrinsic::experimental_vector_reverse: {
  1375. return thisT()->getShuffleCost(
  1376. TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
  1377. CostKind, 0, cast<VectorType>(RetTy));
  1378. }
  1379. case Intrinsic::experimental_vector_splice: {
  1380. unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
  1381. return thisT()->getShuffleCost(
  1382. TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
  1383. CostKind, Index, cast<VectorType>(RetTy));
  1384. }
  1385. case Intrinsic::vector_reduce_add:
  1386. case Intrinsic::vector_reduce_mul:
  1387. case Intrinsic::vector_reduce_and:
  1388. case Intrinsic::vector_reduce_or:
  1389. case Intrinsic::vector_reduce_xor:
  1390. case Intrinsic::vector_reduce_smax:
  1391. case Intrinsic::vector_reduce_smin:
  1392. case Intrinsic::vector_reduce_fmax:
  1393. case Intrinsic::vector_reduce_fmin:
  1394. case Intrinsic::vector_reduce_umax:
  1395. case Intrinsic::vector_reduce_umin: {
  1396. IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
  1397. return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  1398. }
  1399. case Intrinsic::vector_reduce_fadd:
  1400. case Intrinsic::vector_reduce_fmul: {
  1401. IntrinsicCostAttributes Attrs(
  1402. IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
  1403. return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  1404. }
  1405. case Intrinsic::fshl:
  1406. case Intrinsic::fshr: {
  1407. const Value *X = Args[0];
  1408. const Value *Y = Args[1];
  1409. const Value *Z = Args[2];
  1410. const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
  1411. const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
  1412. const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
  1413. const TTI::OperandValueInfo OpInfoBW =
  1414. {TTI::OK_UniformConstantValue,
  1415. isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
  1416. : TTI::OP_None};
  1417. // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
  1418. // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
  1419. InstructionCost Cost = 0;
  1420. Cost +=
  1421. thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
  1422. Cost +=
  1423. thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
  1424. Cost += thisT()->getArithmeticInstrCost(
  1425. BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
  1426. {OpInfoZ.Kind, TTI::OP_None});
  1427. Cost += thisT()->getArithmeticInstrCost(
  1428. BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
  1429. {OpInfoZ.Kind, TTI::OP_None});
1430. // Non-constant shift amounts require a modulo.
  1431. if (!OpInfoZ.isConstant())
  1432. Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
  1433. CostKind, OpInfoZ, OpInfoBW);
  1434. // For non-rotates (X != Y) we must add shift-by-zero handling costs.
  1435. if (X != Y) {
  1436. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1437. Cost +=
  1438. thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  1439. CmpInst::ICMP_EQ, CostKind);
  1440. Cost +=
  1441. thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  1442. CmpInst::ICMP_EQ, CostKind);
  1443. }
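// Illustrative example (assuming a hypothetical target where each scalar
// or/sub/shl/lshr/urem/icmp/select costs 1): a scalar i32 fshl with a
// non-constant shift amount and X != Y is charged
// or + sub + shl + lshr + urem + icmp + select = 7, while a rotate (X == Y)
// by a constant amount is charged only or + sub + shl + lshr = 4.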
  1444. return Cost;
  1445. }
  1446. case Intrinsic::get_active_lane_mask: {
  1447. EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
  1448. EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);
  1449. // If we're not expanding the intrinsic then we assume this is cheap
  1450. // to implement.
  1451. if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
  1452. return getTypeLegalizationCost(RetTy).first;
  1453. }
  1454. // Create the expanded types that will be used to calculate the uadd_sat
  1455. // operation.
  1456. Type *ExpRetTy = VectorType::get(
  1457. ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
  1458. IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
  1459. InstructionCost Cost =
  1460. thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  1461. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
  1462. CmpInst::ICMP_ULT, CostKind);
  1463. return Cost;
  1464. }
  1465. }
  1466. // Assume that we need to scalarize this intrinsic.
  1467. // Compute the scalarization overhead based on Args for a vector
  1468. // intrinsic.
  1469. InstructionCost ScalarizationCost = InstructionCost::getInvalid();
  1470. if (RetVF.isVector() && !RetVF.isScalable()) {
  1471. ScalarizationCost = 0;
  1472. if (!RetTy->isVoidTy())
  1473. ScalarizationCost += getScalarizationOverhead(
  1474. cast<VectorType>(RetTy),
  1475. /*Insert*/ true, /*Extract*/ false, CostKind);
  1476. ScalarizationCost +=
  1477. getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
  1478. }
  1479. IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
  1480. ScalarizationCost);
  1481. return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  1482. }
  1483. /// Get intrinsic cost based on argument types.
  1484. /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
  1485. /// cost of scalarizing the arguments and the return value will be computed
  1486. /// based on types.
  1487. InstructionCost
  1488. getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
  1489. TTI::TargetCostKind CostKind) {
  1490. Intrinsic::ID IID = ICA.getID();
  1491. Type *RetTy = ICA.getReturnType();
  1492. const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
  1493. FastMathFlags FMF = ICA.getFlags();
  1494. InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
  1495. bool SkipScalarizationCost = ICA.skipScalarizationCost();
  1496. VectorType *VecOpTy = nullptr;
  1497. if (!Tys.empty()) {
  1498. // The vector reduction operand is operand 0 except for fadd/fmul.
  1499. // Their operand 0 is a scalar start value, so the vector op is operand 1.
  1500. unsigned VecTyIndex = 0;
  1501. if (IID == Intrinsic::vector_reduce_fadd ||
  1502. IID == Intrinsic::vector_reduce_fmul)
  1503. VecTyIndex = 1;
  1504. assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
  1505. VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
  1506. }
  1507. // Library call cost - other than size, make it expensive.
  1508. unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
  1509. unsigned ISD = 0;
  1510. switch (IID) {
  1511. default: {
  1512. // Scalable vectors cannot be scalarized, so return Invalid.
  1513. if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
  1514. return isa<ScalableVectorType>(Ty);
  1515. }))
  1516. return InstructionCost::getInvalid();
  1517. // Assume that we need to scalarize this intrinsic.
  1518. InstructionCost ScalarizationCost =
  1519. SkipScalarizationCost ? ScalarizationCostPassed : 0;
  1520. unsigned ScalarCalls = 1;
  1521. Type *ScalarRetTy = RetTy;
  1522. if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  1523. if (!SkipScalarizationCost)
  1524. ScalarizationCost = getScalarizationOverhead(
  1525. RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
  1526. ScalarCalls = std::max(ScalarCalls,
  1527. cast<FixedVectorType>(RetVTy)->getNumElements());
  1528. ScalarRetTy = RetTy->getScalarType();
  1529. }
  1530. SmallVector<Type *, 4> ScalarTys;
  1531. for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  1532. Type *Ty = Tys[i];
  1533. if (auto *VTy = dyn_cast<VectorType>(Ty)) {
  1534. if (!SkipScalarizationCost)
  1535. ScalarizationCost += getScalarizationOverhead(
  1536. VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
  1537. ScalarCalls = std::max(ScalarCalls,
  1538. cast<FixedVectorType>(VTy)->getNumElements());
  1539. Ty = Ty->getScalarType();
  1540. }
  1541. ScalarTys.push_back(Ty);
  1542. }
  1543. if (ScalarCalls == 1)
  1544. return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
  1545. IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
  1546. InstructionCost ScalarCost =
  1547. thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);
  1548. return ScalarCalls * ScalarCost + ScalarizationCost;
  1549. }
  1550. // Look for intrinsics that can be lowered directly or turned into a scalar
  1551. // intrinsic call.
  1552. case Intrinsic::sqrt:
  1553. ISD = ISD::FSQRT;
  1554. break;
  1555. case Intrinsic::sin:
  1556. ISD = ISD::FSIN;
  1557. break;
  1558. case Intrinsic::cos:
  1559. ISD = ISD::FCOS;
  1560. break;
  1561. case Intrinsic::exp:
  1562. ISD = ISD::FEXP;
  1563. break;
  1564. case Intrinsic::exp2:
  1565. ISD = ISD::FEXP2;
  1566. break;
  1567. case Intrinsic::log:
  1568. ISD = ISD::FLOG;
  1569. break;
  1570. case Intrinsic::log10:
  1571. ISD = ISD::FLOG10;
  1572. break;
  1573. case Intrinsic::log2:
  1574. ISD = ISD::FLOG2;
  1575. break;
  1576. case Intrinsic::fabs:
  1577. ISD = ISD::FABS;
  1578. break;
  1579. case Intrinsic::canonicalize:
  1580. ISD = ISD::FCANONICALIZE;
  1581. break;
  1582. case Intrinsic::minnum:
  1583. ISD = ISD::FMINNUM;
  1584. break;
  1585. case Intrinsic::maxnum:
  1586. ISD = ISD::FMAXNUM;
  1587. break;
  1588. case Intrinsic::minimum:
  1589. ISD = ISD::FMINIMUM;
  1590. break;
  1591. case Intrinsic::maximum:
  1592. ISD = ISD::FMAXIMUM;
  1593. break;
  1594. case Intrinsic::copysign:
  1595. ISD = ISD::FCOPYSIGN;
  1596. break;
  1597. case Intrinsic::floor:
  1598. ISD = ISD::FFLOOR;
  1599. break;
  1600. case Intrinsic::ceil:
  1601. ISD = ISD::FCEIL;
  1602. break;
  1603. case Intrinsic::trunc:
  1604. ISD = ISD::FTRUNC;
  1605. break;
  1606. case Intrinsic::nearbyint:
  1607. ISD = ISD::FNEARBYINT;
  1608. break;
  1609. case Intrinsic::rint:
  1610. ISD = ISD::FRINT;
  1611. break;
  1612. case Intrinsic::round:
  1613. ISD = ISD::FROUND;
  1614. break;
  1615. case Intrinsic::roundeven:
  1616. ISD = ISD::FROUNDEVEN;
  1617. break;
  1618. case Intrinsic::pow:
  1619. ISD = ISD::FPOW;
  1620. break;
  1621. case Intrinsic::fma:
  1622. ISD = ISD::FMA;
  1623. break;
  1624. case Intrinsic::fmuladd:
  1625. ISD = ISD::FMA;
  1626. break;
  1627. case Intrinsic::experimental_constrained_fmuladd:
  1628. ISD = ISD::STRICT_FMA;
  1629. break;
  1630. // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
  1631. case Intrinsic::lifetime_start:
  1632. case Intrinsic::lifetime_end:
  1633. case Intrinsic::sideeffect:
  1634. case Intrinsic::pseudoprobe:
  1635. case Intrinsic::arithmetic_fence:
  1636. return 0;
  1637. case Intrinsic::masked_store: {
  1638. Type *Ty = Tys[0];
  1639. Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  1640. return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
  1641. CostKind);
  1642. }
  1643. case Intrinsic::masked_load: {
  1644. Type *Ty = RetTy;
  1645. Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
  1646. return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
  1647. CostKind);
  1648. }
  1649. case Intrinsic::vector_reduce_add:
  1650. return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
  1651. std::nullopt, CostKind);
  1652. case Intrinsic::vector_reduce_mul:
  1653. return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
  1654. std::nullopt, CostKind);
  1655. case Intrinsic::vector_reduce_and:
  1656. return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
  1657. std::nullopt, CostKind);
  1658. case Intrinsic::vector_reduce_or:
  1659. return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
  1660. std::nullopt, CostKind);
  1661. case Intrinsic::vector_reduce_xor:
  1662. return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
  1663. std::nullopt, CostKind);
  1664. case Intrinsic::vector_reduce_fadd:
  1665. return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
  1666. FMF, CostKind);
  1667. case Intrinsic::vector_reduce_fmul:
  1668. return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
  1669. FMF, CostKind);
  1670. case Intrinsic::vector_reduce_smax:
  1671. case Intrinsic::vector_reduce_smin:
  1672. case Intrinsic::vector_reduce_fmax:
  1673. case Intrinsic::vector_reduce_fmin:
  1674. return thisT()->getMinMaxReductionCost(
  1675. VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
  1676. /*IsUnsigned=*/false, CostKind);
  1677. case Intrinsic::vector_reduce_umax:
  1678. case Intrinsic::vector_reduce_umin:
  1679. return thisT()->getMinMaxReductionCost(
  1680. VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
  1681. /*IsUnsigned=*/true, CostKind);
  1682. case Intrinsic::abs: {
  1683. // abs(X) = select(icmp(X,0),X,sub(0,X))
  1684. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1685. CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
  1686. InstructionCost Cost = 0;
  1687. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  1688. Pred, CostKind);
  1689. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  1690. Pred, CostKind);
  1691. // TODO: Should we add an OperandValueProperties::OP_Zero property?
  1692. Cost += thisT()->getArithmeticInstrCost(
  1693. BinaryOperator::Sub, RetTy, CostKind, {TTI::OK_UniformConstantValue, TTI::OP_None});
  1694. return Cost;
  1695. }
  1696. case Intrinsic::smax:
  1697. case Intrinsic::smin:
  1698. case Intrinsic::umax:
  1699. case Intrinsic::umin: {
  1700. // minmax(X,Y) = select(icmp(X,Y),X,Y)
  1701. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1702. bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
  1703. CmpInst::Predicate Pred =
  1704. IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
  1705. InstructionCost Cost = 0;
  1706. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  1707. Pred, CostKind);
  1708. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  1709. Pred, CostKind);
  1710. return Cost;
  1711. }
  1712. case Intrinsic::sadd_sat:
  1713. case Intrinsic::ssub_sat: {
  1714. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1715. Type *OpTy = StructType::create({RetTy, CondTy});
  1716. Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
  1717. ? Intrinsic::sadd_with_overflow
  1718. : Intrinsic::ssub_with_overflow;
  1719. CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
  1720. // SatMax -> Overflow && SumDiff < 0
  1721. // SatMin -> Overflow && SumDiff >= 0
  1722. InstructionCost Cost = 0;
  1723. IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
  1724. nullptr, ScalarizationCostPassed);
  1725. Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  1726. Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
  1727. Pred, CostKind);
  1728. Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
  1729. CondTy, Pred, CostKind);
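// Illustrative sketch (pseudo-IR, shown for i32) of the expansion being
// costed, roughly:
//   %t   = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sat = select i1 <SumDiff < 0>, i32 INT_MAX, i32 INT_MIN
//   %res = select i1 <Overflow>, i32 %sat, i32 <SumDiff>
// i.e. one overflow intrinsic, one compare and two selects, matching the
// terms accumulated above.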
  1730. return Cost;
  1731. }
  1732. case Intrinsic::uadd_sat:
  1733. case Intrinsic::usub_sat: {
  1734. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1735. Type *OpTy = StructType::create({RetTy, CondTy});
  1736. Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
  1737. ? Intrinsic::uadd_with_overflow
  1738. : Intrinsic::usub_with_overflow;
  1739. InstructionCost Cost = 0;
  1740. IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
  1741. nullptr, ScalarizationCostPassed);
  1742. Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  1743. Cost +=
  1744. thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
  1745. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  1746. return Cost;
  1747. }
  1748. case Intrinsic::smul_fix:
  1749. case Intrinsic::umul_fix: {
  1750. unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
  1751. Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
  1752. unsigned ExtOp =
  1753. IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
  1754. TTI::CastContextHint CCH = TTI::CastContextHint::None;
  1755. InstructionCost Cost = 0;
  1756. Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
  1757. Cost +=
  1758. thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  1759. Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
  1760. CCH, CostKind);
  1761. Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
  1762. CostKind,
  1763. {TTI::OK_AnyValue, TTI::OP_None},
  1764. {TTI::OK_UniformConstantValue, TTI::OP_None});
  1765. Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
  1766. {TTI::OK_AnyValue, TTI::OP_None},
  1767. {TTI::OK_UniformConstantValue, TTI::OP_None});
  1768. Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
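// Illustrative example (assuming a hypothetical target where every listed
// instruction costs 1): the fixed-point multiply is modeled as 2 extends +
// 1 wide mul + 2 truncs + 1 lshr + 1 shl + 1 or, i.e. roughly 8 units for a
// scalar smul.fix/umul.fix on such a target.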
  1769. return Cost;
  1770. }
  1771. case Intrinsic::sadd_with_overflow:
  1772. case Intrinsic::ssub_with_overflow: {
  1773. Type *SumTy = RetTy->getContainedType(0);
  1774. Type *OverflowTy = RetTy->getContainedType(1);
  1775. unsigned Opcode = IID == Intrinsic::sadd_with_overflow
  1776. ? BinaryOperator::Add
  1777. : BinaryOperator::Sub;
  1778. // Add:
  1779. // Overflow -> (Result < LHS) ^ (RHS < 0)
  1780. // Sub:
  1781. // Overflow -> (Result < LHS) ^ (RHS > 0)
  1782. InstructionCost Cost = 0;
  1783. Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
  1784. Cost += 2 * thisT()->getCmpSelInstrCost(
  1785. Instruction::ICmp, SumTy, OverflowTy,
  1786. CmpInst::ICMP_SGT, CostKind);
  1787. Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
  1788. CostKind);
  1789. return Cost;
  1790. }
  1791. case Intrinsic::uadd_with_overflow:
  1792. case Intrinsic::usub_with_overflow: {
  1793. Type *SumTy = RetTy->getContainedType(0);
  1794. Type *OverflowTy = RetTy->getContainedType(1);
  1795. unsigned Opcode = IID == Intrinsic::uadd_with_overflow
  1796. ? BinaryOperator::Add
  1797. : BinaryOperator::Sub;
  1798. CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
  1799. ? CmpInst::ICMP_ULT
  1800. : CmpInst::ICMP_UGT;
  1801. InstructionCost Cost = 0;
  1802. Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
  1803. Cost +=
  1804. thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
  1805. Pred, CostKind);
  1806. return Cost;
  1807. }
  1808. case Intrinsic::smul_with_overflow:
  1809. case Intrinsic::umul_with_overflow: {
  1810. Type *MulTy = RetTy->getContainedType(0);
  1811. Type *OverflowTy = RetTy->getContainedType(1);
  1812. unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
  1813. Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
  1814. bool IsSigned = IID == Intrinsic::smul_with_overflow;
  1815. unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
  1816. TTI::CastContextHint CCH = TTI::CastContextHint::None;
  1817. InstructionCost Cost = 0;
  1818. Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
  1819. Cost +=
  1820. thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
  1821. Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
  1822. CCH, CostKind);
  1823. Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
  1824. CostKind,
  1825. {TTI::OK_AnyValue, TTI::OP_None},
  1826. {TTI::OK_UniformConstantValue, TTI::OP_None});
  1827. if (IsSigned)
  1828. Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
  1829. CostKind,
  1830. {TTI::OK_AnyValue, TTI::OP_None},
  1831. {TTI::OK_UniformConstantValue, TTI::OP_None});
  1832. Cost += thisT()->getCmpSelInstrCost(
  1833. BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
  1834. return Cost;
  1835. }
  1836. case Intrinsic::fptosi_sat:
  1837. case Intrinsic::fptoui_sat: {
  1838. if (Tys.empty())
  1839. break;
  1840. Type *FromTy = Tys[0];
  1841. bool IsSigned = IID == Intrinsic::fptosi_sat;
  1842. InstructionCost Cost = 0;
  1843. IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
  1844. {FromTy, FromTy});
  1845. Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
  1846. IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
  1847. {FromTy, FromTy});
  1848. Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
  1849. Cost += thisT()->getCastInstrCost(
  1850. IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
  1851. TTI::CastContextHint::None, CostKind);
  1852. if (IsSigned) {
  1853. Type *CondTy = RetTy->getWithNewBitWidth(1);
  1854. Cost += thisT()->getCmpSelInstrCost(
  1855. BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
  1856. Cost += thisT()->getCmpSelInstrCost(
  1857. BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
  1858. }
  1859. return Cost;
  1860. }
  1861. case Intrinsic::ctpop:
  1862. ISD = ISD::CTPOP;
  1863. // In case of legalization use TCC_Expensive. This is cheaper than a
  1864. // library call but still not a cheap instruction.
  1865. SingleCallCost = TargetTransformInfo::TCC_Expensive;
  1866. break;
  1867. case Intrinsic::ctlz:
  1868. ISD = ISD::CTLZ;
  1869. break;
  1870. case Intrinsic::cttz:
  1871. ISD = ISD::CTTZ;
  1872. break;
  1873. case Intrinsic::bswap:
  1874. ISD = ISD::BSWAP;
  1875. break;
  1876. case Intrinsic::bitreverse:
  1877. ISD = ISD::BITREVERSE;
  1878. break;
  1879. }
  1880. const TargetLoweringBase *TLI = getTLI();
  1881. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
  1882. if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
  1883. if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
  1884. TLI->isFAbsFree(LT.second)) {
  1885. return 0;
  1886. }
  1887. // The operation is legal. Assume it costs 1.
  1888. // If the type is split to multiple registers, assume that there is some
  1889. // overhead to this.
  1890. // TODO: Once we have extract/insert subvector cost we need to use them.
  1891. if (LT.first > 1)
  1892. return (LT.first * 2);
  1893. else
  1894. return (LT.first * 1);
  1895. } else if (!TLI->isOperationExpand(ISD, LT.second)) {
  1896. // If the operation is custom lowered then assume
  1897. // that the code is twice as expensive.
  1898. return (LT.first * 2);
  1899. }
  1900. // If we can't lower fmuladd into an FMA estimate the cost as a floating
  1901. // point mul followed by an add.
  1902. if (IID == Intrinsic::fmuladd)
  1903. return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
  1904. CostKind) +
  1905. thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
  1906. CostKind);
  1907. if (IID == Intrinsic::experimental_constrained_fmuladd) {
  1908. IntrinsicCostAttributes FMulAttrs(
  1909. Intrinsic::experimental_constrained_fmul, RetTy, Tys);
  1910. IntrinsicCostAttributes FAddAttrs(
  1911. Intrinsic::experimental_constrained_fadd, RetTy, Tys);
  1912. return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
  1913. thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
  1914. }
  1915. // Else, assume that we need to scalarize this intrinsic. For math builtins
  1916. // this will emit a costly libcall, adding call overhead and spills. Make it
  1917. // very expensive.
  1918. if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
  1919. // Scalable vectors cannot be scalarized, so return Invalid.
  1920. if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
  1921. return isa<ScalableVectorType>(Ty);
  1922. }))
  1923. return InstructionCost::getInvalid();
  1924. InstructionCost ScalarizationCost =
  1925. SkipScalarizationCost
  1926. ? ScalarizationCostPassed
  1927. : getScalarizationOverhead(RetVTy, /*Insert*/ true,
  1928. /*Extract*/ false, CostKind);
  1929. unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
  1930. SmallVector<Type *, 4> ScalarTys;
  1931. for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  1932. Type *Ty = Tys[i];
  1933. if (Ty->isVectorTy())
  1934. Ty = Ty->getScalarType();
  1935. ScalarTys.push_back(Ty);
  1936. }
  1937. IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
  1938. InstructionCost ScalarCost =
  1939. thisT()->getIntrinsicInstrCost(Attrs, CostKind);
  1940. for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
  1941. if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
  1942. if (!ICA.skipScalarizationCost())
  1943. ScalarizationCost += getScalarizationOverhead(
  1944. VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
  1945. ScalarCalls = std::max(ScalarCalls,
  1946. cast<FixedVectorType>(VTy)->getNumElements());
  1947. }
  1948. }
  1949. return ScalarCalls * ScalarCost + ScalarizationCost;
  1950. }
  1951. // This is going to be turned into a library call, make it expensive.
  1952. return SingleCallCost;
  1953. }
  1954. /// Compute a cost of the given call instruction.
  1955. ///
  1956. /// Compute the cost of calling function F with return type RetTy and
  1957. /// argument types Tys. F might be nullptr, in this case the cost of an
  1958. /// arbitrary call with the specified signature will be returned.
1959. /// This is used, for instance, when we estimate the call of a vector
1960. /// counterpart of the given function.
  1961. /// \param F Called function, might be nullptr.
  1962. /// \param RetTy Return value types.
  1963. /// \param Tys Argument types.
  1964. /// \returns The cost of Call instruction.
  1965. InstructionCost getCallInstrCost(Function *F, Type *RetTy,
  1966. ArrayRef<Type *> Tys,
  1967. TTI::TargetCostKind CostKind) {
  1968. return 10;
  1969. }
  1970. unsigned getNumberOfParts(Type *Tp) {
  1971. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  1972. return LT.first.isValid() ? *LT.first.getValue() : 0;
  1973. }
  1974. InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
  1975. const SCEV *) {
  1976. return 0;
  1977. }
  1978. /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
1979. /// We're assuming that reduction operations are performed the following way:
  1980. ///
  1981. /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  1982. /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
  1983. /// \----------------v-------------/ \----------v------------/
  1984. /// n/2 elements n/2 elements
  1985. /// %red1 = op <n x t> %val, <n x t> val1
  1986. /// After this operation we have a vector %red1 where only the first n/2
  1987. /// elements are meaningful, the second n/2 elements are undefined and can be
  1988. /// dropped. All other operations are actually working with the vector of
  1989. /// length n/2, not n, though the real vector length is still n.
  1990. /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
  1991. /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
  1992. /// \----------------v-------------/ \----------v------------/
  1993. /// n/4 elements 3*n/4 elements
  1994. /// %red2 = op <n x t> %red1, <n x t> val2 - working with the vector of
  1995. /// length n/2, the resulting vector has length n/4 etc.
  1996. ///
  1997. /// The cost model should take into account that the actual length of the
  1998. /// vector is reduced on each iteration.
  1999. InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
  2000. TTI::TargetCostKind CostKind) {
  2001. // Targets must implement a default value for the scalable case, since
  2002. // we don't know how many lanes the vector has.
  2003. if (isa<ScalableVectorType>(Ty))
  2004. return InstructionCost::getInvalid();
  2005. Type *ScalarTy = Ty->getElementType();
  2006. unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
  2007. if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
  2008. ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
  2009. NumVecElts >= 2) {
  2010. // Or reduction for i1 is represented as:
  2011. // %val = bitcast <ReduxWidth x i1> to iReduxWidth
  2012. // %res = cmp ne iReduxWidth %val, 0
  2013. // And reduction for i1 is represented as:
  2014. // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2015. // %res = cmp eq iReduxWidth %val, 11111 (all ones)
  2016. Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
  2017. return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
  2018. TTI::CastContextHint::None, CostKind) +
  2019. thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
  2020. CmpInst::makeCmpResultType(ValTy),
  2021. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  2022. }
  2023. unsigned NumReduxLevels = Log2_32(NumVecElts);
  2024. InstructionCost ArithCost = 0;
  2025. InstructionCost ShuffleCost = 0;
  2026. std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
  2027. unsigned LongVectorCount = 0;
  2028. unsigned MVTLen =
  2029. LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
  2030. while (NumVecElts > MVTLen) {
  2031. NumVecElts /= 2;
  2032. VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
  2033. ShuffleCost +=
  2034. thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
  2035. CostKind, NumVecElts, SubTy);
  2036. ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
  2037. Ty = SubTy;
  2038. ++LongVectorCount;
  2039. }
  2040. NumReduxLevels -= LongVectorCount;
  2041. // The minimal length of the vector is limited by the real length of vector
  2042. // operations performed on the current platform. That's why several final
  2043. // reduction operations are performed on the vectors with the same
  2044. // architecture-dependent length.
  2045. // By default reductions need one shuffle per reduction level.
  2046. ShuffleCost +=
  2047. NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
  2048. std::nullopt, CostKind, 0, Ty);
  2049. ArithCost +=
  2050. NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
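// Illustrative example: a vecreduce.add of <8 x i32> on a target whose
// widest legal vector is v4i32 splits once in the loop above (one
// extract-subvector shuffle + one <4 x i32> add), then performs the
// remaining Log2(8) - 1 = 2 levels as 2 shuffles + 2 <4 x i32> adds, plus a
// final extractelement of lane 0, for roughly 3 shuffles, 3 adds and one
// extract in total (each priced by the target hooks above).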
  2051. return ShuffleCost + ArithCost +
  2052. thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
  2053. CostKind, 0, nullptr, nullptr);
  2054. }
  2055. /// Try to calculate the cost of performing strict (in-order) reductions,
  2056. /// which involves doing a sequence of floating point additions in lane
  2057. /// order, starting with an initial value. For example, consider a scalar
  2058. /// initial value 'InitVal' of type float and a vector of type <4 x float>:
  2059. ///
  2060. /// Vector = <float %v0, float %v1, float %v2, float %v3>
  2061. ///
  2062. /// %add1 = %InitVal + %v0
  2063. /// %add2 = %add1 + %v1
  2064. /// %add3 = %add2 + %v2
  2065. /// %add4 = %add3 + %v3
  2066. ///
  2067. /// As a simple estimate we can say the cost of such a reduction is 4 times
  2068. /// the cost of a scalar FP addition. We can only estimate the costs for
  2069. /// fixed-width vectors here because for scalable vectors we do not know the
  2070. /// runtime number of operations.
  2071. InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
  2072. TTI::TargetCostKind CostKind) {
  2073. // Targets must implement a default value for the scalable case, since
  2074. // we don't know how many lanes the vector has.
  2075. if (isa<ScalableVectorType>(Ty))
  2076. return InstructionCost::getInvalid();
  2077. auto *VTy = cast<FixedVectorType>(Ty);
  2078. InstructionCost ExtractCost = getScalarizationOverhead(
  2079. VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
  2080. InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
  2081. Opcode, VTy->getElementType(), CostKind);
  2082. ArithCost *= VTy->getNumElements();
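// Illustrative example: an in-order fadd reduction of <4 x float> is charged
// as 4 element extracts plus 4 scalar fadds, matching the chain of dependent
// additions shown in the comment above.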
  2083. return ExtractCost + ArithCost;
  2084. }
  2085. InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
  2086. std::optional<FastMathFlags> FMF,
  2087. TTI::TargetCostKind CostKind) {
  2088. if (TTI::requiresOrderedReduction(FMF))
  2089. return getOrderedReductionCost(Opcode, Ty, CostKind);
  2090. return getTreeReductionCost(Opcode, Ty, CostKind);
  2091. }
  2092. /// Try to calculate op costs for min/max reduction operations.
  2093. /// \param CondTy Conditional type for the Select instruction.
  2094. InstructionCost getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
  2095. bool IsUnsigned,
  2096. TTI::TargetCostKind CostKind) {
  2097. // Targets must implement a default value for the scalable case, since
  2098. // we don't know how many lanes the vector has.
  2099. if (isa<ScalableVectorType>(Ty))
  2100. return InstructionCost::getInvalid();
  2101. Type *ScalarTy = Ty->getElementType();
  2102. Type *ScalarCondTy = CondTy->getElementType();
  2103. unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
  2104. unsigned NumReduxLevels = Log2_32(NumVecElts);
  2105. unsigned CmpOpcode;
  2106. if (Ty->isFPOrFPVectorTy()) {
  2107. CmpOpcode = Instruction::FCmp;
  2108. } else {
  2109. assert(Ty->isIntOrIntVectorTy() &&
  2110. "expecting floating point or integer type for min/max reduction");
  2111. CmpOpcode = Instruction::ICmp;
  2112. }
  2113. InstructionCost MinMaxCost = 0;
  2114. InstructionCost ShuffleCost = 0;
  2115. std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
  2116. unsigned LongVectorCount = 0;
  2117. unsigned MVTLen =
  2118. LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
  2119. while (NumVecElts > MVTLen) {
  2120. NumVecElts /= 2;
  2121. auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
  2122. CondTy = FixedVectorType::get(ScalarCondTy, NumVecElts);
  2123. ShuffleCost +=
  2124. thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
  2125. CostKind, NumVecElts, SubTy);
  2126. MinMaxCost +=
  2127. thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
  2128. CmpInst::BAD_ICMP_PREDICATE, CostKind) +
  2129. thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
  2130. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  2131. Ty = SubTy;
  2132. ++LongVectorCount;
  2133. }
  2134. NumReduxLevels -= LongVectorCount;
  2135. // The minimal length of the vector is limited by the real length of vector
  2136. // operations performed on the current platform. That's why several final
2137. // reduction operations are performed on the vectors with the same
  2138. // architecture-dependent length.
  2139. ShuffleCost +=
  2140. NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
  2141. std::nullopt, CostKind, 0, Ty);
  2142. MinMaxCost +=
  2143. NumReduxLevels *
  2144. (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
  2145. CmpInst::BAD_ICMP_PREDICATE, CostKind) +
  2146. thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
  2147. CmpInst::BAD_ICMP_PREDICATE, CostKind));
  2148. // The last min/max should be in vector registers and we counted it above.
2149. // So we just need a single extractelement.
  2150. return ShuffleCost + MinMaxCost +
  2151. thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
  2152. CostKind, 0, nullptr, nullptr);
  2153. }
  2154. InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
  2155. Type *ResTy, VectorType *Ty,
  2156. std::optional<FastMathFlags> FMF,
  2157. TTI::TargetCostKind CostKind) {
  2158. // Without any native support, this is equivalent to the cost of
  2159. // vecreduce.opcode(ext(Ty A)).
  2160. VectorType *ExtTy = VectorType::get(ResTy, Ty);
  2161. InstructionCost RedCost =
  2162. thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
  2163. InstructionCost ExtCost = thisT()->getCastInstrCost(
  2164. IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
  2165. TTI::CastContextHint::None, CostKind);
  2166. return RedCost + ExtCost;
  2167. }
  2168. InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
  2169. VectorType *Ty,
  2170. TTI::TargetCostKind CostKind) {
  2171. // Without any native support, this is equivalent to the cost of
  2172. // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
  2173. // vecreduce.add(mul(A, B)).
  2174. VectorType *ExtTy = VectorType::get(ResTy, Ty);
  2175. InstructionCost RedCost = thisT()->getArithmeticReductionCost(
  2176. Instruction::Add, ExtTy, std::nullopt, CostKind);
  2177. InstructionCost ExtCost = thisT()->getCastInstrCost(
  2178. IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
  2179. TTI::CastContextHint::None, CostKind);
  2180. InstructionCost MulCost =
  2181. thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
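// Illustrative example: for ResTy = i32 and Ty = <8 x i16>, ExtTy is
// <8 x i32> and the estimate is one <8 x i32> add reduction plus one
// <8 x i32> mul plus two <8 x i16> -> <8 x i32> extends, each priced by the
// hooks above.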
  2182. return RedCost + MulCost + 2 * ExtCost;
  2183. }
  2184. InstructionCost getVectorSplitCost() { return 1; }
  2185. /// @}
  2186. };
  2187. /// Concrete BasicTTIImpl that can be used if no further customization
  2188. /// is needed.
  2189. class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  2190. using BaseT = BasicTTIImplBase<BasicTTIImpl>;
  2191. friend class BasicTTIImplBase<BasicTTIImpl>;
  2192. const TargetSubtargetInfo *ST;
  2193. const TargetLoweringBase *TLI;
  2194. const TargetSubtargetInfo *getST() const { return ST; }
  2195. const TargetLoweringBase *getTLI() const { return TLI; }
  2196. public:
  2197. explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
  2198. };
  2199. } // end namespace llvm
  2200. #endif // LLVM_CODEGEN_BASICTTIIMPL_H
  2201. #ifdef __GNUC__
  2202. #pragma GCC diagnostic pop
  2203. #endif