  1. //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. #include "AArch64TargetTransformInfo.h"
  9. #include "AArch64ExpandImm.h"
  10. #include "AArch64PerfectShuffle.h"
  11. #include "MCTargetDesc/AArch64AddressingModes.h"
  12. #include "llvm/Analysis/IVDescriptors.h"
  13. #include "llvm/Analysis/LoopInfo.h"
  14. #include "llvm/Analysis/TargetTransformInfo.h"
  15. #include "llvm/CodeGen/BasicTTIImpl.h"
  16. #include "llvm/CodeGen/CostTable.h"
  17. #include "llvm/CodeGen/TargetLowering.h"
  18. #include "llvm/IR/IntrinsicInst.h"
  19. #include "llvm/IR/Intrinsics.h"
  20. #include "llvm/IR/IntrinsicsAArch64.h"
  21. #include "llvm/IR/PatternMatch.h"
  22. #include "llvm/Support/Debug.h"
  23. #include "llvm/Transforms/InstCombine/InstCombiner.h"
  24. #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
  25. #include <algorithm>
  26. #include <optional>
  27. using namespace llvm;
  28. using namespace llvm::PatternMatch;
  29. #define DEBUG_TYPE "aarch64tti"
  30. static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
  31. cl::init(true), cl::Hidden);
  32. static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
  33. cl::Hidden);
  34. static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
  35. cl::init(10), cl::Hidden);
  36. namespace {
  37. class TailFoldingKind {
  38. private:
  39. uint8_t Bits = 0; // Currently defaults to disabled.
  40. public:
  41. enum TailFoldingOpts {
  42. TFDisabled = 0x0,
  43. TFReductions = 0x01,
  44. TFRecurrences = 0x02,
  45. TFSimple = 0x80,
  46. TFAll = TFReductions | TFRecurrences | TFSimple
  47. };
  48. void operator=(const std::string &Val) {
  49. if (Val.empty())
  50. return;
  51. SmallVector<StringRef, 6> TailFoldTypes;
  52. StringRef(Val).split(TailFoldTypes, '+', -1, false);
  53. for (auto TailFoldType : TailFoldTypes) {
  54. if (TailFoldType == "disabled")
  55. Bits = 0;
  56. else if (TailFoldType == "all")
  57. Bits = TFAll;
  58. else if (TailFoldType == "default")
  59. Bits = 0; // Currently defaults to never tail-folding.
  60. else if (TailFoldType == "simple")
  61. add(TFSimple);
  62. else if (TailFoldType == "reductions")
  63. add(TFReductions);
  64. else if (TailFoldType == "recurrences")
  65. add(TFRecurrences);
  66. else if (TailFoldType == "noreductions")
  67. remove(TFReductions);
  68. else if (TailFoldType == "norecurrences")
  69. remove(TFRecurrences);
  70. else {
  71. errs()
  72. << "invalid argument " << TailFoldType.str()
  73. << " to -sve-tail-folding=; each element must be one of: disabled, "
  74. "all, default, simple, reductions, noreductions, recurrences, "
  75. "norecurrences\n";
  76. }
  77. }
  78. }
  79. operator uint8_t() const { return Bits; }
  80. void add(uint8_t Flag) { Bits |= Flag; }
  81. void remove(uint8_t Flag) { Bits &= ~Flag; }
  82. };
  83. } // namespace
  84. TailFoldingKind TailFoldingKindLoc;
  85. cl::opt<TailFoldingKind, true, cl::parser<std::string>> SVETailFolding(
  86. "sve-tail-folding",
  87. cl::desc(
  88. "Control the use of vectorisation using tail-folding for SVE:"
  89. "\ndisabled No loop types will vectorize using tail-folding"
  90. "\ndefault Uses the default tail-folding settings for the target "
  91. "CPU"
  92. "\nall All legal loop types will vectorize using tail-folding"
  93. "\nsimple Use tail-folding for simple loops (not reductions or "
  94. "recurrences)"
  95. "\nreductions Use tail-folding for loops containing reductions"
  96. "\nrecurrences Use tail-folding for loops containing fixed order "
  97. "recurrences"),
  98. cl::location(TailFoldingKindLoc));
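// Usage note (an illustrative example, not an exhaustive list): the option
// accepts a '+'-separated list that is folded left-to-right by
// TailFoldingKind's operator= above, e.g.
//   -sve-tail-folding=all+noreductions
// first enables every tail-folding kind and then clears the reductions bit.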
  99. // Experimental option that will only be fully functional when the
  100. // code-generator is changed to use SVE instead of NEON for all fixed-width
  101. // operations.
  102. static cl::opt<bool> EnableFixedwidthAutovecInStreamingMode(
  103. "enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
  104. // Experimental option that will only be fully functional when the cost-model
  105. // and code-generator have been changed to avoid using scalable vector
  106. // instructions that are not legal in streaming SVE mode.
  107. static cl::opt<bool> EnableScalableAutovecInStreamingMode(
  108. "enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
  109. bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
  110. const Function *Callee) const {
  111. SMEAttrs CallerAttrs(*Caller);
  112. SMEAttrs CalleeAttrs(*Callee);
  113. if (CallerAttrs.requiresSMChange(CalleeAttrs,
  114. /*BodyOverridesInterface=*/true) ||
  115. CallerAttrs.requiresLazySave(CalleeAttrs) ||
  116. CalleeAttrs.hasNewZAInterface())
  117. return false;
  118. const TargetMachine &TM = getTLI()->getTargetMachine();
  119. const FeatureBitset &CallerBits =
  120. TM.getSubtargetImpl(*Caller)->getFeatureBits();
  121. const FeatureBitset &CalleeBits =
  122. TM.getSubtargetImpl(*Callee)->getFeatureBits();
  123. // Inline a callee if its target-features are a subset of the caller's
  124. // target-features.
  125. return (CallerBits & CalleeBits) == CalleeBits;
  126. }
  127. bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
  128. TargetTransformInfo::RegisterKind K) const {
  129. assert(K != TargetTransformInfo::RGK_Scalar);
  130. return K == TargetTransformInfo::RGK_FixedWidthVector;
  131. }
  132. /// Calculate the cost of materializing a 64-bit value. This helper
  133. /// method might only calculate a fraction of a larger immediate. Therefore it
  134. /// is valid to return a cost of ZERO.
  135. InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  136. // Check if the immediate can be encoded within an instruction.
  137. if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
  138. return 0;
  139. if (Val < 0)
  140. Val = ~Val;
  141. // Calculate how many moves we will need to materialize this constant.
  142. SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  143. AArch64_IMM::expandMOVImm(Val, 64, Insn);
  144. return Insn.size();
  145. }
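// Worked example (the exact expansions are assumptions based on the usual
// MOVZ/MOVK lowering, not computed here): Val == 0xffff has a logical-immediate
// encoding and costs 0, while a value such as 0x1234000000005678 needs a MOVZ
// plus one MOVK and therefore returns 2.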
  146. /// Calculate the cost of materializing the given constant.
  147. InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
  148. TTI::TargetCostKind CostKind) {
  149. assert(Ty->isIntegerTy());
  150. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  151. if (BitSize == 0)
  152. return ~0U;
  153. // Sign-extend all constants to a multiple of 64 bits.
  154. APInt ImmVal = Imm;
  155. if (BitSize & 0x3f)
  156. ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
  157. // Split the constant into 64-bit chunks and calculate the cost for each
  158. // chunk.
  159. InstructionCost Cost = 0;
  160. for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
  161. APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
  162. int64_t Val = Tmp.getSExtValue();
  163. Cost += getIntImmCost(Val);
  164. }
  165. // We need at least one instruction to materialize the constant.
  166. return std::max<InstructionCost>(1, Cost);
  167. }
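// For example (a sketch of the chunking above, not an additional rule): a
// 128-bit constant is costed as two 64-bit chunks; even if both chunks are free
// to materialize on their own (e.g. both are logical immediates), the std::max
// still reports a cost of 1 for the single instruction that must exist.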
  168. InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
  169. const APInt &Imm, Type *Ty,
  170. TTI::TargetCostKind CostKind,
  171. Instruction *Inst) {
  172. assert(Ty->isIntegerTy());
  173. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  174. // There is no cost model for constants with a bit size of 0. Return TCC_Free
  175. // here, so that constant hoisting will ignore this constant.
  176. if (BitSize == 0)
  177. return TTI::TCC_Free;
  178. unsigned ImmIdx = ~0U;
  179. switch (Opcode) {
  180. default:
  181. return TTI::TCC_Free;
  182. case Instruction::GetElementPtr:
  183. // Always hoist the base address of a GetElementPtr.
  184. if (Idx == 0)
  185. return 2 * TTI::TCC_Basic;
  186. return TTI::TCC_Free;
  187. case Instruction::Store:
  188. ImmIdx = 0;
  189. break;
  190. case Instruction::Add:
  191. case Instruction::Sub:
  192. case Instruction::Mul:
  193. case Instruction::UDiv:
  194. case Instruction::SDiv:
  195. case Instruction::URem:
  196. case Instruction::SRem:
  197. case Instruction::And:
  198. case Instruction::Or:
  199. case Instruction::Xor:
  200. case Instruction::ICmp:
  201. ImmIdx = 1;
  202. break;
  203. // Always return TCC_Free for the shift value of a shift instruction.
  204. case Instruction::Shl:
  205. case Instruction::LShr:
  206. case Instruction::AShr:
  207. if (Idx == 1)
  208. return TTI::TCC_Free;
  209. break;
  210. case Instruction::Trunc:
  211. case Instruction::ZExt:
  212. case Instruction::SExt:
  213. case Instruction::IntToPtr:
  214. case Instruction::PtrToInt:
  215. case Instruction::BitCast:
  216. case Instruction::PHI:
  217. case Instruction::Call:
  218. case Instruction::Select:
  219. case Instruction::Ret:
  220. case Instruction::Load:
  221. break;
  222. }
  223. if (Idx == ImmIdx) {
  224. int NumConstants = (BitSize + 63) / 64;
  225. InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  226. return (Cost <= NumConstants * TTI::TCC_Basic)
  227. ? static_cast<int>(TTI::TCC_Free)
  228. : Cost;
  229. }
  230. return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  231. }
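// Example of the effect above (assuming the immediate expands to a single
// MOVZ): for "add i64 %x, 1234" the constant's operand index matches ImmIdx,
// its materialization cost (1) does not exceed NumConstants * TCC_Basic, and
// the immediate is reported as TCC_Free, so constant hoisting leaves it in
// place.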
  232. InstructionCost
  233. AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
  234. const APInt &Imm, Type *Ty,
  235. TTI::TargetCostKind CostKind) {
  236. assert(Ty->isIntegerTy());
  237. unsigned BitSize = Ty->getPrimitiveSizeInBits();
  238. // There is no cost model for constants with a bit size of 0. Return TCC_Free
  239. // here, so that constant hoisting will ignore this constant.
  240. if (BitSize == 0)
  241. return TTI::TCC_Free;
  242. // Most (all?) AArch64 intrinsics do not support folding immediates into the
  243. // selected instruction, so we compute the materialization cost for the
  244. // immediate directly.
  245. if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
  246. return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  247. switch (IID) {
  248. default:
  249. return TTI::TCC_Free;
  250. case Intrinsic::sadd_with_overflow:
  251. case Intrinsic::uadd_with_overflow:
  252. case Intrinsic::ssub_with_overflow:
  253. case Intrinsic::usub_with_overflow:
  254. case Intrinsic::smul_with_overflow:
  255. case Intrinsic::umul_with_overflow:
  256. if (Idx == 1) {
  257. int NumConstants = (BitSize + 63) / 64;
  258. InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  259. return (Cost <= NumConstants * TTI::TCC_Basic)
  260. ? static_cast<int>(TTI::TCC_Free)
  261. : Cost;
  262. }
  263. break;
  264. case Intrinsic::experimental_stackmap:
  265. if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
  266. return TTI::TCC_Free;
  267. break;
  268. case Intrinsic::experimental_patchpoint_void:
  269. case Intrinsic::experimental_patchpoint_i64:
  270. if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
  271. return TTI::TCC_Free;
  272. break;
  273. case Intrinsic::experimental_gc_statepoint:
  274. if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
  275. return TTI::TCC_Free;
  276. break;
  277. }
  278. return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
  279. }
  280. TargetTransformInfo::PopcntSupportKind
  281. AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  282. assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  283. if (TyWidth == 32 || TyWidth == 64)
  284. return TTI::PSK_FastHardware;
  285. // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  286. return TTI::PSK_Software;
  287. }
  288. InstructionCost
  289. AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
  290. TTI::TargetCostKind CostKind) {
  291. auto *RetTy = ICA.getReturnType();
  292. switch (ICA.getID()) {
  293. case Intrinsic::umin:
  294. case Intrinsic::umax:
  295. case Intrinsic::smin:
  296. case Intrinsic::smax: {
  297. static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
  298. MVT::v8i16, MVT::v2i32, MVT::v4i32};
  299. auto LT = getTypeLegalizationCost(RetTy);
  300. // v2i64 types get converted to cmp+bif, hence the cost of 2
  301. if (LT.second == MVT::v2i64)
  302. return LT.first * 2;
  303. if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
  304. return LT.first;
  305. break;
  306. }
  307. case Intrinsic::sadd_sat:
  308. case Intrinsic::ssub_sat:
  309. case Intrinsic::uadd_sat:
  310. case Intrinsic::usub_sat: {
  311. static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
  312. MVT::v8i16, MVT::v2i32, MVT::v4i32,
  313. MVT::v2i64};
  314. auto LT = getTypeLegalizationCost(RetTy);
  315. // This is a base cost of 1 for the vadd, plus 3 extra shifts if we
  316. // need to extend the type, as it uses shr(qadd(shl, shl)).
  317. unsigned Instrs =
  318. LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
  319. if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
  320. return LT.first * Instrs;
  321. break;
  322. }
  323. case Intrinsic::abs: {
  324. static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16,
  325. MVT::v8i16, MVT::v2i32, MVT::v4i32,
  326. MVT::v2i64};
  327. auto LT = getTypeLegalizationCost(RetTy);
  328. if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
  329. return LT.first;
  330. break;
  331. }
  332. case Intrinsic::experimental_stepvector: {
  333. InstructionCost Cost = 1; // Cost of the `index' instruction
  334. auto LT = getTypeLegalizationCost(RetTy);
  335. // Legalisation of illegal vectors involves an `index' instruction plus
  336. // (LT.first - 1) vector adds.
  337. if (LT.first > 1) {
  338. Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
  339. InstructionCost AddCost =
  340. getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
  341. Cost += AddCost * (LT.first - 1);
  342. }
  343. return Cost;
  344. }
  345. case Intrinsic::bitreverse: {
  346. static const CostTblEntry BitreverseTbl[] = {
  347. {Intrinsic::bitreverse, MVT::i32, 1},
  348. {Intrinsic::bitreverse, MVT::i64, 1},
  349. {Intrinsic::bitreverse, MVT::v8i8, 1},
  350. {Intrinsic::bitreverse, MVT::v16i8, 1},
  351. {Intrinsic::bitreverse, MVT::v4i16, 2},
  352. {Intrinsic::bitreverse, MVT::v8i16, 2},
  353. {Intrinsic::bitreverse, MVT::v2i32, 2},
  354. {Intrinsic::bitreverse, MVT::v4i32, 2},
  355. {Intrinsic::bitreverse, MVT::v1i64, 2},
  356. {Intrinsic::bitreverse, MVT::v2i64, 2},
  357. };
  358. const auto LegalisationCost = getTypeLegalizationCost(RetTy);
  359. const auto *Entry =
  360. CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
  361. if (Entry) {
  362. // The cost model uses the legal type (i32) that i8 and i16 are promoted to,
  363. // plus 1, so that we match the actual lowering cost
  364. if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
  365. TLI->getValueType(DL, RetTy, true) == MVT::i16)
  366. return LegalisationCost.first * Entry->Cost + 1;
  367. return LegalisationCost.first * Entry->Cost;
  368. }
  369. break;
  370. }
  371. case Intrinsic::ctpop: {
  372. if (!ST->hasNEON()) {
  373. // 32-bit or 64-bit ctpop without NEON is 12 instructions.
  374. return getTypeLegalizationCost(RetTy).first * 12;
  375. }
  376. static const CostTblEntry CtpopCostTbl[] = {
  377. {ISD::CTPOP, MVT::v2i64, 4},
  378. {ISD::CTPOP, MVT::v4i32, 3},
  379. {ISD::CTPOP, MVT::v8i16, 2},
  380. {ISD::CTPOP, MVT::v16i8, 1},
  381. {ISD::CTPOP, MVT::i64, 4},
  382. {ISD::CTPOP, MVT::v2i32, 3},
  383. {ISD::CTPOP, MVT::v4i16, 2},
  384. {ISD::CTPOP, MVT::v8i8, 1},
  385. {ISD::CTPOP, MVT::i32, 5},
  386. };
  387. auto LT = getTypeLegalizationCost(RetTy);
  388. MVT MTy = LT.second;
  389. if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
  390. // Extra cost of +1 when illegal vector types are legalized by promoting
  391. // the integer type.
  392. int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
  393. RetTy->getScalarSizeInBits()
  394. ? 1
  395. : 0;
  396. return LT.first * Entry->Cost + ExtraCost;
  397. }
  398. break;
  399. }
  400. case Intrinsic::sadd_with_overflow:
  401. case Intrinsic::uadd_with_overflow:
  402. case Intrinsic::ssub_with_overflow:
  403. case Intrinsic::usub_with_overflow:
  404. case Intrinsic::smul_with_overflow:
  405. case Intrinsic::umul_with_overflow: {
  406. static const CostTblEntry WithOverflowCostTbl[] = {
  407. {Intrinsic::sadd_with_overflow, MVT::i8, 3},
  408. {Intrinsic::uadd_with_overflow, MVT::i8, 3},
  409. {Intrinsic::sadd_with_overflow, MVT::i16, 3},
  410. {Intrinsic::uadd_with_overflow, MVT::i16, 3},
  411. {Intrinsic::sadd_with_overflow, MVT::i32, 1},
  412. {Intrinsic::uadd_with_overflow, MVT::i32, 1},
  413. {Intrinsic::sadd_with_overflow, MVT::i64, 1},
  414. {Intrinsic::uadd_with_overflow, MVT::i64, 1},
  415. {Intrinsic::ssub_with_overflow, MVT::i8, 3},
  416. {Intrinsic::usub_with_overflow, MVT::i8, 3},
  417. {Intrinsic::ssub_with_overflow, MVT::i16, 3},
  418. {Intrinsic::usub_with_overflow, MVT::i16, 3},
  419. {Intrinsic::ssub_with_overflow, MVT::i32, 1},
  420. {Intrinsic::usub_with_overflow, MVT::i32, 1},
  421. {Intrinsic::ssub_with_overflow, MVT::i64, 1},
  422. {Intrinsic::usub_with_overflow, MVT::i64, 1},
  423. {Intrinsic::smul_with_overflow, MVT::i8, 5},
  424. {Intrinsic::umul_with_overflow, MVT::i8, 4},
  425. {Intrinsic::smul_with_overflow, MVT::i16, 5},
  426. {Intrinsic::umul_with_overflow, MVT::i16, 4},
  427. {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
  428. {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
  429. {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
  430. {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
  431. };
  432. EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
  433. if (MTy.isSimple())
  434. if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
  435. MTy.getSimpleVT()))
  436. return Entry->Cost;
  437. break;
  438. }
  439. case Intrinsic::fptosi_sat:
  440. case Intrinsic::fptoui_sat: {
  441. if (ICA.getArgTypes().empty())
  442. break;
  443. bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
  444. auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
  445. EVT MTy = TLI->getValueType(DL, RetTy);
  446. // Check for the legal types, where the size of the input and the output are
  447. // the same, or where we are using cvt f64->i32 or f32->i64.
  448. if ((LT.second == MVT::f32 || LT.second == MVT::f64 ||
  449. LT.second == MVT::v2f32 || LT.second == MVT::v4f32 ||
  450. LT.second == MVT::v2f64) &&
  451. (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() ||
  452. (LT.second == MVT::f64 && MTy == MVT::i32) ||
  453. (LT.second == MVT::f32 && MTy == MVT::i64)))
  454. return LT.first;
  455. // Similarly for fp16 sizes
  456. if (ST->hasFullFP16() &&
  457. ((LT.second == MVT::f16 && MTy == MVT::i32) ||
  458. ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) &&
  459. (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits()))))
  460. return LT.first;
  461. // Otherwise we use a legal convert followed by a min+max
  462. if ((LT.second.getScalarType() == MVT::f32 ||
  463. LT.second.getScalarType() == MVT::f64 ||
  464. (ST->hasFullFP16() && LT.second.getScalarType() == MVT::f16)) &&
  465. LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
  466. Type *LegalTy =
  467. Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits());
  468. if (LT.second.isVector())
  469. LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount());
  470. InstructionCost Cost = 1;
  471. IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin : Intrinsic::umin,
  472. LegalTy, {LegalTy, LegalTy});
  473. Cost += getIntrinsicInstrCost(Attrs1, CostKind);
  474. IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax : Intrinsic::umax,
  475. LegalTy, {LegalTy, LegalTy});
  476. Cost += getIntrinsicInstrCost(Attrs2, CostKind);
  477. return LT.first * Cost;
  478. }
  479. break;
  480. }
  481. default:
  482. break;
  483. }
  484. return BaseT::getIntrinsicInstrCost(ICA, CostKind);
  485. }
  486. /// The function removes redundant reinterpret (svbool) casts in the presence
  487. /// of control flow.
  488. static std::optional<Instruction *> processPhiNode(InstCombiner &IC,
  489. IntrinsicInst &II) {
  490. SmallVector<Instruction *, 32> Worklist;
  491. auto RequiredType = II.getType();
  492. auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
  493. assert(PN && "Expected Phi Node!");
  494. // Don't create a new Phi unless we can remove the old one.
  495. if (!PN->hasOneUse())
  496. return std::nullopt;
  497. for (Value *IncValPhi : PN->incoming_values()) {
  498. auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
  499. if (!Reinterpret ||
  500. Reinterpret->getIntrinsicID() !=
  501. Intrinsic::aarch64_sve_convert_to_svbool ||
  502. RequiredType != Reinterpret->getArgOperand(0)->getType())
  503. return std::nullopt;
  504. }
  505. // Create the new Phi
  506. LLVMContext &Ctx = PN->getContext();
  507. IRBuilder<> Builder(Ctx);
  508. Builder.SetInsertPoint(PN);
  509. PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
  510. Worklist.push_back(PN);
  511. for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
  512. auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
  513. NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
  514. Worklist.push_back(Reinterpret);
  515. }
  516. // Cleanup Phi Node and reinterprets
  517. return IC.replaceInstUsesWith(II, NPN);
  518. }
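// A hedged IR sketch of the rewrite performed above (names and element counts
// are invented for illustration):
//   %p = phi <vscale x 16 x i1> [ %a.sv, %bb0 ], [ %b.sv, %bb1 ]
//   %r = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %p)
// where each %x.sv is convert.to.svbool of a <vscale x 4 x i1> value, becomes a
// phi directly over those <vscale x 4 x i1> values, letting the original phi
// and the reinterprets be cleaned up afterwards.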
  519. // (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _)))
  520. // => (binop (pred) (from_svbool _) (from_svbool _))
  521. //
  522. // The above transformation eliminates a `to_svbool` in the predicate
  523. // operand of bitwise operation `binop` by narrowing the vector width of
  524. // the operation. For example, it would convert a `<vscale x 16 x i1>
  525. // and` into a `<vscale x 4 x i1> and`. This is profitable because
  526. // to_svbool must zero the new lanes during widening, whereas
  527. // from_svbool is free.
  528. static std::optional<Instruction *>
  529. tryCombineFromSVBoolBinOp(InstCombiner &IC, IntrinsicInst &II) {
  530. auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
  531. if (!BinOp)
  532. return std::nullopt;
  533. auto IntrinsicID = BinOp->getIntrinsicID();
  534. switch (IntrinsicID) {
  535. case Intrinsic::aarch64_sve_and_z:
  536. case Intrinsic::aarch64_sve_bic_z:
  537. case Intrinsic::aarch64_sve_eor_z:
  538. case Intrinsic::aarch64_sve_nand_z:
  539. case Intrinsic::aarch64_sve_nor_z:
  540. case Intrinsic::aarch64_sve_orn_z:
  541. case Intrinsic::aarch64_sve_orr_z:
  542. break;
  543. default:
  544. return std::nullopt;
  545. }
  546. auto BinOpPred = BinOp->getOperand(0);
  547. auto BinOpOp1 = BinOp->getOperand(1);
  548. auto BinOpOp2 = BinOp->getOperand(2);
  549. auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
  550. if (!PredIntr ||
  551. PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
  552. return std::nullopt;
  553. auto PredOp = PredIntr->getOperand(0);
  554. auto PredOpTy = cast<VectorType>(PredOp->getType());
  555. if (PredOpTy != II.getType())
  556. return std::nullopt;
  557. IRBuilder<> Builder(II.getContext());
  558. Builder.SetInsertPoint(&II);
  559. SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
  560. auto NarrowBinOpOp1 = Builder.CreateIntrinsic(
  561. Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
  562. NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
  563. if (BinOpOp1 == BinOpOp2)
  564. NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
  565. else
  566. NarrowedBinOpArgs.push_back(Builder.CreateIntrinsic(
  567. Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));
  568. auto NarrowedBinOp =
  569. Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
  570. return IC.replaceInstUsesWith(II, NarrowedBinOp);
  571. }
  572. static std::optional<Instruction *>
  573. instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II) {
  574. // If the reinterpret instruction operand is a PHI Node
  575. if (isa<PHINode>(II.getArgOperand(0)))
  576. return processPhiNode(IC, II);
  577. if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
  578. return BinOpCombine;
  579. SmallVector<Instruction *, 32> CandidatesForRemoval;
  580. Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
  581. const auto *IVTy = cast<VectorType>(II.getType());
  582. // Walk the chain of conversions.
  583. while (Cursor) {
  584. // If the type of the cursor has fewer lanes than the final result, zeroing
  585. // must take place, which breaks the equivalence chain.
  586. const auto *CursorVTy = cast<VectorType>(Cursor->getType());
  587. if (CursorVTy->getElementCount().getKnownMinValue() <
  588. IVTy->getElementCount().getKnownMinValue())
  589. break;
  590. // If the cursor has the same type as I, it is a viable replacement.
  591. if (Cursor->getType() == IVTy)
  592. EarliestReplacement = Cursor;
  593. auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
  594. // If this is not an SVE conversion intrinsic, this is the end of the chain.
  595. if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
  596. Intrinsic::aarch64_sve_convert_to_svbool ||
  597. IntrinsicCursor->getIntrinsicID() ==
  598. Intrinsic::aarch64_sve_convert_from_svbool))
  599. break;
  600. CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
  601. Cursor = IntrinsicCursor->getOperand(0);
  602. }
  603. // If no viable replacement in the conversion chain was found, there is
  604. // nothing to do.
  605. if (!EarliestReplacement)
  606. return std::nullopt;
  607. return IC.replaceInstUsesWith(II, EarliestReplacement);
  608. }
  609. static std::optional<Instruction *> instCombineSVESel(InstCombiner &IC,
  610. IntrinsicInst &II) {
  611. IRBuilder<> Builder(&II);
  612. auto Select = Builder.CreateSelect(II.getOperand(0), II.getOperand(1),
  613. II.getOperand(2));
  614. return IC.replaceInstUsesWith(II, Select);
  615. }
  616. static std::optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
  617. IntrinsicInst &II) {
  618. IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  619. if (!Pg)
  620. return std::nullopt;
  621. if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
  622. return std::nullopt;
  623. const auto PTruePattern =
  624. cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  625. if (PTruePattern != AArch64SVEPredPattern::vl1)
  626. return std::nullopt;
  627. // The intrinsic is inserting into lane zero so use an insert instead.
  628. auto *IdxTy = Type::getInt64Ty(II.getContext());
  629. auto *Insert = InsertElementInst::Create(
  630. II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
  631. Insert->insertBefore(&II);
  632. Insert->takeName(&II);
  633. return IC.replaceInstUsesWith(II, Insert);
  634. }
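// Sketch of the rewrite above (operand order taken from the code; the IR
// spelling is illustrative):
//   sve.dup(%inactive, ptrue(vl1), %scalar)
//     --> insertelement %inactive, %scalar, i64 0
// i.e. with a single-lane predicate the merging dup only writes lane zero.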
  635. static std::optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
  636. IntrinsicInst &II) {
  637. // Replace DupX with a regular IR splat.
  638. IRBuilder<> Builder(II.getContext());
  639. Builder.SetInsertPoint(&II);
  640. auto *RetTy = cast<ScalableVectorType>(II.getType());
  641. Value *Splat =
  642. Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0));
  643. Splat->takeName(&II);
  644. return IC.replaceInstUsesWith(II, Splat);
  645. }
  646. static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
  647. IntrinsicInst &II) {
  648. LLVMContext &Ctx = II.getContext();
  649. IRBuilder<> Builder(Ctx);
  650. Builder.SetInsertPoint(&II);
  651. // Check that the predicate is all active
  652. auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
  653. if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
  654. return std::nullopt;
  655. const auto PTruePattern =
  656. cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
  657. if (PTruePattern != AArch64SVEPredPattern::all)
  658. return std::nullopt;
  659. // Check that we have a compare of zero..
  660. auto *SplatValue =
  661. dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
  662. if (!SplatValue || !SplatValue->isZero())
  663. return std::nullopt;
  664. // ..against a dupq
  665. auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
  666. if (!DupQLane ||
  667. DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
  668. return std::nullopt;
  669. // Where the dupq is a lane 0 replicate of a vector insert
  670. if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
  671. return std::nullopt;
  672. auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
  673. if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
  674. return std::nullopt;
  675. // Where the vector insert is a fixed constant vector insert into undef at
  676. // index zero
  677. if (!isa<UndefValue>(VecIns->getArgOperand(0)))
  678. return std::nullopt;
  679. if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
  680. return std::nullopt;
  681. auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
  682. if (!ConstVec)
  683. return std::nullopt;
  684. auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
  685. auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
  686. if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
  687. return std::nullopt;
  688. unsigned NumElts = VecTy->getNumElements();
  689. unsigned PredicateBits = 0;
  690. // Expand intrinsic operands to a 16-bit byte level predicate
  691. for (unsigned I = 0; I < NumElts; ++I) {
  692. auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
  693. if (!Arg)
  694. return std::nullopt;
  695. if (!Arg->isZero())
  696. PredicateBits |= 1 << (I * (16 / NumElts));
  697. }
  698. // If all bits are zero bail early with an empty predicate
  699. if (PredicateBits == 0) {
  700. auto *PFalse = Constant::getNullValue(II.getType());
  701. PFalse->takeName(&II);
  702. return IC.replaceInstUsesWith(II, PFalse);
  703. }
  704. // Calculate largest predicate type used (where byte predicate is largest)
  705. unsigned Mask = 8;
  706. for (unsigned I = 0; I < 16; ++I)
  707. if ((PredicateBits & (1 << I)) != 0)
  708. Mask |= (I % 8);
  709. unsigned PredSize = Mask & -Mask;
  710. auto *PredType = ScalableVectorType::get(
  711. Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
  712. // Ensure all relevant bits are set
  713. for (unsigned I = 0; I < 16; I += PredSize)
  714. if ((PredicateBits & (1 << I)) == 0)
  715. return std::nullopt;
  716. auto *PTruePat =
  717. ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  718. auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
  719. {PredType}, {PTruePat});
  720. auto *ConvertToSVBool = Builder.CreateIntrinsic(
  721. Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
  722. auto *ConvertFromSVBool =
  723. Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
  724. {II.getType()}, {ConvertToSVBool});
  725. ConvertFromSVBool->takeName(&II);
  726. return IC.replaceInstUsesWith(II, ConvertFromSVBool);
  727. }
  728. static std::optional<Instruction *> instCombineSVELast(InstCombiner &IC,
  729. IntrinsicInst &II) {
  730. IRBuilder<> Builder(II.getContext());
  731. Builder.SetInsertPoint(&II);
  732. Value *Pg = II.getArgOperand(0);
  733. Value *Vec = II.getArgOperand(1);
  734. auto IntrinsicID = II.getIntrinsicID();
  735. bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
  736. // lastX(splat(X)) --> X
  737. if (auto *SplatVal = getSplatValue(Vec))
  738. return IC.replaceInstUsesWith(II, SplatVal);
  739. // If x and/or y is a splat value then:
  740. // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
  741. Value *LHS, *RHS;
  742. if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
  743. if (isSplatValue(LHS) || isSplatValue(RHS)) {
  744. auto *OldBinOp = cast<BinaryOperator>(Vec);
  745. auto OpC = OldBinOp->getOpcode();
  746. auto *NewLHS =
  747. Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
  748. auto *NewRHS =
  749. Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
  750. auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
  751. OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
  752. return IC.replaceInstUsesWith(II, NewBinOp);
  753. }
  754. }
  755. auto *C = dyn_cast<Constant>(Pg);
  756. if (IsAfter && C && C->isNullValue()) {
  757. // The intrinsic is extracting lane 0 so use an extract instead.
  758. auto *IdxTy = Type::getInt64Ty(II.getContext());
  759. auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
  760. Extract->insertBefore(&II);
  761. Extract->takeName(&II);
  762. return IC.replaceInstUsesWith(II, Extract);
  763. }
  764. auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
  765. if (!IntrPG)
  766. return std::nullopt;
  767. if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
  768. return std::nullopt;
  769. const auto PTruePattern =
  770. cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
  771. // Can the intrinsic's predicate be converted to a known constant index?
  772. unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
  773. if (!MinNumElts)
  774. return std::nullopt;
  775. unsigned Idx = MinNumElts - 1;
  776. // Increment the index if extracting the element after the last active
  777. // predicate element.
  778. if (IsAfter)
  779. ++Idx;
  780. // Ignore extracts whose index is larger than the known minimum vector
  781. // length. NOTE: This is an artificial constraint where we prefer to
  782. // maintain what the user asked for until an alternative is proven faster.
  783. auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
  784. if (Idx >= PgVTy->getMinNumElements())
  785. return std::nullopt;
  786. // The intrinsic is extracting a fixed lane so use an extract instead.
  787. auto *IdxTy = Type::getInt64Ty(II.getContext());
  788. auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
  789. Extract->insertBefore(&II);
  790. Extract->takeName(&II);
  791. return IC.replaceInstUsesWith(II, Extract);
  792. }
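// Concrete illustration of the constant-index path (predicate pattern and
// types chosen for the example): lastb(ptrue(vl4), <vscale x 4 x i32> %v)
// extracts the last known-active lane and becomes
//   extractelement <vscale x 4 x i32> %v, i64 3
// whereas lasta with the same vl4 predicate would need index 4, which is not
// below the known minimum vector length, so it is left alone.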
  793. static std::optional<Instruction *> instCombineSVECondLast(InstCombiner &IC,
  794. IntrinsicInst &II) {
  795. // The SIMD&FP variant of CLAST[AB] is significantly faster than the scalar
  796. // integer variant across a variety of micro-architectures. Replace scalar
  797. // integer CLAST[AB] intrinsic with optimal SIMD&FP variant. A simple
  798. // bitcast-to-fp + clast[ab] + bitcast-to-int will cost a cycle or two more
  799. // depending on the micro-architecture, but has been observed as generally
  800. // being faster, particularly when the CLAST[AB] op is a loop-carried
  801. // dependency.
  802. IRBuilder<> Builder(II.getContext());
  803. Builder.SetInsertPoint(&II);
  804. Value *Pg = II.getArgOperand(0);
  805. Value *Fallback = II.getArgOperand(1);
  806. Value *Vec = II.getArgOperand(2);
  807. Type *Ty = II.getType();
  808. if (!Ty->isIntegerTy())
  809. return std::nullopt;
  810. Type *FPTy;
  811. switch (cast<IntegerType>(Ty)->getBitWidth()) {
  812. default:
  813. return std::nullopt;
  814. case 16:
  815. FPTy = Builder.getHalfTy();
  816. break;
  817. case 32:
  818. FPTy = Builder.getFloatTy();
  819. break;
  820. case 64:
  821. FPTy = Builder.getDoubleTy();
  822. break;
  823. }
  824. Value *FPFallBack = Builder.CreateBitCast(Fallback, FPTy);
  825. auto *FPVTy = VectorType::get(
  826. FPTy, cast<VectorType>(Vec->getType())->getElementCount());
  827. Value *FPVec = Builder.CreateBitCast(Vec, FPVTy);
  828. auto *FPII = Builder.CreateIntrinsic(II.getIntrinsicID(), {FPVec->getType()},
  829. {Pg, FPFallBack, FPVec});
  830. Value *FPIItoInt = Builder.CreateBitCast(FPII, II.getType());
  831. return IC.replaceInstUsesWith(II, FPIItoInt);
  832. }
  833. static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
  834. IntrinsicInst &II) {
  835. LLVMContext &Ctx = II.getContext();
  836. IRBuilder<> Builder(Ctx);
  837. Builder.SetInsertPoint(&II);
  838. // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
  839. // can work with RDFFR_PP for ptest elimination.
  840. auto *AllPat =
  841. ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
  842. auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
  843. {II.getType()}, {AllPat});
  844. auto *RDFFR =
  845. Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
  846. RDFFR->takeName(&II);
  847. return IC.replaceInstUsesWith(II, RDFFR);
  848. }
  849. static std::optional<Instruction *>
  850. instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
  851. const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
  852. if (Pattern == AArch64SVEPredPattern::all) {
  853. LLVMContext &Ctx = II.getContext();
  854. IRBuilder<> Builder(Ctx);
  855. Builder.SetInsertPoint(&II);
  856. Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
  857. auto *VScale = Builder.CreateVScale(StepVal);
  858. VScale->takeName(&II);
  859. return IC.replaceInstUsesWith(II, VScale);
  860. }
  861. unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
  862. return MinNumElts && NumElts >= MinNumElts
  863. ? std::optional<Instruction *>(IC.replaceInstUsesWith(
  864. II, ConstantInt::get(II.getType(), MinNumElts)))
  865. : std::nullopt;
  866. }
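// Example (NumElts is supplied by the caller; the value 4 used here, as for
// cntw, is an assumption): with the 'all' pattern the count becomes
// "vscale * 4", while the vl2 pattern folds to the constant 2 because SVE
// guarantees at least that many 32-bit lanes.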
  867. static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
  868. IntrinsicInst &II) {
  869. Value *PgVal = II.getArgOperand(0);
  870. Value *OpVal = II.getArgOperand(1);
  871. IRBuilder<> Builder(II.getContext());
  872. Builder.SetInsertPoint(&II);
  873. // PTEST_<FIRST|LAST>(X, X) is equivalent to PTEST_ANY(X, X).
  874. // Later optimizations prefer this form.
  875. if (PgVal == OpVal &&
  876. (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_first ||
  877. II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_last)) {
  878. Value *Ops[] = {PgVal, OpVal};
  879. Type *Tys[] = {PgVal->getType()};
  880. auto *PTest =
  881. Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptest_any, Tys, Ops);
  882. PTest->takeName(&II);
  883. return IC.replaceInstUsesWith(II, PTest);
  884. }
  885. IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(PgVal);
  886. IntrinsicInst *Op = dyn_cast<IntrinsicInst>(OpVal);
  887. if (!Pg || !Op)
  888. return std::nullopt;
  889. Intrinsic::ID OpIID = Op->getIntrinsicID();
  890. if (Pg->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
  891. OpIID == Intrinsic::aarch64_sve_convert_to_svbool &&
  892. Pg->getArgOperand(0)->getType() == Op->getArgOperand(0)->getType()) {
  893. Value *Ops[] = {Pg->getArgOperand(0), Op->getArgOperand(0)};
  894. Type *Tys[] = {Pg->getArgOperand(0)->getType()};
  895. auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
  896. PTest->takeName(&II);
  897. return IC.replaceInstUsesWith(II, PTest);
  898. }
  899. // Transform PTEST_ANY(X=OP(PG,...), X) -> PTEST_ANY(PG, X).
  900. // Later optimizations may rewrite sequence to use the flag-setting variant
  901. // of instruction X to remove PTEST.
  902. if ((Pg == Op) && (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_any) &&
  903. ((OpIID == Intrinsic::aarch64_sve_brka_z) ||
  904. (OpIID == Intrinsic::aarch64_sve_brkb_z) ||
  905. (OpIID == Intrinsic::aarch64_sve_brkpa_z) ||
  906. (OpIID == Intrinsic::aarch64_sve_brkpb_z) ||
  907. (OpIID == Intrinsic::aarch64_sve_rdffr_z) ||
  908. (OpIID == Intrinsic::aarch64_sve_and_z) ||
  909. (OpIID == Intrinsic::aarch64_sve_bic_z) ||
  910. (OpIID == Intrinsic::aarch64_sve_eor_z) ||
  911. (OpIID == Intrinsic::aarch64_sve_nand_z) ||
  912. (OpIID == Intrinsic::aarch64_sve_nor_z) ||
  913. (OpIID == Intrinsic::aarch64_sve_orn_z) ||
  914. (OpIID == Intrinsic::aarch64_sve_orr_z))) {
  915. Value *Ops[] = {Pg->getArgOperand(0), Pg};
  916. Type *Tys[] = {Pg->getType()};
  917. auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
  918. PTest->takeName(&II);
  919. return IC.replaceInstUsesWith(II, PTest);
  920. }
  921. return std::nullopt;
  922. }
  923. template <Intrinsic::ID MulOpc, typename Intrinsic::ID FuseOpc>
  924. static std::optional<Instruction *>
  925. instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II,
  926. bool MergeIntoAddendOp) {
  927. Value *P = II.getOperand(0);
  928. Value *MulOp0, *MulOp1, *AddendOp, *Mul;
  929. if (MergeIntoAddendOp) {
  930. AddendOp = II.getOperand(1);
  931. Mul = II.getOperand(2);
  932. } else {
  933. AddendOp = II.getOperand(2);
  934. Mul = II.getOperand(1);
  935. }
  936. if (!match(Mul, m_Intrinsic<MulOpc>(m_Specific(P), m_Value(MulOp0),
  937. m_Value(MulOp1))))
  938. return std::nullopt;
  939. if (!Mul->hasOneUse())
  940. return std::nullopt;
  941. Instruction *FMFSource = nullptr;
  942. if (II.getType()->isFPOrFPVectorTy()) {
  943. llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
  944. // Stop the combine when the flags on the inputs differ in case dropping
  945. // flags would lead to us missing out on more beneficial optimizations.
  946. if (FAddFlags != cast<CallInst>(Mul)->getFastMathFlags())
  947. return std::nullopt;
  948. if (!FAddFlags.allowContract())
  949. return std::nullopt;
  950. FMFSource = &II;
  951. }
  952. IRBuilder<> Builder(II.getContext());
  953. Builder.SetInsertPoint(&II);
  954. CallInst *Res;
  955. if (MergeIntoAddendOp)
  956. Res = Builder.CreateIntrinsic(FuseOpc, {II.getType()},
  957. {P, AddendOp, MulOp0, MulOp1}, FMFSource);
  958. else
  959. Res = Builder.CreateIntrinsic(FuseOpc, {II.getType()},
  960. {P, MulOp0, MulOp1, AddendOp}, FMFSource);
  961. return IC.replaceInstUsesWith(II, Res);
  962. }
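// A representative instance of the combine above (intrinsic spellings are
// schematic): with matching fast-math flags that allow contraction,
//   sve.fadd(%p, %a, sve.fmul(%p, %b, %c))
// becomes sve.fmla(%p, %a, %b, %c) when merging into the addend operand, and
// the non-addend form produces the fmad/mad style operand ordering instead.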
  963. static bool isAllActivePredicate(Value *Pred) {
  964. // Look through convert.from.svbool(convert.to.svbool(...)) chain.
  965. Value *UncastedPred;
  966. if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
  967. m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
  968. m_Value(UncastedPred)))))
  969. // If the predicate has the same or fewer lanes than the uncasted
  970. // predicate then we know the casting has no effect.
  971. if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
  972. cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
  973. Pred = UncastedPred;
  974. return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
  975. m_ConstantInt<AArch64SVEPredPattern::all>()));
  976. }
  977. static std::optional<Instruction *>
  978. instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
  979. IRBuilder<> Builder(II.getContext());
  980. Builder.SetInsertPoint(&II);
  981. Value *Pred = II.getOperand(0);
  982. Value *PtrOp = II.getOperand(1);
  983. Type *VecTy = II.getType();
  984. Value *VecPtr = Builder.CreateBitCast(PtrOp, VecTy->getPointerTo());
  985. if (isAllActivePredicate(Pred)) {
  986. LoadInst *Load = Builder.CreateLoad(VecTy, VecPtr);
  987. Load->copyMetadata(II);
  988. return IC.replaceInstUsesWith(II, Load);
  989. }
  990. CallInst *MaskedLoad =
  991. Builder.CreateMaskedLoad(VecTy, VecPtr, PtrOp->getPointerAlignment(DL),
  992. Pred, ConstantAggregateZero::get(VecTy));
  993. MaskedLoad->copyMetadata(II);
  994. return IC.replaceInstUsesWith(II, MaskedLoad);
  995. }
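// Lower sve.st1 to a plain store when the predicate is all active, or to a
// masked store otherwise, preserving any metadata.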
  996. static std::optional<Instruction *>
  997. instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
  998. IRBuilder<> Builder(II.getContext());
  999. Builder.SetInsertPoint(&II);
  1000. Value *VecOp = II.getOperand(0);
  1001. Value *Pred = II.getOperand(1);
  1002. Value *PtrOp = II.getOperand(2);
  1003. Value *VecPtr =
  1004. Builder.CreateBitCast(PtrOp, VecOp->getType()->getPointerTo());
  1005. if (isAllActivePredicate(Pred)) {
  1006. StoreInst *Store = Builder.CreateStore(VecOp, VecPtr);
  1007. Store->copyMetadata(II);
  1008. return IC.eraseInstFromFunction(II);
  1009. }
  1010. CallInst *MaskedStore = Builder.CreateMaskedStore(
  1011. VecOp, VecPtr, PtrOp->getPointerAlignment(DL), Pred);
  1012. MaskedStore->copyMetadata(II);
  1013. return IC.eraseInstFromFunction(II);
  1014. }
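// Map an SVE floating-point arithmetic intrinsic onto the equivalent IR
// binary opcode, or BinaryOpsEnd if there is no direct equivalent.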
  1015. static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
  1016. switch (Intrinsic) {
  1017. case Intrinsic::aarch64_sve_fmul:
  1018. return Instruction::BinaryOps::FMul;
  1019. case Intrinsic::aarch64_sve_fadd:
  1020. return Instruction::BinaryOps::FAdd;
  1021. case Intrinsic::aarch64_sve_fsub:
  1022. return Instruction::BinaryOps::FSub;
  1023. default:
  1024. return Instruction::BinaryOpsEnd;
  1025. }
  1026. }
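// Replace an SVE arithmetic intrinsic whose predicate is an all-active ptrue
// with the equivalent unpredicated IR binary operator, keeping the fast-math
// flags of the original call.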
  1027. static std::optional<Instruction *>
  1028. instCombineSVEVectorBinOp(InstCombiner &IC, IntrinsicInst &II) {
  1029. auto *OpPredicate = II.getOperand(0);
  1030. auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
  1031. if (BinOpCode == Instruction::BinaryOpsEnd ||
  1032. !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
  1033. m_ConstantInt<AArch64SVEPredPattern::all>())))
  1034. return std::nullopt;
  1035. IRBuilder<> Builder(II.getContext());
  1036. Builder.SetInsertPoint(&II);
  1037. Builder.setFastMathFlags(II.getFastMathFlags());
  1038. auto BinOp =
  1039. Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
  1040. return IC.replaceInstUsesWith(II, BinOp);
  1041. }
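// Combine an SVE add intrinsic with a multiply operand into the fused
// (f)mla/(f)mad forms where possible, otherwise fall back to the generic
// binop combine.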
  1042. static std::optional<Instruction *> instCombineSVEVectorAdd(InstCombiner &IC,
  1043. IntrinsicInst &II) {
  1044. if (auto FMLA =
  1045. instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
  1046. Intrinsic::aarch64_sve_fmla>(IC, II,
  1047. true))
  1048. return FMLA;
  1049. if (auto MLA = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
  1050. Intrinsic::aarch64_sve_mla>(
  1051. IC, II, true))
  1052. return MLA;
  1053. if (auto FMAD =
  1054. instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
  1055. Intrinsic::aarch64_sve_fmad>(IC, II,
  1056. false))
  1057. return FMAD;
  1058. if (auto MAD = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
  1059. Intrinsic::aarch64_sve_mad>(
  1060. IC, II, false))
  1061. return MAD;
  1062. return instCombineSVEVectorBinOp(IC, II);
  1063. }
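// Combine an SVE sub intrinsic with a multiply operand into the fused
// (f)mls/fnmsb forms where possible, otherwise fall back to the generic
// binop combine.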
  1064. static std::optional<Instruction *> instCombineSVEVectorSub(InstCombiner &IC,
  1065. IntrinsicInst &II) {
  1066. if (auto FMLS =
  1067. instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
  1068. Intrinsic::aarch64_sve_fmls>(IC, II,
  1069. true))
  1070. return FMLS;
  1071. if (auto MLS = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
  1072. Intrinsic::aarch64_sve_mls>(
  1073. IC, II, true))
  1074. return MLS;
  1075. if (auto FMSB =
  1076. instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
  1077. Intrinsic::aarch64_sve_fnmsb>(
  1078. IC, II, false))
  1079. return FMSB;
  1080. return instCombineSVEVectorBinOp(IC, II);
  1081. }
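// Fold multiplications by a splat of one to the multiplicand, otherwise fall
// back to the generic binop combine.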
  1082. static std::optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
  1083. IntrinsicInst &II) {
  1084. auto *OpPredicate = II.getOperand(0);
  1085. auto *OpMultiplicand = II.getOperand(1);
  1086. auto *OpMultiplier = II.getOperand(2);
  1087. IRBuilder<> Builder(II.getContext());
  1088. Builder.SetInsertPoint(&II);
// Return true if a given value is a unit splat value, false otherwise.
  1090. auto IsUnitSplat = [](auto *I) {
  1091. auto *SplatValue = getSplatValue(I);
  1092. if (!SplatValue)
  1093. return false;
  1094. return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
  1095. };
  1096. // Return true if a given instruction is an aarch64_sve_dup intrinsic call
  1097. // with a unit splat value, false otherwise.
  1098. auto IsUnitDup = [](auto *I) {
  1099. auto *IntrI = dyn_cast<IntrinsicInst>(I);
  1100. if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
  1101. return false;
  1102. auto *SplatValue = IntrI->getOperand(2);
  1103. return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
  1104. };
  1105. if (IsUnitSplat(OpMultiplier)) {
  1106. // [f]mul pg %n, (dupx 1) => %n
  1107. OpMultiplicand->takeName(&II);
  1108. return IC.replaceInstUsesWith(II, OpMultiplicand);
  1109. } else if (IsUnitDup(OpMultiplier)) {
  1110. // [f]mul pg %n, (dup pg 1) => %n
  1111. auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
  1112. auto *DupPg = DupInst->getOperand(1);
  1113. // TODO: this is naive. The optimization is still valid if DupPg
  1114. // 'encompasses' OpPredicate, not only if they're the same predicate.
  1115. if (OpPredicate == DupPg) {
  1116. OpMultiplicand->takeName(&II);
  1117. return IC.replaceInstUsesWith(II, OpMultiplicand);
  1118. }
  1119. }
  1120. return instCombineSVEVectorBinOp(IC, II);
  1121. }
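// Fold an unpack of a splat into a splat of the sign- or zero-extended
// scalar.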
  1122. static std::optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
  1123. IntrinsicInst &II) {
  1124. IRBuilder<> Builder(II.getContext());
  1125. Builder.SetInsertPoint(&II);
  1126. Value *UnpackArg = II.getArgOperand(0);
  1127. auto *RetTy = cast<ScalableVectorType>(II.getType());
  1128. bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
  1129. II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
  1130. // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
  1131. // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
  1132. if (auto *ScalarArg = getSplatValue(UnpackArg)) {
  1133. ScalarArg =
  1134. Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
  1135. Value *NewVal =
  1136. Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
  1137. NewVal->takeName(&II);
  1138. return IC.replaceInstUsesWith(II, NewVal);
  1139. }
  1140. return std::nullopt;
  1141. }
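// Fold an sve.tbl whose index operand is a constant splat (smaller than the
// minimum element count) into an extractelement followed by a vector splat.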
  1142. static std::optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
  1143. IntrinsicInst &II) {
  1144. auto *OpVal = II.getOperand(0);
  1145. auto *OpIndices = II.getOperand(1);
  1146. VectorType *VTy = cast<VectorType>(II.getType());
  1147. // Check whether OpIndices is a constant splat value < minimal element count
  1148. // of result.
  1149. auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
  1150. if (!SplatValue ||
  1151. SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
  1152. return std::nullopt;
// Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
// splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
  1155. IRBuilder<> Builder(II.getContext());
  1156. Builder.SetInsertPoint(&II);
  1157. auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
  1158. auto *VectorSplat =
  1159. Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
  1160. VectorSplat->takeName(&II);
  1161. return IC.replaceInstUsesWith(II, VectorSplat);
  1162. }
  1163. static std::optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
  1164. IntrinsicInst &II) {
  1165. // zip1(uzp1(A, B), uzp2(A, B)) --> A
  1166. // zip2(uzp1(A, B), uzp2(A, B)) --> B
  1167. Value *A, *B;
  1168. if (match(II.getArgOperand(0),
  1169. m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) &&
  1170. match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
  1171. m_Specific(A), m_Specific(B))))
  1172. return IC.replaceInstUsesWith(
  1173. II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
  1174. return std::nullopt;
  1175. }
  1176. static std::optional<Instruction *>
  1177. instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II) {
  1178. Value *Mask = II.getOperand(0);
  1179. Value *BasePtr = II.getOperand(1);
  1180. Value *Index = II.getOperand(2);
  1181. Type *Ty = II.getType();
  1182. Value *PassThru = ConstantAggregateZero::get(Ty);
  1183. // Contiguous gather => masked load.
  1184. // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
  1185. // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
  1186. Value *IndexBase;
  1187. if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
  1188. m_Value(IndexBase), m_SpecificInt(1)))) {
  1189. IRBuilder<> Builder(II.getContext());
  1190. Builder.SetInsertPoint(&II);
  1191. Align Alignment =
  1192. BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
  1193. Type *VecPtrTy = PointerType::getUnqual(Ty);
  1194. Value *Ptr = Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
  1195. BasePtr, IndexBase);
  1196. Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
  1197. CallInst *MaskedLoad =
  1198. Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
  1199. MaskedLoad->takeName(&II);
  1200. return IC.replaceInstUsesWith(II, MaskedLoad);
  1201. }
  1202. return std::nullopt;
  1203. }
  1204. static std::optional<Instruction *>
  1205. instCombineST1ScatterIndex(InstCombiner &IC, IntrinsicInst &II) {
  1206. Value *Val = II.getOperand(0);
  1207. Value *Mask = II.getOperand(1);
  1208. Value *BasePtr = II.getOperand(2);
  1209. Value *Index = II.getOperand(3);
  1210. Type *Ty = Val->getType();
  1211. // Contiguous scatter => masked store.
  1212. // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
  1213. // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
  1214. Value *IndexBase;
  1215. if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
  1216. m_Value(IndexBase), m_SpecificInt(1)))) {
  1217. IRBuilder<> Builder(II.getContext());
  1218. Builder.SetInsertPoint(&II);
  1219. Align Alignment =
  1220. BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
  1221. Value *Ptr = Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
  1222. BasePtr, IndexBase);
  1223. Type *VecPtrTy = PointerType::getUnqual(Ty);
  1224. Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
  1225. (void)Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
  1226. return IC.eraseInstFromFunction(II);
  1227. }
  1228. return std::nullopt;
  1229. }
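// Replace a signed divide by a (possibly negated) power-of-two splat with
// sve.asrd, followed by an sve.neg for negative divisors.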
  1230. static std::optional<Instruction *> instCombineSVESDIV(InstCombiner &IC,
  1231. IntrinsicInst &II) {
  1232. IRBuilder<> Builder(II.getContext());
  1233. Builder.SetInsertPoint(&II);
  1234. Type *Int32Ty = Builder.getInt32Ty();
  1235. Value *Pred = II.getOperand(0);
  1236. Value *Vec = II.getOperand(1);
  1237. Value *DivVec = II.getOperand(2);
  1238. Value *SplatValue = getSplatValue(DivVec);
  1239. ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue);
  1240. if (!SplatConstantInt)
  1241. return std::nullopt;
  1242. APInt Divisor = SplatConstantInt->getValue();
  1243. if (Divisor.isPowerOf2()) {
  1244. Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
  1245. auto ASRD = Builder.CreateIntrinsic(
  1246. Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
  1247. return IC.replaceInstUsesWith(II, ASRD);
  1248. }
  1249. if (Divisor.isNegatedPowerOf2()) {
  1250. Divisor.negate();
  1251. Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
  1252. auto ASRD = Builder.CreateIntrinsic(
  1253. Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
  1254. auto NEG = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_neg,
  1255. {ASRD->getType()}, {ASRD, Pred, ASRD});
  1256. return IC.replaceInstUsesWith(II, NEG);
  1257. }
  1258. return std::nullopt;
  1259. }
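// Shrink Vec in place while its second half repeats its first half, e.g.
// (A, B, A, B) becomes (A, B). A nullptr entry denotes a poison lane and,
// when AllowPoison is set, matches any value. Returns false if Vec is not a
// power-of-two size or no such repetition exists.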
  1260. bool SimplifyValuePattern(SmallVector<Value *> &Vec, bool AllowPoison) {
  1261. size_t VecSize = Vec.size();
  1262. if (VecSize == 1)
  1263. return true;
  1264. if (!isPowerOf2_64(VecSize))
  1265. return false;
  1266. size_t HalfVecSize = VecSize / 2;
  1267. for (auto LHS = Vec.begin(), RHS = Vec.begin() + HalfVecSize;
  1268. RHS != Vec.end(); LHS++, RHS++) {
  1269. if (*LHS != nullptr && *RHS != nullptr) {
  1270. if (*LHS == *RHS)
  1271. continue;
  1272. else
  1273. return false;
  1274. }
  1275. if (!AllowPoison)
  1276. return false;
  1277. if (*LHS == nullptr && *RHS != nullptr)
  1278. *LHS = *RHS;
  1279. }
  1280. Vec.resize(HalfVecSize);
  1281. SimplifyValuePattern(Vec, AllowPoison);
  1282. return true;
  1283. }
  1284. // Try to simplify dupqlane patterns like dupqlane(f32 A, f32 B, f32 A, f32 B)
  1285. // to dupqlane(f64(C)) where C is A concatenated with B
  1286. static std::optional<Instruction *> instCombineSVEDupqLane(InstCombiner &IC,
  1287. IntrinsicInst &II) {
  1288. Value *CurrentInsertElt = nullptr, *Default = nullptr;
  1289. if (!match(II.getOperand(0),
  1290. m_Intrinsic<Intrinsic::vector_insert>(
  1291. m_Value(Default), m_Value(CurrentInsertElt), m_Value())) ||
  1292. !isa<FixedVectorType>(CurrentInsertElt->getType()))
  1293. return std::nullopt;
  1294. auto IIScalableTy = cast<ScalableVectorType>(II.getType());
  1295. // Insert the scalars into a container ordered by InsertElement index
  1296. SmallVector<Value *> Elts(IIScalableTy->getMinNumElements(), nullptr);
  1297. while (auto InsertElt = dyn_cast<InsertElementInst>(CurrentInsertElt)) {
  1298. auto Idx = cast<ConstantInt>(InsertElt->getOperand(2));
  1299. Elts[Idx->getValue().getZExtValue()] = InsertElt->getOperand(1);
  1300. CurrentInsertElt = InsertElt->getOperand(0);
  1301. }
  1302. bool AllowPoison =
  1303. isa<PoisonValue>(CurrentInsertElt) && isa<PoisonValue>(Default);
  1304. if (!SimplifyValuePattern(Elts, AllowPoison))
  1305. return std::nullopt;
  1306. // Rebuild the simplified chain of InsertElements. e.g. (a, b, a, b) as (a, b)
  1307. IRBuilder<> Builder(II.getContext());
  1308. Builder.SetInsertPoint(&II);
  1309. Value *InsertEltChain = PoisonValue::get(CurrentInsertElt->getType());
  1310. for (size_t I = 0; I < Elts.size(); I++) {
  1311. if (Elts[I] == nullptr)
  1312. continue;
  1313. InsertEltChain = Builder.CreateInsertElement(InsertEltChain, Elts[I],
  1314. Builder.getInt64(I));
  1315. }
  1316. if (InsertEltChain == nullptr)
  1317. return std::nullopt;
  1318. // Splat the simplified sequence, e.g. (f16 a, f16 b, f16 c, f16 d) as one i64
  1319. // value or (f16 a, f16 b) as one i32 value. This requires an InsertSubvector
  1320. // be bitcast to a type wide enough to fit the sequence, be splatted, and then
  1321. // be narrowed back to the original type.
  1322. unsigned PatternWidth = IIScalableTy->getScalarSizeInBits() * Elts.size();
  1323. unsigned PatternElementCount = IIScalableTy->getScalarSizeInBits() *
  1324. IIScalableTy->getMinNumElements() /
  1325. PatternWidth;
  1326. IntegerType *WideTy = Builder.getIntNTy(PatternWidth);
  1327. auto *WideScalableTy = ScalableVectorType::get(WideTy, PatternElementCount);
  1328. auto *WideShuffleMaskTy =
  1329. ScalableVectorType::get(Builder.getInt32Ty(), PatternElementCount);
  1330. auto ZeroIdx = ConstantInt::get(Builder.getInt64Ty(), APInt(64, 0));
  1331. auto InsertSubvector = Builder.CreateInsertVector(
  1332. II.getType(), PoisonValue::get(II.getType()), InsertEltChain, ZeroIdx);
  1333. auto WideBitcast =
  1334. Builder.CreateBitOrPointerCast(InsertSubvector, WideScalableTy);
  1335. auto WideShuffleMask = ConstantAggregateZero::get(WideShuffleMaskTy);
  1336. auto WideShuffle = Builder.CreateShuffleVector(
  1337. WideBitcast, PoisonValue::get(WideScalableTy), WideShuffleMask);
  1338. auto NarrowBitcast =
  1339. Builder.CreateBitOrPointerCast(WideShuffle, II.getType());
  1340. return IC.replaceInstUsesWith(II, NarrowBitcast);
  1341. }
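// fmaxnm/fminnm of a value with itself is simply that value.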
  1342. static std::optional<Instruction *> instCombineMaxMinNM(InstCombiner &IC,
  1343. IntrinsicInst &II) {
  1344. Value *A = II.getArgOperand(0);
  1345. Value *B = II.getArgOperand(1);
  1346. if (A == B)
  1347. return IC.replaceInstUsesWith(II, A);
  1348. return std::nullopt;
  1349. }
  1350. static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
  1351. IntrinsicInst &II) {
  1352. IRBuilder<> Builder(&II);
  1353. Value *Pred = II.getOperand(0);
  1354. Value *Vec = II.getOperand(1);
  1355. Value *Shift = II.getOperand(2);
  1356. // Convert SRSHL into the simpler LSL intrinsic when fed by an ABS intrinsic.
  1357. Value *AbsPred, *MergedValue;
  1358. if (!match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_sqabs>(
  1359. m_Value(MergedValue), m_Value(AbsPred), m_Value())) &&
  1360. !match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_abs>(
  1361. m_Value(MergedValue), m_Value(AbsPred), m_Value())))
  1362. return std::nullopt;
  1363. // Transform is valid if any of the following are true:
  1364. // * The ABS merge value is an undef or non-negative
  1365. // * The ABS predicate is all active
  1366. // * The ABS predicate and the SRSHL predicates are the same
  1367. if (!isa<UndefValue>(MergedValue) && !match(MergedValue, m_NonNegative()) &&
  1368. AbsPred != Pred && !isAllActivePredicate(AbsPred))
  1369. return std::nullopt;
  1370. // Only valid when the shift amount is non-negative, otherwise the rounding
  1371. // behaviour of SRSHL cannot be ignored.
  1372. if (!match(Shift, m_NonNegative()))
  1373. return std::nullopt;
  1374. auto LSL = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_lsl, {II.getType()},
  1375. {Pred, Vec, Shift});
  1376. return IC.replaceInstUsesWith(II, LSL);
  1377. }
  1378. std::optional<Instruction *>
  1379. AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
  1380. IntrinsicInst &II) const {
  1381. Intrinsic::ID IID = II.getIntrinsicID();
  1382. switch (IID) {
  1383. default:
  1384. break;
  1385. case Intrinsic::aarch64_neon_fmaxnm:
  1386. case Intrinsic::aarch64_neon_fminnm:
  1387. return instCombineMaxMinNM(IC, II);
  1388. case Intrinsic::aarch64_sve_convert_from_svbool:
  1389. return instCombineConvertFromSVBool(IC, II);
  1390. case Intrinsic::aarch64_sve_dup:
  1391. return instCombineSVEDup(IC, II);
  1392. case Intrinsic::aarch64_sve_dup_x:
  1393. return instCombineSVEDupX(IC, II);
  1394. case Intrinsic::aarch64_sve_cmpne:
  1395. case Intrinsic::aarch64_sve_cmpne_wide:
  1396. return instCombineSVECmpNE(IC, II);
  1397. case Intrinsic::aarch64_sve_rdffr:
  1398. return instCombineRDFFR(IC, II);
  1399. case Intrinsic::aarch64_sve_lasta:
  1400. case Intrinsic::aarch64_sve_lastb:
  1401. return instCombineSVELast(IC, II);
  1402. case Intrinsic::aarch64_sve_clasta_n:
  1403. case Intrinsic::aarch64_sve_clastb_n:
  1404. return instCombineSVECondLast(IC, II);
  1405. case Intrinsic::aarch64_sve_cntd:
  1406. return instCombineSVECntElts(IC, II, 2);
  1407. case Intrinsic::aarch64_sve_cntw:
  1408. return instCombineSVECntElts(IC, II, 4);
  1409. case Intrinsic::aarch64_sve_cnth:
  1410. return instCombineSVECntElts(IC, II, 8);
  1411. case Intrinsic::aarch64_sve_cntb:
  1412. return instCombineSVECntElts(IC, II, 16);
  1413. case Intrinsic::aarch64_sve_ptest_any:
  1414. case Intrinsic::aarch64_sve_ptest_first:
  1415. case Intrinsic::aarch64_sve_ptest_last:
  1416. return instCombineSVEPTest(IC, II);
  1417. case Intrinsic::aarch64_sve_mul:
  1418. case Intrinsic::aarch64_sve_fmul:
  1419. return instCombineSVEVectorMul(IC, II);
  1420. case Intrinsic::aarch64_sve_fadd:
  1421. case Intrinsic::aarch64_sve_add:
  1422. return instCombineSVEVectorAdd(IC, II);
  1423. case Intrinsic::aarch64_sve_fsub:
  1424. case Intrinsic::aarch64_sve_sub:
  1425. return instCombineSVEVectorSub(IC, II);
  1426. case Intrinsic::aarch64_sve_tbl:
  1427. return instCombineSVETBL(IC, II);
  1428. case Intrinsic::aarch64_sve_uunpkhi:
  1429. case Intrinsic::aarch64_sve_uunpklo:
  1430. case Intrinsic::aarch64_sve_sunpkhi:
  1431. case Intrinsic::aarch64_sve_sunpklo:
  1432. return instCombineSVEUnpack(IC, II);
  1433. case Intrinsic::aarch64_sve_zip1:
  1434. case Intrinsic::aarch64_sve_zip2:
  1435. return instCombineSVEZip(IC, II);
  1436. case Intrinsic::aarch64_sve_ld1_gather_index:
  1437. return instCombineLD1GatherIndex(IC, II);
  1438. case Intrinsic::aarch64_sve_st1_scatter_index:
  1439. return instCombineST1ScatterIndex(IC, II);
  1440. case Intrinsic::aarch64_sve_ld1:
  1441. return instCombineSVELD1(IC, II, DL);
  1442. case Intrinsic::aarch64_sve_st1:
  1443. return instCombineSVEST1(IC, II, DL);
  1444. case Intrinsic::aarch64_sve_sdiv:
  1445. return instCombineSVESDIV(IC, II);
  1446. case Intrinsic::aarch64_sve_sel:
  1447. return instCombineSVESel(IC, II);
  1448. case Intrinsic::aarch64_sve_srshl:
  1449. return instCombineSVESrshl(IC, II);
  1450. case Intrinsic::aarch64_sve_dupq_lane:
  1451. return instCombineSVEDupqLane(IC, II);
  1452. }
  1453. return std::nullopt;
  1454. }
  1455. std::optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic(
  1456. InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
  1457. APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
  1458. std::function<void(Instruction *, unsigned, APInt, APInt &)>
  1459. SimplifyAndSetOp) const {
  1460. switch (II.getIntrinsicID()) {
  1461. default:
  1462. break;
  1463. case Intrinsic::aarch64_neon_fcvtxn:
  1464. case Intrinsic::aarch64_neon_rshrn:
  1465. case Intrinsic::aarch64_neon_sqrshrn:
  1466. case Intrinsic::aarch64_neon_sqrshrun:
  1467. case Intrinsic::aarch64_neon_sqshrn:
  1468. case Intrinsic::aarch64_neon_sqshrun:
  1469. case Intrinsic::aarch64_neon_sqxtn:
  1470. case Intrinsic::aarch64_neon_sqxtun:
  1471. case Intrinsic::aarch64_neon_uqrshrn:
  1472. case Intrinsic::aarch64_neon_uqshrn:
  1473. case Intrinsic::aarch64_neon_uqxtn:
  1474. SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts);
  1475. break;
  1476. }
  1477. return std::nullopt;
  1478. }
  1479. TypeSize
  1480. AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  1481. switch (K) {
  1482. case TargetTransformInfo::RGK_Scalar:
  1483. return TypeSize::getFixed(64);
  1484. case TargetTransformInfo::RGK_FixedWidthVector:
  1485. if (!ST->isStreamingSVEModeDisabled() &&
  1486. !EnableFixedwidthAutovecInStreamingMode)
  1487. return TypeSize::getFixed(0);
  1488. if (ST->hasSVE())
  1489. return TypeSize::getFixed(
  1490. std::max(ST->getMinSVEVectorSizeInBits(), 128u));
  1491. return TypeSize::getFixed(ST->hasNEON() ? 128 : 0);
  1492. case TargetTransformInfo::RGK_ScalableVector:
  1493. if (!ST->isStreamingSVEModeDisabled() && !EnableScalableAutovecInStreamingMode)
  1494. return TypeSize::getScalable(0);
  1495. return TypeSize::getScalable(ST->hasSVE() ? 128 : 0);
  1496. }
  1497. llvm_unreachable("Unsupported register kind");
  1498. }
  1499. bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
  1500. ArrayRef<const Value *> Args) {
// A helper that returns a vector type from the given type. The number of
// elements in DstTy determines the vector width.
  1503. auto toVectorTy = [&](Type *ArgTy) {
  1504. return VectorType::get(ArgTy->getScalarType(),
  1505. cast<VectorType>(DstTy)->getElementCount());
  1506. };
  1507. // Exit early if DstTy is not a vector type whose elements are at least
  1508. // 16-bits wide. SVE doesn't generally have the same set of instructions to
  1509. // perform an extend with the add/sub/mul. There are SMULLB style
  1510. // instructions, but they operate on top/bottom, requiring some sort of lane
  1511. // interleaving to be used with zext/sext.
  1512. if (!useNeonVector(DstTy) || DstTy->getScalarSizeInBits() < 16)
  1513. return false;
  1514. // Determine if the operation has a widening variant. We consider both the
  1515. // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  1516. // instructions.
  1517. //
  1518. // TODO: Add additional widening operations (e.g., shl, etc.) once we
  1519. // verify that their extending operands are eliminated during code
  1520. // generation.
  1521. switch (Opcode) {
  1522. case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  1523. case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
  1524. case Instruction::Mul: // SMULL(2), UMULL(2)
  1525. break;
  1526. default:
  1527. return false;
  1528. }
  1529. // To be a widening instruction (either the "wide" or "long" versions), the
  1530. // second operand must be a sign- or zero extend.
  1531. if (Args.size() != 2 ||
  1532. (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])))
  1533. return false;
  1534. auto *Extend = cast<CastInst>(Args[1]);
  1535. auto *Arg0 = dyn_cast<CastInst>(Args[0]);
  1536. // A mul only has a mull version (not like addw). Both operands need to be
  1537. // extending and the same type.
  1538. if (Opcode == Instruction::Mul &&
  1539. (!Arg0 || Arg0->getOpcode() != Extend->getOpcode() ||
  1540. Arg0->getOperand(0)->getType() != Extend->getOperand(0)->getType()))
  1541. return false;
  1542. // Legalize the destination type and ensure it can be used in a widening
  1543. // operation.
  1544. auto DstTyL = getTypeLegalizationCost(DstTy);
  1545. unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  1546. if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
  1547. return false;
  1548. // Legalize the source type and ensure it can be used in a widening
  1549. // operation.
  1550. auto *SrcTy = toVectorTy(Extend->getSrcTy());
  1551. auto SrcTyL = getTypeLegalizationCost(SrcTy);
  1552. unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  1553. if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
  1554. return false;
  1555. // Get the total number of vector elements in the legalized types.
  1556. InstructionCost NumDstEls =
  1557. DstTyL.first * DstTyL.second.getVectorMinNumElements();
  1558. InstructionCost NumSrcEls =
  1559. SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
  1560. // Return true if the legalized types have the same number of vector elements
  1561. // and the destination element type size is twice that of the source type.
  1562. return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
  1563. }
  1564. InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
  1565. Type *Src,
  1566. TTI::CastContextHint CCH,
  1567. TTI::TargetCostKind CostKind,
  1568. const Instruction *I) {
  1569. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  1570. assert(ISD && "Invalid opcode");
  1571. // If the cast is observable, and it is used by a widening instruction (e.g.,
  1572. // uaddl, saddw, etc.), it may be free.
  1573. if (I && I->hasOneUser()) {
  1574. auto *SingleUser = cast<Instruction>(*I->user_begin());
  1575. SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
  1576. if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
  1577. // If the cast is the second operand, it is free. We will generate either
  1578. // a "wide" or "long" version of the widening instruction.
  1579. if (I == SingleUser->getOperand(1))
  1580. return 0;
  1581. // If the cast is not the second operand, it will be free if it looks the
  1582. // same as the second operand. In this case, we will generate a "long"
  1583. // version of the widening instruction.
  1584. if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
  1585. if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
  1586. cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
  1587. return 0;
  1588. }
  1589. }
  1590. // TODO: Allow non-throughput costs that aren't binary.
  1591. auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
  1592. if (CostKind != TTI::TCK_RecipThroughput)
  1593. return Cost == 0 ? 0 : 1;
  1594. return Cost;
  1595. };
  1596. EVT SrcTy = TLI->getValueType(DL, Src);
  1597. EVT DstTy = TLI->getValueType(DL, Dst);
  1598. if (!SrcTy.isSimple() || !DstTy.isSimple())
  1599. return AdjustCost(
  1600. BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
  1601. static const TypeConversionCostTblEntry
  1602. ConversionTbl[] = {
  1603. { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1}, // xtn
  1604. { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1}, // xtn
  1605. { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1}, // xtn
  1606. { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1}, // xtn
  1607. { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 3}, // 2 xtn + 1 uzp1
  1608. { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1}, // xtn
  1609. { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2}, // 1 uzp1 + 1 xtn
  1610. { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1}, // 1 uzp1
  1611. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1}, // 1 xtn
  1612. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2}, // 1 uzp1 + 1 xtn
  1613. { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 4}, // 3 x uzp1 + xtn
  1614. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1}, // 1 uzp1
  1615. { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 3}, // 3 x uzp1
  1616. { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 2}, // 2 x uzp1
  1617. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 1}, // uzp1
  1618. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 3}, // (2 + 1) x uzp1
  1619. { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 7}, // (4 + 2 + 1) x uzp1
  1620. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2}, // 2 x uzp1
  1621. { ISD::TRUNCATE, MVT::v16i16, MVT::v16i64, 6}, // (4 + 2) x uzp1
  1622. { ISD::TRUNCATE, MVT::v16i32, MVT::v16i64, 4}, // 4 x uzp1
  1623. // Truncations on nxvmiN
  1624. { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
  1625. { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
  1626. { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
  1627. { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
  1628. { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
  1629. { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
  1630. { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
  1631. { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
  1632. { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
  1633. { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
  1634. { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
  1635. { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
  1636. { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
  1637. { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
  1638. { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
  1639. { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
  1640. // The number of shll instructions for the extension.
  1641. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
  1642. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
  1643. { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
  1644. { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
  1645. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
  1646. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
  1647. { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
  1648. { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
  1649. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
  1650. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
  1651. { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
  1652. { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
  1653. { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
  1654. { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
  1655. { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
  1656. { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
  1657. // LowerVectorINT_TO_FP:
  1658. { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
  1659. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
  1660. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
  1661. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
  1662. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
  1663. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
  1664. // Complex: to v2f32
  1665. { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
  1666. { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
  1667. { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
  1668. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
  1669. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
  1670. { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
  1671. // Complex: to v4f32
  1672. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 },
  1673. { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
  1674. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
  1675. { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
  1676. // Complex: to v8f32
  1677. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
  1678. { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
  1679. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 },
  1680. { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
  1681. // Complex: to v16f32
  1682. { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
  1683. { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
  1684. // Complex: to v2f64
  1685. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
  1686. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
  1687. { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
  1688. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
  1689. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
  1690. { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
  1691. // Complex: to v4f64
  1692. { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 },
  1693. { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 4 },
  1694. // LowerVectorFP_TO_INT
  1695. { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
  1696. { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
  1697. { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
  1698. { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
  1699. { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
  1700. { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
  1701. // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
  1702. { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
  1703. { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
  1704. { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 },
  1705. { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
  1706. { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
  1707. { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 },
  1708. // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
  1709. { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
  1710. { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 },
  1711. { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
  1712. { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 },
  1713. // Complex, from nxv2f32.
  1714. { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
  1715. { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
  1716. { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
  1717. { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 },
  1718. { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
  1719. { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
  1720. { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
  1721. { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 },
  1722. // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
  1723. { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
  1724. { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
  1725. { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 },
  1726. { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
  1727. { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
  1728. { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 },
  1729. // Complex, from nxv2f64.
  1730. { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
  1731. { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
  1732. { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
  1733. { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 },
  1734. { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
  1735. { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
  1736. { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
  1737. { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 },
  1738. // Complex, from nxv4f32.
  1739. { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
  1740. { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
  1741. { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
  1742. { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 },
  1743. { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
  1744. { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
  1745. { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
  1746. { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 },
  1747. // Complex, from nxv8f64. Illegal -> illegal conversions not required.
  1748. { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
  1749. { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 },
  1750. { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
  1751. { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 },
  1752. // Complex, from nxv4f64. Illegal -> illegal conversions not required.
  1753. { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
  1754. { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
  1755. { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 },
  1756. { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
  1757. { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
  1758. { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 },
  1759. // Complex, from nxv8f32. Illegal -> illegal conversions not required.
  1760. { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
  1761. { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 },
  1762. { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
  1763. { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 },
  1764. // Complex, from nxv8f16.
  1765. { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
  1766. { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
  1767. { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
  1768. { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 },
  1769. { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
  1770. { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
  1771. { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
  1772. { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 },
  1773. // Complex, from nxv4f16.
  1774. { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
  1775. { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
  1776. { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
  1777. { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 },
  1778. { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
  1779. { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
  1780. { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
  1781. { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 },
  1782. // Complex, from nxv2f16.
  1783. { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
  1784. { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
  1785. { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
  1786. { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 },
  1787. { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
  1788. { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
  1789. { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
  1790. { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 },
  1791. // Truncate from nxvmf32 to nxvmf16.
  1792. { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
  1793. { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
  1794. { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
  1795. // Truncate from nxvmf64 to nxvmf16.
  1796. { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
  1797. { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
  1798. { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
  1799. // Truncate from nxvmf64 to nxvmf32.
  1800. { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
  1801. { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
  1802. { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
  1803. // Extend from nxvmf16 to nxvmf32.
  1804. { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
  1805. { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
  1806. { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
  1807. // Extend from nxvmf16 to nxvmf64.
  1808. { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
  1809. { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
  1810. { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
  1811. // Extend from nxvmf32 to nxvmf64.
  1812. { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
  1813. { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
  1814. { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
  1815. // Bitcasts from float to integer
  1816. { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 },
  1817. { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 },
  1818. { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 },
  1819. // Bitcasts from integer to float
  1820. { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 },
  1821. { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 },
  1822. { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 },
  1823. };
  1824. if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
  1825. DstTy.getSimpleVT(),
  1826. SrcTy.getSimpleVT()))
  1827. return AdjustCost(Entry->Cost);
  1828. static const TypeConversionCostTblEntry FP16Tbl[] = {
  1829. {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs
  1830. {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1},
  1831. {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs
  1832. {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1},
  1833. {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs
  1834. {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2},
  1835. {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn
  1836. {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2},
  1837. {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs
  1838. {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1},
  1839. {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs
  1840. {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4},
  1841. {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn
  1842. {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3},
  1843. {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs
  1844. {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2},
  1845. {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs
  1846. {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8},
  1847. {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // ushll + ucvtf
  1848. {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2}, // sshll + scvtf
  1849. {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf
  1850. {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf
  1851. };
  1852. if (ST->hasFullFP16())
  1853. if (const auto *Entry = ConvertCostTableLookup(
  1854. FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
  1855. return AdjustCost(Entry->Cost);
  1856. return AdjustCost(
  1857. BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
  1858. }
  1859. InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
  1860. Type *Dst,
  1861. VectorType *VecTy,
  1862. unsigned Index) {
  1863. // Make sure we were given a valid extend opcode.
  1864. assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
  1865. "Invalid opcode");
  1866. // We are extending an element we extract from a vector, so the source type
  1867. // of the extend is the element type of the vector.
  1868. auto *Src = VecTy->getElementType();
  1869. // Sign- and zero-extends are for integer types only.
  1870. assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
  1871. // Get the cost for the extract. We compute the cost (if any) for the extend
  1872. // below.
  1873. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  1874. InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy,
  1875. CostKind, Index, nullptr, nullptr);
  1876. // Legalize the types.
  1877. auto VecLT = getTypeLegalizationCost(VecTy);
  1878. auto DstVT = TLI->getValueType(DL, Dst);
  1879. auto SrcVT = TLI->getValueType(DL, Src);
  1880. // If the resulting type is still a vector and the destination type is legal,
  1881. // we may get the extension for free. If not, get the default cost for the
  1882. // extend.
  1883. if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
  1884. return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
  1885. CostKind);
  1886. // The destination type should be larger than the element type. If not, get
  1887. // the default cost for the extend.
  1888. if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
  1889. return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
  1890. CostKind);
  1891. switch (Opcode) {
  1892. default:
  1893. llvm_unreachable("Opcode should be either SExt or ZExt");
  1894. // For sign-extends, we only need a smov, which performs the extension
  1895. // automatically.
  1896. case Instruction::SExt:
  1897. return Cost;
  1898. // For zero-extends, the extend is performed automatically by a umov unless
  1899. // the destination type is i64 and the element type is i8 or i16.
  1900. case Instruction::ZExt:
  1901. if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
  1902. return Cost;
  1903. }
  1904. // If we are unable to perform the extend for free, get the default cost.
  1905. return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
  1906. CostKind);
  1907. }
  1908. InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
  1909. TTI::TargetCostKind CostKind,
  1910. const Instruction *I) {
  1911. if (CostKind != TTI::TCK_RecipThroughput)
  1912. return Opcode == Instruction::PHI ? 0 : 1;
  1913. assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  1914. // Branches are assumed to be predicted.
  1915. return 0;
  1916. }
  1917. InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(Type *Val,
  1918. unsigned Index,
  1919. bool HasRealUse) {
  1920. assert(Val->isVectorTy() && "This must be a vector type");
  1921. if (Index != -1U) {
  1922. // Legalize the type.
  1923. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
  1924. // This type is legalized to a scalar type.
  1925. if (!LT.second.isVector())
  1926. return 0;
  1927. // The type may be split. For fixed-width vectors we can normalize the
  1928. // index to the new type.
  1929. if (LT.second.isFixedLengthVector()) {
  1930. unsigned Width = LT.second.getVectorNumElements();
  1931. Index = Index % Width;
  1932. }
  1933. // The element at index zero is already inside the vector.
  1934. // - For a physical (HasRealUse==true) insert-element or extract-element
  1935. // instruction that extracts integers, an explicit FPR -> GPR move is
  1936. // needed. So it has non-zero cost.
  1937. // - For the rest of cases (virtual instruction or element type is float),
  1938. // consider the instruction free.
  1939. //
  1940. // FIXME:
  1941. // If the extract-element and insert-element instructions could be
  1942. // simplified away (e.g., could be combined into users by looking at use-def
  1943. // context), they have no cost. This is not done in the first place for
  1944. // compile-time considerations.
  1945. if (Index == 0 && (!HasRealUse || !Val->getScalarType()->isIntegerTy()))
  1946. return 0;
  1947. }
  1948. // All other insert/extracts cost this much.
  1949. return ST->getVectorInsertExtractBaseCost();
  1950. }
  1951. InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
  1952. TTI::TargetCostKind CostKind,
  1953. unsigned Index, Value *Op0,
  1954. Value *Op1) {
  1955. return getVectorInstrCostHelper(Val, Index, false /* HasRealUse */);
  1956. }
  1957. InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,
  1958. Type *Val,
  1959. TTI::TargetCostKind CostKind,
  1960. unsigned Index) {
  1961. return getVectorInstrCostHelper(Val, Index, true /* HasRealUse */);
  1962. }
  1963. InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
  1964. unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
  1965. TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
  1966. ArrayRef<const Value *> Args,
  1967. const Instruction *CxtI) {
  1968. // TODO: Handle more cost kinds.
  1969. if (CostKind != TTI::TCK_RecipThroughput)
  1970. return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
  1971. Op2Info, Args, CxtI);
  1972. // Legalize the type.
  1973. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  1974. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  1975. switch (ISD) {
  1976. default:
  1977. return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
  1978. Op2Info);
  1979. case ISD::SDIV:
  1980. if (Op2Info.isConstant() && Op2Info.isUniform() && Op2Info.isPowerOf2()) {
// On AArch64, scalar signed division by a power-of-two constant is
// normally expanded to the sequence ADD + CMP + SELECT + SRA.
// The OperandValue properties may not be the same as those of the
// previous operation; conservatively assume OP_None.
  1985. InstructionCost Cost = getArithmeticInstrCost(
  1986. Instruction::Add, Ty, CostKind,
  1987. Op1Info.getNoProps(), Op2Info.getNoProps());
  1988. Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
  1989. Op1Info.getNoProps(), Op2Info.getNoProps());
  1990. Cost += getArithmeticInstrCost(
  1991. Instruction::Select, Ty, CostKind,
  1992. Op1Info.getNoProps(), Op2Info.getNoProps());
  1993. Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
  1994. Op1Info.getNoProps(), Op2Info.getNoProps());
  1995. return Cost;
  1996. }
  1997. [[fallthrough]];
  1998. case ISD::UDIV: {
  1999. if (Op2Info.isConstant() && Op2Info.isUniform()) {
  2000. auto VT = TLI->getValueType(DL, Ty);
  2001. if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
// Vector signed division by a constant is expanded to the
// sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
// to MULHS + SUB + SRL + ADD + SRL.
  2005. InstructionCost MulCost = getArithmeticInstrCost(
  2006. Instruction::Mul, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
  2007. InstructionCost AddCost = getArithmeticInstrCost(
  2008. Instruction::Add, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
  2009. InstructionCost ShrCost = getArithmeticInstrCost(
  2010. Instruction::AShr, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
  2011. return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
  2012. }
  2013. }
  2014. InstructionCost Cost = BaseT::getArithmeticInstrCost(
  2015. Opcode, Ty, CostKind, Op1Info, Op2Info);
  2016. if (Ty->isVectorTy()) {
  2017. if (TLI->isOperationLegalOrCustom(ISD, LT.second) && ST->hasSVE()) {
// SDIV/UDIV operations are lowered using SVE, so the cost can be lower.
  2020. if (isa<FixedVectorType>(Ty) && cast<FixedVectorType>(Ty)
  2021. ->getPrimitiveSizeInBits()
  2022. .getFixedValue() < 128) {
  2023. EVT VT = TLI->getValueType(DL, Ty);
  2024. static const CostTblEntry DivTbl[]{
  2025. {ISD::SDIV, MVT::v2i8, 5}, {ISD::SDIV, MVT::v4i8, 8},
  2026. {ISD::SDIV, MVT::v8i8, 8}, {ISD::SDIV, MVT::v2i16, 5},
  2027. {ISD::SDIV, MVT::v4i16, 5}, {ISD::SDIV, MVT::v2i32, 1},
  2028. {ISD::UDIV, MVT::v2i8, 5}, {ISD::UDIV, MVT::v4i8, 8},
  2029. {ISD::UDIV, MVT::v8i8, 8}, {ISD::UDIV, MVT::v2i16, 5},
  2030. {ISD::UDIV, MVT::v4i16, 5}, {ISD::UDIV, MVT::v2i32, 1}};
  2031. const auto *Entry = CostTableLookup(DivTbl, ISD, VT.getSimpleVT());
  2032. if (nullptr != Entry)
  2033. return Entry->Cost;
  2034. }
  2035. // For 8/16-bit elements, the cost is higher because the type
  2036. // requires promotion and possibly splitting:
  2037. if (LT.second.getScalarType() == MVT::i8)
  2038. Cost *= 8;
  2039. else if (LT.second.getScalarType() == MVT::i16)
  2040. Cost *= 4;
  2041. return Cost;
  2042. } else {
  2043. // If one of the operands is a uniform constant then the cost for each
  2044. // element is Cost for insertion, extraction and division.
  2045. // Insertion cost = 2, Extraction Cost = 2, Division = cost for the
  2046. // operation with scalar type
  2047. if ((Op1Info.isConstant() && Op1Info.isUniform()) ||
  2048. (Op2Info.isConstant() && Op2Info.isUniform())) {
  2049. if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
  2050. InstructionCost DivCost = BaseT::getArithmeticInstrCost(
  2051. Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info);
  2052. return (4 + DivCost) * VTy->getNumElements();
  2053. }
  2054. }
  2055. // On AArch64, without SVE, vector divisions are expanded
  2056. // into scalar divisions of each pair of elements.
  2057. Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty,
  2058. CostKind, Op1Info, Op2Info);
  2059. Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
  2060. Op1Info, Op2Info);
  2061. }
  2062. // TODO: if one of the arguments is scalar, then it's not necessary to
  2063. // double the cost of handling the vector elements.
  2064. Cost += Cost;
  2065. }
  2066. return Cost;
  2067. }
  2068. case ISD::MUL:
  2069. // When SVE is available, then we can lower the v2i64 operation using
  2070. // the SVE mul instruction, which has a lower cost.
  2071. if (LT.second == MVT::v2i64 && ST->hasSVE())
  2072. return LT.first;
  2073. // When SVE is not available, there is no MUL.2d instruction,
  2074. // which means mul <2 x i64> is expensive as elements are extracted
  2075. // from the vectors and the muls scalarized.
  2076. // As getScalarizationOverhead is a bit too pessimistic, we
// estimate the cost for an i64 vector directly here, which is:
  2078. // - four 2-cost i64 extracts,
  2079. // - two 2-cost i64 inserts, and
  2080. // - two 1-cost muls.
// So, for a v2i64 with LT.first = 1 the cost is 14, and for a v4i64 with
  2082. // LT.first = 2 the cost is 28. If both operands are extensions it will not
  2083. // need to scalarize so the cost can be cheaper (smull or umull).
  2085. if (LT.second != MVT::v2i64 || isWideningInstruction(Ty, Opcode, Args))
  2086. return LT.first;
  2087. return LT.first * 14;
  2088. case ISD::ADD:
  2089. case ISD::XOR:
  2090. case ISD::OR:
  2091. case ISD::AND:
  2092. case ISD::SRL:
  2093. case ISD::SRA:
  2094. case ISD::SHL:
  2095. // These nodes are marked as 'custom' for combining purposes only.
  2096. // We know that they are legal. See LowerAdd in ISelLowering.
  2097. return LT.first;
  2098. case ISD::FADD:
  2099. case ISD::FSUB:
  2100. case ISD::FMUL:
  2101. case ISD::FDIV:
  2102. case ISD::FNEG:
  2103. // These nodes are marked as 'custom' just to lower them to SVE.
  2104. // We know said lowering will incur no additional cost.
  2105. if (!Ty->getScalarType()->isFP128Ty())
  2106. return 2 * LT.first;
  2107. return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
  2108. Op2Info);
  2109. }
  2110. }
  2111. InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
  2112. ScalarEvolution *SE,
  2113. const SCEV *Ptr) {
  2114. // Address computations in vectorized code with non-consecutive addresses will
  2115. // likely result in more instructions compared to scalar code where the
  2116. // computation can more often be merged into the index mode. The resulting
  2117. // extra micro-ops can significantly decrease throughput.
  2118. unsigned NumVectorInstToHideOverhead = 10;
  2119. int MaxMergeDistance = 64;
  2120. if (Ty->isVectorTy() && SE &&
  2121. !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
  2122. return NumVectorInstToHideOverhead;
  2123. // In many cases the address computation is not merged into the instruction
  2124. // addressing mode.
  2125. return 1;
  2126. }
  2127. InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
  2128. Type *CondTy,
  2129. CmpInst::Predicate VecPred,
  2130. TTI::TargetCostKind CostKind,
  2131. const Instruction *I) {
  2132. // TODO: Handle other cost kinds.
  2133. if (CostKind != TTI::TCK_RecipThroughput)
  2134. return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
  2135. I);
  2136. int ISD = TLI->InstructionOpcodeToISD(Opcode);
// Some vector selects that are wider than the register width are not
// lowered well.
  2139. if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
  2140. // We would need this many instructions to hide the scalarization happening.
  2141. const int AmortizationCost = 20;
  2142. // If VecPred is not set, check if we can get a predicate from the context
  2143. // instruction, if its type matches the requested ValTy.
  2144. if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
  2145. CmpInst::Predicate CurrentPred;
  2146. if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
  2147. m_Value())))
  2148. VecPred = CurrentPred;
  2149. }
  2150. // Check if we have a compare/select chain that can be lowered using
  2151. // a (F)CMxx & BFI pair.
  2152. if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
  2153. VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
  2154. VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
  2155. VecPred == CmpInst::FCMP_UNE) {
  2156. static const auto ValidMinMaxTys = {
  2157. MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
  2158. MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
  2159. static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};
  2160. auto LT = getTypeLegalizationCost(ValTy);
  2161. if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }) ||
  2162. (ST->hasFullFP16() &&
  2163. any_of(ValidFP16MinMaxTys, [&LT](MVT M) { return M == LT.second; })))
  2164. return LT.first;
  2165. }
  2166. static const TypeConversionCostTblEntry
  2167. VectorSelectTbl[] = {
  2168. { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
  2169. { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
  2170. { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
  2171. { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
  2172. { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
  2173. { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
  2174. };
  2175. EVT SelCondTy = TLI->getValueType(DL, CondTy);
  2176. EVT SelValTy = TLI->getValueType(DL, ValTy);
  2177. if (SelCondTy.isSimple() && SelValTy.isSimple()) {
  2178. if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
  2179. SelCondTy.getSimpleVT(),
  2180. SelValTy.getSimpleVT()))
  2181. return Entry->Cost;
  2182. }
  2183. }
  2184. // The base case handles scalable vectors fine for now, since it treats the
  2185. // cost as 1 * legalization cost.
  2186. return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  2187. }
  2188. AArch64TTIImpl::TTI::MemCmpExpansionOptions
  2189. AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  2190. TTI::MemCmpExpansionOptions Options;
  2191. if (ST->requiresStrictAlign()) {
  2192. // TODO: Add cost modeling for strict align. Misaligned loads expand to
  2193. // a bunch of instructions when strict align is enabled.
  2194. return Options;
  2195. }
  2196. Options.AllowOverlappingLoads = true;
  2197. Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  2198. Options.NumLoadsPerBlock = Options.MaxNumLoads;
2199. // TODO: Though vector loads usually perform well on AArch64, on some targets
2200. // they may wake up the FP unit, which raises power consumption. Perhaps
  2201. // they could be used with no holds barred (-O3).
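// Load sizes are in bytes, widest first, so the expansion prefers the
// largest legal scalar loads.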
  2202. Options.LoadSizes = {8, 4, 2, 1};
  2203. return Options;
  2204. }
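// SVE gathers and scatters can consume a vector of addresses directly, so
// with SVE it is generally worth keeping address computations vectorized.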
  2205. bool AArch64TTIImpl::prefersVectorizedAddressing() const {
  2206. return ST->hasSVE();
  2207. }
  2208. InstructionCost
  2209. AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
  2210. Align Alignment, unsigned AddressSpace,
  2211. TTI::TargetCostKind CostKind) {
  2212. if (useNeonVector(Src))
  2213. return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
  2214. CostKind);
  2215. auto LT = getTypeLegalizationCost(Src);
  2216. if (!LT.first.isValid())
  2217. return InstructionCost::getInvalid();
  2218. // The code-generator is currently not able to handle scalable vectors
  2219. // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  2220. // it. This change will be removed when code-generation for these types is
  2221. // sufficiently reliable.
  2222. if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
  2223. return InstructionCost::getInvalid();
  2224. return LT.first;
  2225. }
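// Returns the per-element overhead factor (SVEGatherOverhead or
// SVEScatterOverhead) applied to SVE gather and scatter operations.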
  2226. static unsigned getSVEGatherScatterOverhead(unsigned Opcode) {
  2227. return Opcode == Instruction::Load ? SVEGatherOverhead : SVEScatterOverhead;
  2228. }
  2229. InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
  2230. unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
  2231. Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  2232. if (useNeonVector(DataTy))
  2233. return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
  2234. Alignment, CostKind, I);
  2235. auto *VT = cast<VectorType>(DataTy);
  2236. auto LT = getTypeLegalizationCost(DataTy);
  2237. if (!LT.first.isValid())
  2238. return InstructionCost::getInvalid();
  2239. // The code-generator is currently not able to handle scalable vectors
  2240. // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  2241. // it. This change will be removed when code-generation for these types is
  2242. // sufficiently reliable.
  2243. if (cast<VectorType>(DataTy)->getElementCount() ==
  2244. ElementCount::getScalable(1))
  2245. return InstructionCost::getInvalid();
  2246. ElementCount LegalVF = LT.second.getVectorElementCount();
  2247. InstructionCost MemOpCost =
  2248. getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind,
  2249. {TTI::OK_AnyValue, TTI::OP_None}, I);
  2250. // Add on an overhead cost for using gathers/scatters.
  2251. // TODO: At the moment this is applied unilaterally for all CPUs, but at some
  2252. // point we may want a per-CPU overhead.
  2253. MemOpCost *= getSVEGatherScatterOverhead(Opcode);
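// The total cost is the per-element memory-op cost, scaled by the maximum
// number of lanes in the legalized type and by the number of parts the type
// is split into.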
  2254. return LT.first * MemOpCost * getMaxNumElements(LegalVF);
  2255. }
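// Fixed-length vectors are treated as NEON vectors unless the subtarget
// prefers to use SVE for fixed-length vectors.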
  2256. bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  2257. return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
  2258. }
  2259. InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
  2260. MaybeAlign Alignment,
  2261. unsigned AddressSpace,
  2262. TTI::TargetCostKind CostKind,
  2263. TTI::OperandValueInfo OpInfo,
  2264. const Instruction *I) {
  2265. EVT VT = TLI->getValueType(DL, Ty, true);
  2266. // Type legalization can't handle structs
  2267. if (VT == MVT::Other)
  2268. return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
  2269. CostKind);
  2270. auto LT = getTypeLegalizationCost(Ty);
  2271. if (!LT.first.isValid())
  2272. return InstructionCost::getInvalid();
  2273. // The code-generator is currently not able to handle scalable vectors
  2274. // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  2275. // it. This change will be removed when code-generation for these types is
  2276. // sufficiently reliable.
  2277. if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
  2278. if (VTy->getElementCount() == ElementCount::getScalable(1))
  2279. return InstructionCost::getInvalid();
  2280. // TODO: consider latency as well for TCK_SizeAndLatency.
  2281. if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
  2282. return LT.first;
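// For the remaining cost kind (latency), assume a single instruction.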
  2283. if (CostKind != TTI::TCK_RecipThroughput)
  2284. return 1;
  2285. if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
  2286. LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
  2287. // Unaligned stores are extremely inefficient. We don't split all
2288. // unaligned 128-bit stores because of the negative impact that doing so has
2289. // shown in practice on inlined block copy code.
  2290. // We make such stores expensive so that we will only vectorize if there
  2291. // are 6 other instructions getting vectorized.
  2292. const int AmortizationCost = 6;
  2293. return LT.first * 2 * AmortizationCost;
  2294. }
  2295. // Opaque ptr or ptr vector types are i64s and can be lowered to STP/LDPs.
  2296. if (Ty->isPtrOrPtrVectorTy())
  2297. return LT.first;
  2298. // Check truncating stores and extending loads.
  2299. if (useNeonVector(Ty) &&
  2300. Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
2301. // v4i8 types are lowered to a scalar load/store and sshll/xtn.
  2302. if (VT == MVT::v4i8)
  2303. return 2;
  2304. // Otherwise we need to scalarize.
  2305. return cast<FixedVectorType>(Ty)->getNumElements() * 2;
  2306. }
  2307. return LT.first;
  2308. }
  2309. InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
  2310. unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
  2311. Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
  2312. bool UseMaskForCond, bool UseMaskForGaps) {
  2313. assert(Factor >= 2 && "Invalid interleave factor");
  2314. auto *VecVTy = cast<FixedVectorType>(VecTy);
  2315. if (!UseMaskForCond && !UseMaskForGaps &&
  2316. Factor <= TLI->getMaxSupportedInterleaveFactor()) {
  2317. unsigned NumElts = VecVTy->getNumElements();
  2318. auto *SubVecTy =
  2319. FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
  2320. // ldN/stN only support legal vector types of size 64 or 128 in bits.
  2321. // Accesses having vector types that are a multiple of 128 bits can be
  2322. // matched to more than one ldN/stN instruction.
  2323. bool UseScalable;
  2324. if (NumElts % Factor == 0 &&
  2325. TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
  2326. return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
  2327. }
  2328. return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
  2329. Alignment, AddressSpace, CostKind,
  2330. UseMaskForCond, UseMaskForGaps);
  2331. }
  2332. InstructionCost
  2333. AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  2334. InstructionCost Cost = 0;
  2335. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  2336. for (auto *I : Tys) {
  2337. if (!I->isVectorTy())
  2338. continue;
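// Assume a 128-bit vector that is live across a call must be spilled and
// reloaded, i.e. one store plus one load.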
  2339. if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
  2340. 128)
  2341. Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
  2342. getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  2343. }
  2344. return Cost;
  2345. }
  2346. unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  2347. return ST->getMaxInterleaveFactor();
  2348. }
  2349. // For Falkor, we want to avoid having too many strided loads in a loop since
  2350. // that can exhaust the HW prefetcher resources. We adjust the unroller
  2351. // MaxCount preference below to attempt to ensure unrolling doesn't create too
  2352. // many strided loads.
  2353. static void
  2354. getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  2355. TargetTransformInfo::UnrollingPreferences &UP) {
  2356. enum { MaxStridedLoads = 7 };
  2357. auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
  2358. int StridedLoads = 0;
  2359. // FIXME? We could make this more precise by looking at the CFG and
  2360. // e.g. not counting loads in each side of an if-then-else diamond.
  2361. for (const auto BB : L->blocks()) {
  2362. for (auto &I : *BB) {
  2363. LoadInst *LMemI = dyn_cast<LoadInst>(&I);
  2364. if (!LMemI)
  2365. continue;
  2366. Value *PtrValue = LMemI->getPointerOperand();
  2367. if (L->isLoopInvariant(PtrValue))
  2368. continue;
  2369. const SCEV *LSCEV = SE.getSCEV(PtrValue);
  2370. const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
  2371. if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
  2372. continue;
  2373. // FIXME? We could take pairing of unrolled load copies into account
  2374. // by looking at the AddRec, but we would probably have to limit this
  2375. // to loops with no stores or other memory optimization barriers.
  2376. ++StridedLoads;
  2377. // We've seen enough strided loads that seeing more won't make a
  2378. // difference.
  2379. if (StridedLoads > MaxStridedLoads / 2)
  2380. return StridedLoads;
  2381. }
  2382. }
  2383. return StridedLoads;
  2384. };
  2385. int StridedLoads = countStridedLoads(L, SE);
  2386. LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
  2387. << " strided loads\n");
  2388. // Pick the largest power of 2 unroll count that won't result in too many
  2389. // strided loads.
  2390. if (StridedLoads) {
  2391. UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
  2392. LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
  2393. << UP.MaxCount << '\n');
  2394. }
  2395. }
  2396. void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  2397. TTI::UnrollingPreferences &UP,
  2398. OptimizationRemarkEmitter *ORE) {
  2399. // Enable partial unrolling and runtime unrolling.
  2400. BaseT::getUnrollingPreferences(L, SE, UP, ORE);
  2401. UP.UpperBound = true;
2402. // Inner loops are more likely to be hot, and the runtime check can often be
2403. // hoisted out by LICM, so the overhead is lower; try a larger threshold to
2404. // unroll more loops.
  2405. if (L->getLoopDepth() > 1)
  2406. UP.PartialThreshold *= 2;
  2407. // Disable partial & runtime unrolling on -Os.
  2408. UP.PartialOptSizeThreshold = 0;
  2409. if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
  2410. EnableFalkorHWPFUnrollFix)
  2411. getFalkorUnrollingPreferences(L, SE, UP);
  2412. // Scan the loop: don't unroll loops with calls as this could prevent
  2413. // inlining. Don't unroll vector loops either, as they don't benefit much from
  2414. // unrolling.
  2415. for (auto *BB : L->getBlocks()) {
  2416. for (auto &I : *BB) {
2417. // Don't unroll vectorised loops.
  2418. if (I.getType()->isVectorTy())
  2419. return;
  2420. if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
  2421. if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
  2422. if (!isLoweredToCall(F))
  2423. continue;
  2424. }
  2425. return;
  2426. }
  2427. }
  2428. }
2429. // Enable runtime unrolling for in-order models.
2430. // If -mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
2431. // checking for that case we can ensure that the default behaviour is
2432. // unchanged.
  2433. if (ST->getProcFamily() != AArch64Subtarget::Others &&
  2434. !ST->getSchedModel().isOutOfOrder()) {
  2435. UP.Runtime = true;
  2436. UP.Partial = true;
  2437. UP.UnrollRemainder = true;
  2438. UP.DefaultUnrollRuntimeCount = 4;
  2439. UP.UnrollAndJam = true;
  2440. UP.UnrollAndJamInnerLoopThreshold = 60;
  2441. }
  2442. }
  2443. void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
  2444. TTI::PeelingPreferences &PP) {
  2445. BaseT::getPeelingPreferences(L, SE, PP);
  2446. }
  2447. Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
  2448. Type *ExpectedType) {
  2449. switch (Inst->getIntrinsicID()) {
  2450. default:
  2451. return nullptr;
  2452. case Intrinsic::aarch64_neon_st2:
  2453. case Intrinsic::aarch64_neon_st3:
  2454. case Intrinsic::aarch64_neon_st4: {
2455. // The expected type must be a struct whose elements match the values being stored.
  2456. StructType *ST = dyn_cast<StructType>(ExpectedType);
  2457. if (!ST)
  2458. return nullptr;
  2459. unsigned NumElts = Inst->arg_size() - 1;
  2460. if (ST->getNumElements() != NumElts)
  2461. return nullptr;
  2462. for (unsigned i = 0, e = NumElts; i != e; ++i) {
  2463. if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
  2464. return nullptr;
  2465. }
  2466. Value *Res = PoisonValue::get(ExpectedType);
  2467. IRBuilder<> Builder(Inst);
  2468. for (unsigned i = 0, e = NumElts; i != e; ++i) {
  2469. Value *L = Inst->getArgOperand(i);
  2470. Res = Builder.CreateInsertValue(Res, L, i);
  2471. }
  2472. return Res;
  2473. }
  2474. case Intrinsic::aarch64_neon_ld2:
  2475. case Intrinsic::aarch64_neon_ld3:
  2476. case Intrinsic::aarch64_neon_ld4:
  2477. if (Inst->getType() == ExpectedType)
  2478. return Inst;
  2479. return nullptr;
  2480. }
  2481. }
  2482. bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
  2483. MemIntrinsicInfo &Info) {
  2484. switch (Inst->getIntrinsicID()) {
  2485. default:
  2486. break;
  2487. case Intrinsic::aarch64_neon_ld2:
  2488. case Intrinsic::aarch64_neon_ld3:
  2489. case Intrinsic::aarch64_neon_ld4:
  2490. Info.ReadMem = true;
  2491. Info.WriteMem = false;
  2492. Info.PtrVal = Inst->getArgOperand(0);
  2493. break;
  2494. case Intrinsic::aarch64_neon_st2:
  2495. case Intrinsic::aarch64_neon_st3:
  2496. case Intrinsic::aarch64_neon_st4:
  2497. Info.ReadMem = false;
  2498. Info.WriteMem = true;
  2499. Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
  2500. break;
  2501. }
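// Record the ldN/stN element count so that matching load/store intrinsics
// can be paired up later.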
  2502. switch (Inst->getIntrinsicID()) {
  2503. default:
  2504. return false;
  2505. case Intrinsic::aarch64_neon_ld2:
  2506. case Intrinsic::aarch64_neon_st2:
  2507. Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
  2508. break;
  2509. case Intrinsic::aarch64_neon_ld3:
  2510. case Intrinsic::aarch64_neon_st3:
  2511. Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
  2512. break;
  2513. case Intrinsic::aarch64_neon_ld4:
  2514. case Intrinsic::aarch64_neon_st4:
  2515. Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
  2516. break;
  2517. }
  2518. return true;
  2519. }
  2520. /// See if \p I should be considered for address type promotion. We check if \p
2521. /// I is a sext with the right type that is used in memory accesses. If it is
2522. /// used in a "complex" getelementptr, we allow it to be promoted without
2523. /// finding other sext instructions that sign extended the same initial value.
2524. /// A getelementptr is considered "complex" if it has more than 2 operands.
  2525. bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
  2526. const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  2527. bool Considerable = false;
  2528. AllowPromotionWithoutCommonHeader = false;
  2529. if (!isa<SExtInst>(&I))
  2530. return false;
  2531. Type *ConsideredSExtType =
  2532. Type::getInt64Ty(I.getParent()->getParent()->getContext());
  2533. if (I.getType() != ConsideredSExtType)
  2534. return false;
  2535. // See if the sext is the one with the right type and used in at least one
  2536. // GetElementPtrInst.
  2537. for (const User *U : I.users()) {
  2538. if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
  2539. Considerable = true;
  2540. // A getelementptr is considered as "complex" if it has more than 2
2541. // operands. We will promote a SExt used in such a complex GEP, as we
2542. // expect some computation to be merged if it is done on 64 bits.
  2543. if (GEPInst->getNumOperands() > 2) {
  2544. AllowPromotionWithoutCommonHeader = true;
  2545. break;
  2546. }
  2547. }
  2548. }
  2549. return Considerable;
  2550. }
  2551. bool AArch64TTIImpl::isLegalToVectorizeReduction(
  2552. const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  2553. if (!VF.isScalable())
  2554. return true;
  2555. Type *Ty = RdxDesc.getRecurrenceType();
  2556. if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
  2557. return false;
  2558. switch (RdxDesc.getRecurrenceKind()) {
  2559. case RecurKind::Add:
  2560. case RecurKind::FAdd:
  2561. case RecurKind::And:
  2562. case RecurKind::Or:
  2563. case RecurKind::Xor:
  2564. case RecurKind::SMin:
  2565. case RecurKind::SMax:
  2566. case RecurKind::UMin:
  2567. case RecurKind::UMax:
  2568. case RecurKind::FMin:
  2569. case RecurKind::FMax:
  2570. case RecurKind::SelectICmp:
  2571. case RecurKind::SelectFCmp:
  2572. case RecurKind::FMulAdd:
  2573. return true;
  2574. default:
  2575. return false;
  2576. }
  2577. }
  2578. InstructionCost
  2579. AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
  2580. bool IsUnsigned,
  2581. TTI::TargetCostKind CostKind) {
  2582. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  2583. if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
  2584. return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
  2585. assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) &&
  2586. "Both vector needs to be equally scalable");
  2587. InstructionCost LegalizationCost = 0;
  2588. if (LT.first > 1) {
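// The type is split during legalization; combine the split parts with
// element-wise min/max operations before the final horizontal reduction.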
  2589. Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
  2590. unsigned MinMaxOpcode =
  2591. Ty->isFPOrFPVectorTy()
  2592. ? Intrinsic::maxnum
  2593. : (IsUnsigned ? Intrinsic::umin : Intrinsic::smin);
  2594. IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy});
  2595. LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
  2596. }
  2597. return LegalizationCost + /*Cost of horizontal reduction*/ 2;
  2598. }
  2599. InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
  2600. unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
  2601. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  2602. InstructionCost LegalizationCost = 0;
  2603. if (LT.first > 1) {
  2604. Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
  2605. LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
  2606. LegalizationCost *= LT.first - 1;
  2607. }
  2608. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  2609. assert(ISD && "Invalid opcode");
  2610. // Add the final reduction cost for the legal horizontal reduction
  2611. switch (ISD) {
  2612. case ISD::ADD:
  2613. case ISD::AND:
  2614. case ISD::OR:
  2615. case ISD::XOR:
  2616. case ISD::FADD:
  2617. return LegalizationCost + 2;
  2618. default:
  2619. return InstructionCost::getInvalid();
  2620. }
  2621. }
  2622. InstructionCost
  2623. AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
  2624. std::optional<FastMathFlags> FMF,
  2625. TTI::TargetCostKind CostKind) {
  2626. if (TTI::requiresOrderedReduction(FMF)) {
  2627. if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
  2628. InstructionCost BaseCost =
  2629. BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
  2630. // Add on extra cost to reflect the extra overhead on some CPUs. We still
  2631. // end up vectorizing for more computationally intensive loops.
  2632. return BaseCost + FixedVTy->getNumElements();
  2633. }
  2634. if (Opcode != Instruction::FAdd)
  2635. return InstructionCost::getInvalid();
  2636. auto *VTy = cast<ScalableVectorType>(ValTy);
  2637. InstructionCost Cost =
  2638. getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
  2639. Cost *= getMaxNumElements(VTy->getElementCount());
  2640. return Cost;
  2641. }
  2642. if (isa<ScalableVectorType>(ValTy))
  2643. return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
  2644. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  2645. MVT MTy = LT.second;
  2646. int ISD = TLI->InstructionOpcodeToISD(Opcode);
  2647. assert(ISD && "Invalid opcode");
  2648. // Horizontal adds can use the 'addv' instruction. We model the cost of these
  2649. // instructions as twice a normal vector add, plus 1 for each legalization
  2650. // step (LT.first). This is the only arithmetic vector reduction operation for
  2651. // which we have an instruction.
  2652. // OR, XOR and AND costs should match the codegen from:
  2653. // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
  2654. // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
  2655. // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
  2656. static const CostTblEntry CostTblNoPairwise[]{
  2657. {ISD::ADD, MVT::v8i8, 2},
  2658. {ISD::ADD, MVT::v16i8, 2},
  2659. {ISD::ADD, MVT::v4i16, 2},
  2660. {ISD::ADD, MVT::v8i16, 2},
  2661. {ISD::ADD, MVT::v4i32, 2},
  2662. {ISD::ADD, MVT::v2i64, 2},
  2663. {ISD::OR, MVT::v8i8, 15},
  2664. {ISD::OR, MVT::v16i8, 17},
  2665. {ISD::OR, MVT::v4i16, 7},
  2666. {ISD::OR, MVT::v8i16, 9},
  2667. {ISD::OR, MVT::v2i32, 3},
  2668. {ISD::OR, MVT::v4i32, 5},
  2669. {ISD::OR, MVT::v2i64, 3},
  2670. {ISD::XOR, MVT::v8i8, 15},
  2671. {ISD::XOR, MVT::v16i8, 17},
  2672. {ISD::XOR, MVT::v4i16, 7},
  2673. {ISD::XOR, MVT::v8i16, 9},
  2674. {ISD::XOR, MVT::v2i32, 3},
  2675. {ISD::XOR, MVT::v4i32, 5},
  2676. {ISD::XOR, MVT::v2i64, 3},
  2677. {ISD::AND, MVT::v8i8, 15},
  2678. {ISD::AND, MVT::v16i8, 17},
  2679. {ISD::AND, MVT::v4i16, 7},
  2680. {ISD::AND, MVT::v8i16, 9},
  2681. {ISD::AND, MVT::v2i32, 3},
  2682. {ISD::AND, MVT::v4i32, 5},
  2683. {ISD::AND, MVT::v2i64, 3},
  2684. };
  2685. switch (ISD) {
  2686. default:
  2687. break;
  2688. case ISD::ADD:
  2689. if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
  2690. return (LT.first - 1) + Entry->Cost;
  2691. break;
  2692. case ISD::XOR:
  2693. case ISD::AND:
  2694. case ISD::OR:
  2695. const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
  2696. if (!Entry)
  2697. break;
  2698. auto *ValVTy = cast<FixedVectorType>(ValTy);
  2699. if (!ValVTy->getElementType()->isIntegerTy(1) &&
  2700. MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
  2701. isPowerOf2_32(ValVTy->getNumElements())) {
  2702. InstructionCost ExtraCost = 0;
  2703. if (LT.first != 1) {
  2704. // Type needs to be split, so there is an extra cost of LT.first - 1
  2705. // arithmetic ops.
  2706. auto *Ty = FixedVectorType::get(ValTy->getElementType(),
  2707. MTy.getVectorNumElements());
  2708. ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
  2709. ExtraCost *= LT.first - 1;
  2710. }
  2711. return Entry->Cost + ExtraCost;
  2712. }
  2713. break;
  2714. }
  2715. return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
  2716. }
  2717. InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
  2718. static const CostTblEntry ShuffleTbl[] = {
  2719. { TTI::SK_Splice, MVT::nxv16i8, 1 },
  2720. { TTI::SK_Splice, MVT::nxv8i16, 1 },
  2721. { TTI::SK_Splice, MVT::nxv4i32, 1 },
  2722. { TTI::SK_Splice, MVT::nxv2i64, 1 },
  2723. { TTI::SK_Splice, MVT::nxv2f16, 1 },
  2724. { TTI::SK_Splice, MVT::nxv4f16, 1 },
  2725. { TTI::SK_Splice, MVT::nxv8f16, 1 },
  2726. { TTI::SK_Splice, MVT::nxv2bf16, 1 },
  2727. { TTI::SK_Splice, MVT::nxv4bf16, 1 },
  2728. { TTI::SK_Splice, MVT::nxv8bf16, 1 },
  2729. { TTI::SK_Splice, MVT::nxv2f32, 1 },
  2730. { TTI::SK_Splice, MVT::nxv4f32, 1 },
  2731. { TTI::SK_Splice, MVT::nxv2f64, 1 },
  2732. };
  2733. // The code-generator is currently not able to handle scalable vectors
  2734. // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  2735. // it. This change will be removed when code-generation for these types is
  2736. // sufficiently reliable.
  2737. if (Tp->getElementCount() == ElementCount::getScalable(1))
  2738. return InstructionCost::getInvalid();
  2739. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  2740. Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
  2741. TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  2742. EVT PromotedVT = LT.second.getScalarType() == MVT::i1
  2743. ? TLI->getPromotedVTForPredicate(EVT(LT.second))
  2744. : LT.second;
  2745. Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
  2746. InstructionCost LegalizationCost = 0;
  2747. if (Index < 0) {
  2748. LegalizationCost =
  2749. getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
  2750. CmpInst::BAD_ICMP_PREDICATE, CostKind) +
  2751. getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
  2752. CmpInst::BAD_ICMP_PREDICATE, CostKind);
  2753. }
2754. // Predicated splices are promoted when lowering; see AArch64ISelLowering.cpp.
2755. // The cost is therefore computed on the promoted type.
  2756. if (LT.second.getScalarType() == MVT::i1) {
  2757. LegalizationCost +=
  2758. getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
  2759. TTI::CastContextHint::None, CostKind) +
  2760. getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
  2761. TTI::CastContextHint::None, CostKind);
  2762. }
  2763. const auto *Entry =
  2764. CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
  2765. assert(Entry && "Illegal Type for Splice");
  2766. LegalizationCost += Entry->Cost;
  2767. return LegalizationCost * LT.first;
  2768. }
  2769. InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
  2770. VectorType *Tp,
  2771. ArrayRef<int> Mask,
  2772. TTI::TargetCostKind CostKind,
  2773. int Index, VectorType *SubTp,
  2774. ArrayRef<const Value *> Args) {
  2775. std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  2776. // If we have a Mask, and the LT is being legalized somehow, split the Mask
  2777. // into smaller vectors and sum the cost of each shuffle.
  2778. if (!Mask.empty() && isa<FixedVectorType>(Tp) && LT.second.isVector() &&
  2779. Tp->getScalarSizeInBits() == LT.second.getScalarSizeInBits() &&
  2780. cast<FixedVectorType>(Tp)->getNumElements() >
  2781. LT.second.getVectorNumElements() &&
  2782. !Index && !SubTp) {
  2783. unsigned TpNumElts = cast<FixedVectorType>(Tp)->getNumElements();
  2784. assert(Mask.size() == TpNumElts && "Expected Mask and Tp size to match!");
  2785. unsigned LTNumElts = LT.second.getVectorNumElements();
  2786. unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts;
  2787. VectorType *NTp =
  2788. VectorType::get(Tp->getScalarType(), LT.second.getVectorElementCount());
  2789. InstructionCost Cost;
  2790. for (unsigned N = 0; N < NumVecs; N++) {
  2791. SmallVector<int> NMask;
  2792. // Split the existing mask into chunks of size LTNumElts. Track the source
  2793. // sub-vectors to ensure the result has at most 2 inputs.
  2794. unsigned Source1, Source2;
  2795. unsigned NumSources = 0;
  2796. for (unsigned E = 0; E < LTNumElts; E++) {
  2797. int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E]
  2798. : UndefMaskElem;
  2799. if (MaskElt < 0) {
  2800. NMask.push_back(UndefMaskElem);
  2801. continue;
  2802. }
  2803. // Calculate which source from the input this comes from and whether it
  2804. // is new to us.
  2805. unsigned Source = MaskElt / LTNumElts;
  2806. if (NumSources == 0) {
  2807. Source1 = Source;
  2808. NumSources = 1;
  2809. } else if (NumSources == 1 && Source != Source1) {
  2810. Source2 = Source;
  2811. NumSources = 2;
  2812. } else if (NumSources >= 2 && Source != Source1 && Source != Source2) {
  2813. NumSources++;
  2814. }
2815. // Add to the new mask. For the NumSources > 2 case these values are not
2816. // correct, but they are only used to compute the lane number modulo LTNumElts.
  2817. if (Source == Source1)
  2818. NMask.push_back(MaskElt % LTNumElts);
  2819. else if (Source == Source2)
  2820. NMask.push_back(MaskElt % LTNumElts + LTNumElts);
  2821. else
  2822. NMask.push_back(MaskElt % LTNumElts);
  2823. }
  2824. // If the sub-mask has at most 2 input sub-vectors then re-cost it using
  2825. // getShuffleCost. If not then cost it using the worst case.
  2826. if (NumSources <= 2)
  2827. Cost += getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc
  2828. : TTI::SK_PermuteTwoSrc,
  2829. NTp, NMask, CostKind, 0, nullptr, Args);
  2830. else if (any_of(enumerate(NMask), [&](const auto &ME) {
  2831. return ME.value() % LTNumElts == ME.index();
  2832. }))
  2833. Cost += LTNumElts - 1;
  2834. else
  2835. Cost += LTNumElts;
  2836. }
  2837. return Cost;
  2838. }
  2839. Kind = improveShuffleKindFromMask(Kind, Mask);
  2840. // Check for broadcast loads.
  2841. if (Kind == TTI::SK_Broadcast) {
  2842. bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]);
  2843. if (IsLoad && LT.second.isVector() &&
  2844. isLegalBroadcastLoad(Tp->getElementType(),
  2845. LT.second.getVectorElementCount()))
  2846. return 0; // broadcast is handled by ld1r
  2847. }
  2848. // If we have 4 elements for the shuffle and a Mask, get the cost straight
  2849. // from the perfect shuffle tables.
  2850. if (Mask.size() == 4 && Tp->getElementCount() == ElementCount::getFixed(4) &&
  2851. (Tp->getScalarSizeInBits() == 16 || Tp->getScalarSizeInBits() == 32) &&
  2852. all_of(Mask, [](int E) { return E < 8; }))
  2853. return getPerfectShuffleCost(Mask);
  2854. if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
  2855. Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
  2856. Kind == TTI::SK_Reverse || Kind == TTI::SK_Splice) {
  2857. static const CostTblEntry ShuffleTbl[] = {
  2858. // Broadcast shuffle kinds can be performed with 'dup'.
  2859. {TTI::SK_Broadcast, MVT::v8i8, 1},
  2860. {TTI::SK_Broadcast, MVT::v16i8, 1},
  2861. {TTI::SK_Broadcast, MVT::v4i16, 1},
  2862. {TTI::SK_Broadcast, MVT::v8i16, 1},
  2863. {TTI::SK_Broadcast, MVT::v2i32, 1},
  2864. {TTI::SK_Broadcast, MVT::v4i32, 1},
  2865. {TTI::SK_Broadcast, MVT::v2i64, 1},
  2866. {TTI::SK_Broadcast, MVT::v2f32, 1},
  2867. {TTI::SK_Broadcast, MVT::v4f32, 1},
  2868. {TTI::SK_Broadcast, MVT::v2f64, 1},
  2869. // Transpose shuffle kinds can be performed with 'trn1/trn2' and
  2870. // 'zip1/zip2' instructions.
  2871. {TTI::SK_Transpose, MVT::v8i8, 1},
  2872. {TTI::SK_Transpose, MVT::v16i8, 1},
  2873. {TTI::SK_Transpose, MVT::v4i16, 1},
  2874. {TTI::SK_Transpose, MVT::v8i16, 1},
  2875. {TTI::SK_Transpose, MVT::v2i32, 1},
  2876. {TTI::SK_Transpose, MVT::v4i32, 1},
  2877. {TTI::SK_Transpose, MVT::v2i64, 1},
  2878. {TTI::SK_Transpose, MVT::v2f32, 1},
  2879. {TTI::SK_Transpose, MVT::v4f32, 1},
  2880. {TTI::SK_Transpose, MVT::v2f64, 1},
  2881. // Select shuffle kinds.
  2882. // TODO: handle vXi8/vXi16.
  2883. {TTI::SK_Select, MVT::v2i32, 1}, // mov.
  2884. {TTI::SK_Select, MVT::v4i32, 2}, // rev+trn (or similar).
  2885. {TTI::SK_Select, MVT::v2i64, 1}, // mov.
  2886. {TTI::SK_Select, MVT::v2f32, 1}, // mov.
  2887. {TTI::SK_Select, MVT::v4f32, 2}, // rev+trn (or similar).
  2888. {TTI::SK_Select, MVT::v2f64, 1}, // mov.
  2889. // PermuteSingleSrc shuffle kinds.
  2890. {TTI::SK_PermuteSingleSrc, MVT::v2i32, 1}, // mov.
  2891. {TTI::SK_PermuteSingleSrc, MVT::v4i32, 3}, // perfectshuffle worst case.
  2892. {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // mov.
  2893. {TTI::SK_PermuteSingleSrc, MVT::v2f32, 1}, // mov.
  2894. {TTI::SK_PermuteSingleSrc, MVT::v4f32, 3}, // perfectshuffle worst case.
  2895. {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // mov.
  2896. {TTI::SK_PermuteSingleSrc, MVT::v4i16, 3}, // perfectshuffle worst case.
  2897. {TTI::SK_PermuteSingleSrc, MVT::v4f16, 3}, // perfectshuffle worst case.
  2898. {TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3}, // same
  2899. {TTI::SK_PermuteSingleSrc, MVT::v8i16, 8}, // constpool + load + tbl
  2900. {TTI::SK_PermuteSingleSrc, MVT::v8f16, 8}, // constpool + load + tbl
  2901. {TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8}, // constpool + load + tbl
  2902. {TTI::SK_PermuteSingleSrc, MVT::v8i8, 8}, // constpool + load + tbl
  2903. {TTI::SK_PermuteSingleSrc, MVT::v16i8, 8}, // constpool + load + tbl
  2904. // Reverse can be lowered with `rev`.
  2905. {TTI::SK_Reverse, MVT::v2i32, 1}, // REV64
  2906. {TTI::SK_Reverse, MVT::v4i32, 2}, // REV64; EXT
  2907. {TTI::SK_Reverse, MVT::v2i64, 1}, // EXT
  2908. {TTI::SK_Reverse, MVT::v2f32, 1}, // REV64
  2909. {TTI::SK_Reverse, MVT::v4f32, 2}, // REV64; EXT
  2910. {TTI::SK_Reverse, MVT::v2f64, 1}, // EXT
  2911. {TTI::SK_Reverse, MVT::v8f16, 2}, // REV64; EXT
  2912. {TTI::SK_Reverse, MVT::v8i16, 2}, // REV64; EXT
  2913. {TTI::SK_Reverse, MVT::v16i8, 2}, // REV64; EXT
  2914. {TTI::SK_Reverse, MVT::v4f16, 1}, // REV64
  2915. {TTI::SK_Reverse, MVT::v4i16, 1}, // REV64
  2916. {TTI::SK_Reverse, MVT::v8i8, 1}, // REV64
2917. // Splices can all be lowered as `ext`.
  2918. {TTI::SK_Splice, MVT::v2i32, 1},
  2919. {TTI::SK_Splice, MVT::v4i32, 1},
  2920. {TTI::SK_Splice, MVT::v2i64, 1},
  2921. {TTI::SK_Splice, MVT::v2f32, 1},
  2922. {TTI::SK_Splice, MVT::v4f32, 1},
  2923. {TTI::SK_Splice, MVT::v2f64, 1},
  2924. {TTI::SK_Splice, MVT::v8f16, 1},
  2925. {TTI::SK_Splice, MVT::v8bf16, 1},
  2926. {TTI::SK_Splice, MVT::v8i16, 1},
  2927. {TTI::SK_Splice, MVT::v16i8, 1},
  2928. {TTI::SK_Splice, MVT::v4bf16, 1},
  2929. {TTI::SK_Splice, MVT::v4f16, 1},
  2930. {TTI::SK_Splice, MVT::v4i16, 1},
  2931. {TTI::SK_Splice, MVT::v8i8, 1},
  2932. // Broadcast shuffle kinds for scalable vectors
  2933. {TTI::SK_Broadcast, MVT::nxv16i8, 1},
  2934. {TTI::SK_Broadcast, MVT::nxv8i16, 1},
  2935. {TTI::SK_Broadcast, MVT::nxv4i32, 1},
  2936. {TTI::SK_Broadcast, MVT::nxv2i64, 1},
  2937. {TTI::SK_Broadcast, MVT::nxv2f16, 1},
  2938. {TTI::SK_Broadcast, MVT::nxv4f16, 1},
  2939. {TTI::SK_Broadcast, MVT::nxv8f16, 1},
  2940. {TTI::SK_Broadcast, MVT::nxv2bf16, 1},
  2941. {TTI::SK_Broadcast, MVT::nxv4bf16, 1},
  2942. {TTI::SK_Broadcast, MVT::nxv8bf16, 1},
  2943. {TTI::SK_Broadcast, MVT::nxv2f32, 1},
  2944. {TTI::SK_Broadcast, MVT::nxv4f32, 1},
  2945. {TTI::SK_Broadcast, MVT::nxv2f64, 1},
  2946. {TTI::SK_Broadcast, MVT::nxv16i1, 1},
  2947. {TTI::SK_Broadcast, MVT::nxv8i1, 1},
  2948. {TTI::SK_Broadcast, MVT::nxv4i1, 1},
  2949. {TTI::SK_Broadcast, MVT::nxv2i1, 1},
  2950. // Handle the cases for vector.reverse with scalable vectors
  2951. {TTI::SK_Reverse, MVT::nxv16i8, 1},
  2952. {TTI::SK_Reverse, MVT::nxv8i16, 1},
  2953. {TTI::SK_Reverse, MVT::nxv4i32, 1},
  2954. {TTI::SK_Reverse, MVT::nxv2i64, 1},
  2955. {TTI::SK_Reverse, MVT::nxv2f16, 1},
  2956. {TTI::SK_Reverse, MVT::nxv4f16, 1},
  2957. {TTI::SK_Reverse, MVT::nxv8f16, 1},
  2958. {TTI::SK_Reverse, MVT::nxv2bf16, 1},
  2959. {TTI::SK_Reverse, MVT::nxv4bf16, 1},
  2960. {TTI::SK_Reverse, MVT::nxv8bf16, 1},
  2961. {TTI::SK_Reverse, MVT::nxv2f32, 1},
  2962. {TTI::SK_Reverse, MVT::nxv4f32, 1},
  2963. {TTI::SK_Reverse, MVT::nxv2f64, 1},
  2964. {TTI::SK_Reverse, MVT::nxv16i1, 1},
  2965. {TTI::SK_Reverse, MVT::nxv8i1, 1},
  2966. {TTI::SK_Reverse, MVT::nxv4i1, 1},
  2967. {TTI::SK_Reverse, MVT::nxv2i1, 1},
  2968. };
  2969. if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
  2970. return LT.first * Entry->Cost;
  2971. }
  2972. if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
  2973. return getSpliceCost(Tp, Index);
  2974. // Inserting a subvector can often be done with either a D, S or H register
  2975. // move, so long as the inserted vector is "aligned".
  2976. if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() &&
  2977. LT.second.getSizeInBits() <= 128 && SubTp) {
  2978. std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
  2979. if (SubLT.second.isVector()) {
  2980. int NumElts = LT.second.getVectorNumElements();
  2981. int NumSubElts = SubLT.second.getVectorNumElements();
  2982. if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
  2983. return SubLT.first;
  2984. }
  2985. }
  2986. return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
  2987. }
  2988. bool AArch64TTIImpl::preferPredicateOverEpilogue(
  2989. Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
  2990. TargetLibraryInfo *TLI, DominatorTree *DT, LoopVectorizationLegality *LVL,
  2991. InterleavedAccessInfo *IAI) {
  2992. if (!ST->hasSVE() || TailFoldingKindLoc == TailFoldingKind::TFDisabled)
  2993. return false;
  2994. // We don't currently support vectorisation with interleaving for SVE - with
  2995. // such loops we're better off not using tail-folding. This gives us a chance
  2996. // to fall back on fixed-width vectorisation using NEON's ld2/st2/etc.
  2997. if (IAI->hasGroups())
  2998. return false;
  2999. TailFoldingKind Required; // Defaults to 0.
  3000. if (LVL->getReductionVars().size())
  3001. Required.add(TailFoldingKind::TFReductions);
  3002. if (LVL->getFixedOrderRecurrences().size())
  3003. Required.add(TailFoldingKind::TFRecurrences);
  3004. if (!Required)
  3005. Required.add(TailFoldingKind::TFSimple);
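// Only allow tail-folding if the requested mode covers everything this loop
// requires.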
  3006. return (TailFoldingKindLoc & Required) == Required;
  3007. }
  3008. InstructionCost
  3009. AArch64TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
  3010. int64_t BaseOffset, bool HasBaseReg,
  3011. int64_t Scale, unsigned AddrSpace) const {
  3012. // Scaling factors are not free at all.
  3013. // Operands | Rt Latency
  3014. // -------------------------------------------
  3015. // Rt, [Xn, Xm] | 4
  3016. // -------------------------------------------
  3017. // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5
  3018. // Rt, [Xn, Wm, <extend> #imm] |
  3019. TargetLoweringBase::AddrMode AM;
  3020. AM.BaseGV = BaseGV;
  3021. AM.BaseOffs = BaseOffset;
  3022. AM.HasBaseReg = HasBaseReg;
  3023. AM.Scale = Scale;
  3024. if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
  3025. // Scale represents reg2 * scale, thus account for 1 if
  3026. // it is not equal to 0 or 1.
  3027. return AM.Scale != 0 && AM.Scale != 1;
  3028. return -1;
  3029. }