//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;
/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to
  // determine if this is safe. For example, the use could be in dynamically
  // unreached code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //   If V is a phi node, we can call this on each of its operands.
  //   "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}
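
// Illustrative arithmetic for the shift fold above (an added example, not
// part of the upstream file): with A = 5 and B = 2, the value
// (1 << 5) >>u 2 == 32 >>u 2 == 8, and 1 << (5 - 2) == 8. If B were greater
// than A, the lshr would produce 0, contradicting the known-non-zero context.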

// TODO: This is a specific form of a much more general pattern.
//       We could detect a select with any binop identity constant, or we
//       could use SimplifyBinOp to see if either arm of the select reduces.
//       But that needs to be done carefully and/or while removing potential
//       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp)))) {
    bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
    Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
    return Builder.CreateSelect(Cond, OtherOp, Neg);
  }
  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp)))) {
    bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
    Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
    return Builder.CreateSelect(Cond, Neg, OtherOp);
  }

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}
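
// Worked IR example for the first pattern above (illustrative only; the
// value names are hypothetical):
//   %s = select i1 %c, i32 1, i32 -1
//   %m = mul i32 %s, %x
// becomes
//   %neg = sub i32 0, %x
//   %m = select i1 %c, i32 %x, i32 %neg
// When %c is true, 1 * %x == %x; when false, -1 * %x == 0 - %x.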

/// Reduce integer multiplication patterns that contain a (+/-1 << Z) factor.
/// Callers are expected to call this twice to handle commuted patterns.
static Value *foldMulShl1(BinaryOperator &Mul, bool CommuteOperands,
                          InstCombiner::BuilderTy &Builder) {
  Value *X = Mul.getOperand(0), *Y = Mul.getOperand(1);
  if (CommuteOperands)
    std::swap(X, Y);

  const bool HasNSW = Mul.hasNoSignedWrap();
  const bool HasNUW = Mul.hasNoUnsignedWrap();

  // X * (1 << Z) --> X << Z
  Value *Z;
  if (match(Y, m_Shl(m_One(), m_Value(Z)))) {
    bool PropagateNSW = HasNSW && cast<ShlOperator>(Y)->hasNoSignedWrap();
    return Builder.CreateShl(X, Z, Mul.getName(), HasNUW, PropagateNSW);
  }

  // Similar to above, but an increment of the shifted value becomes an add:
  // X * ((1 << Z) + 1) --> (X * (1 << Z)) + X --> (X << Z) + X
  // This increases uses of X, so it may require a freeze, but that is still
  // expected to be an improvement because it removes the multiply.
  BinaryOperator *Shift;
  if (match(Y, m_OneUse(m_Add(m_BinOp(Shift), m_One()))) &&
      match(Shift, m_OneUse(m_Shl(m_One(), m_Value(Z))))) {
    bool PropagateNSW = HasNSW && Shift->hasNoSignedWrap();
    Value *FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl", HasNUW, PropagateNSW);
    return Builder.CreateAdd(Shl, FrX, Mul.getName(), HasNUW, PropagateNSW);
  }

  // Similar to above, but a decrement of the shifted value is disguised as
  // 'not' and becomes a sub:
  // X * (~(-1 << Z)) --> X * ((1 << Z) - 1) --> (X << Z) - X
  // This increases uses of X, so it may require a freeze, but that is still
  // expected to be an improvement because it removes the multiply.
  if (match(Y, m_OneUse(m_Not(m_OneUse(m_Shl(m_AllOnes(), m_Value(Z))))))) {
    Value *FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl");
    return Builder.CreateSub(Shl, FrX, Mul.getName());
  }

  return nullptr;
}
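
// Illustrative arithmetic for the two folds above (example values, not from
// the source): with Z = 3, the multiplier (1 << 3) + 1 == 9, and
// X * 9 == (X << 3) + X; likewise ~(-1 << 3) == 7, and X * 7 == (X << 3) - X.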

Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V =
          simplifyMulInst(Op0, Op1, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  Type *Ty = I.getType();
  const unsigned BitWidth = Ty->getScalarSizeInBits();
  const bool HasNSW = I.hasNoSignedWrap();
  const bool HasNUW = I.hasNoUnsignedWrap();

  // X * -1 --> 0 - X
  if (match(Op1, m_AllOnes())) {
    return HasNSW ? BinaryOperator::CreateNSWNeg(Op0)
                  : BinaryOperator::CreateNeg(Op0);
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (HasNUW && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (HasNSW && Mul->hasNoSignedWrap() && Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (HasNUW)
          Shl->setHasNoUnsignedWrap();
        if (HasNSW) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
    // Interpret  X * (-1<<C)  as  (-X) * (1<<C)  and try to sink the negation.
    // The "* (1<<C)" thus becomes a potential shifting opportunity.
    if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
      return BinaryOperator::CreateMul(
          NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());

    // Try to convert multiply of extended operand to narrow negate and shift
    // for better analysis.
    // This is valid if the shift amount (trailing zeros in the multiplier
    // constant) clears more high bits than the bitwidth difference between
    // source and destination types:
    // ({z/s}ext X) * (-1<<C) --> (zext (-X)) << C
    const APInt *NegPow2C;
    Value *X;
    if (match(Op0, m_ZExtOrSExt(m_Value(X))) &&
        match(Op1, m_APIntAllowUndef(NegPow2C))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned ShiftAmt = NegPow2C->countTrailingZeros();
      if (ShiftAmt >= BitWidth - SrcWidth) {
        Value *N = Builder.CreateNeg(X, X->getName() + ".neg");
        Value *Z = Builder.CreateZExt(N, Ty, N->getName() + ".z");
        return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
      }
    }
  }
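
  // Concrete instance of the extended-operand fold above (illustrative): for
  // i8 %x zero-extended to i32 and multiplier -1 << 24 (0xFF000000),
  // ShiftAmt = 24 >= 32 - 8, so (zext i8 %x) * 0xFF000000 becomes
  // (zext i8 (0 - %x)) << 24; the shift discards every bit where the wide
  // negation and the narrow negation could differ.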

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  Constant *MulC;
  if (match(Op1, m_ImmConstant(MulC))) {
    // Canonicalize (X+C1)*MulC -> X*MulC+C1*MulC.
    // Canonicalize (X|C1)*MulC -> X*MulC+C1*MulC.
    Value *X;
    Constant *C1;
    if ((match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(C1))))) ||
        (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C1)))) &&
         haveNoCommonBitsSet(X, C1, DL, &AC, &I, &DT))) {
      // C1*MulC simplifies to a tidier constant.
      Value *NewC = Builder.CreateMul(C1, MulC);
      auto *BOp0 = cast<BinaryOperator>(Op0);
      bool Op0NUW =
          (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
      Value *NewMul = Builder.CreateMul(X, MulC);
      auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
      if (HasNUW && Op0NUW) {
        // If NewMulBO is constant we also can set BO to nuw.
        if (auto *NewMulBO = dyn_cast<BinaryOperator>(NewMul))
          NewMulBO->setHasNoUnsignedWrap();
        BO->setHasNoUnsignedWrap();
      }
      return BO;
    }
  }

  // abs(X) * abs(X) -> X * X
  // nabs(X) * nabs(X) -> X * X
  if (Op0 == Op1) {
    Value *X, *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return BinaryOperator::CreateMul(X, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return BinaryOperator::CreateMul(X, X);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (HasNSW && cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      // X must be frozen because we are increasing its number of uses.
      Value *XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
      Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(XFreeze, Rem);
      return BinaryOperator::CreateSub(Rem, XFreeze);
    }
  }
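
  // Numeric sanity check for the fold above (example values): with X = 7 and
  // Y = 3 unsigned, (7 u/ 3) * 3 == 2 * 3 == 6, and 7 - (7 u% 3) == 7 - 1
  // == 6, so the divide-multiply pair collapses to a subtract of the
  // remainder.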

  // Fold the following two scenarios:
  //   1) i1 mul -> i1 and.
  //   2) X * Y --> X & Y, iff X, Y can be only {0,1}.
  // Note: We could use known bits to generalize this and related patterns with
  // shifts/truncs
  if (Ty->isIntOrIntVectorTy(1) ||
      (match(Op0, m_And(m_Value(), m_One())) &&
       match(Op1, m_And(m_Value(), m_One()))))
    return BinaryOperator::CreateAnd(Op0, Op1);

  if (Value *R = foldMulShl1(I, /* CommuteOperands */ false, Builder))
    return replaceInstUsesWith(I, R);
  if (Value *R = foldMulShl1(I, /* CommuteOperands */ true, Builder))
    return replaceInstUsesWith(I, R);

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, Ty);
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1 == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, Ty);
  }

  // (zext bool X) * Y --> X ? Y : 0
  // Y * (zext bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::getNullValue(Ty));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::getNullValue(Ty));

  Constant *ImmC;
  if (match(Op1, m_ImmConstant(ImmC))) {
    // (sext bool X) * C --> X ? -C : 0
    if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      Constant *NegC = ConstantExpr::getNeg(ImmC);
      return SelectInst::Create(X, NegC, ConstantInt::getNullValue(Ty));
    }

    // (ashr i32 X, 31) * C --> (X < 0) ? -C : 0
    const APInt *C;
    if (match(Op0, m_OneUse(m_AShr(m_Value(X), m_APInt(C)))) &&
        *C == C->getBitWidth() - 1) {
      Constant *NegC = ConstantExpr::getNeg(ImmC);
      Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
      return SelectInst::Create(IsNeg, NegC, ConstantInt::getNullValue(Ty));
    }
  }

  // (lshr X, 31) * Y --> (X < 0) ? Y : 0
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  const APInt *C;
  if (match(&I, m_c_BinOp(m_LShr(m_Value(X), m_APInt(C)), m_Value(Y))) &&
      *C == C->getBitWidth() - 1) {
    Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
    return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
  }

  // (and X, 1) * Y --> (trunc X) ? Y : 0
  if (match(&I, m_c_BinOp(m_OneUse(m_And(m_Value(X), m_One())), m_Value(Y)))) {
    Value *Tr = Builder.CreateTrunc(X, CmpInst::makeCmpResultType(Ty));
    return SelectInst::Create(Tr, Y, ConstantInt::getNullValue(Ty));
  }

  // ((ashr X, 31) | 1) * X --> abs(X)
  // X * ((ashr X, 31) | 1) --> abs(X)
  if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
                                      m_SpecificIntAllowUndef(BitWidth - 1)),
                               m_One()),
                          m_Deferred(X)))) {
    Value *Abs = Builder.CreateBinaryIntrinsic(
        Intrinsic::abs, X, ConstantInt::getBool(I.getContext(), HasNSW));
    Abs->takeName(&I);
    return replaceInstUsesWith(I, Abs);
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!HasNSW && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!HasNUW && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
  BinaryOperator::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Expected fmul or fdiv");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  // -X * -Y --> X * Y
  // -X / -Y --> X / Y
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);

  // fabs(X) * fabs(X) -> X * X
  // fabs(X) / fabs(X) -> X / X
  if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);

  // fabs(X) * fabs(Y) --> fabs(X * Y)
  // fabs(X) / fabs(Y) --> fabs(X / Y)
  if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
    Fabs->takeName(&I);
    return replaceInstUsesWith(I, Fabs);
  }

  return nullptr;
}
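
// Why these sign-bit folds are exact (informal note added for exposition):
// IEEE-754 multiplication and division satisfy |X op Y| == |X| op |Y|, and
// the result's sign is the XOR of the operand signs, so negating both
// operands or taking fabs of both operands commutes with the operation; no
// fast-math flags are needed for the rewrite.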

Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
  if (Value *V = simplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return UnaryOperator::CreateFNegFMF(Op0, &I);

  // With no-nans: X * 0.0 --> copysign(0.0, X)
  if (I.hasNoNaNs() && match(Op1, m_PosZeroFP())) {
    CallInst *CopySign = Builder.CreateIntrinsic(Intrinsic::copysign,
                                                 {I.getType()}, {Op1, Op0}, &I);
    return replaceInstUsesWith(I, CopySign);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFMulFMF(X, NegC, &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 =
            ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL);
        if (CC1 && CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 =
            ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C1, DL);
        if (CDivC1 && CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC =
            ConstantFoldBinaryOpOperands(Instruction::FDiv, C1, C, DL);
        if (C1DivC && Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        if (Constant *CC1 = ConstantFoldBinaryOpOperands(
                Instruction::FMul, C, C1, DL)) {
          Value *XC = Builder.CreateFMulFMF(X, C, &I);
          return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
        }
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        if (Constant *CC1 = ConstantFoldBinaryOpOperands(
                Instruction::FMul, C, C1, DL)) {
          Value *XC = Builder.CreateFMulFMF(X, C, &I);
          return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
        }
      }
    }

    Value *Z;
    if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
                           m_Value(Z)))) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands
    // are negative (in that case, we should return NaN).
    if (I.hasNoNaNs() && match(Op0, m_OneUse(m_Sqrt(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Sqrt(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // The following transforms are done irrespective of the number of uses
    // for the expression "1.0/sqrt(X)".
    //  1) 1.0/sqrt(X) * X -> X/sqrt(X)
    //  2) X * 1.0/sqrt(X) -> X/sqrt(X)
    // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
    // has the necessary (reassoc) fast-math-flags.
    if (I.hasNoSignedZeros() &&
        match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Sqrt(m_Value(X))) && Op1 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);
    if (I.hasNoSignedZeros() &&
        match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Sqrt(m_Value(X))) && Op0 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);

    // Like the similar transform in instsimplify, this requires 'nsz' because
    // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
        Op0->hasNUses(2)) {
      // Peek through fdiv to find squaring of square root:
      // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
      if (match(Op0, m_FDiv(m_Value(X), m_Sqrt(m_Value(Y))))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(XX, Y, &I);
      }
      // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
      if (match(Op0, m_FDiv(m_Sqrt(m_Value(Y)), m_Value(X)))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(Y, XX, &I);
      }
    }

    // pow(X, Y) * X --> pow(X, Y+1)
    // X * pow(X, Y) --> pow(X, Y+1)
    if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Value(X),
                                                                m_Value(Y))),
                           m_Deferred(X)))) {
      Value *Y1 =
          Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), 1.0), &I);
      Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, Y1, &I);
      return replaceInstUsesWith(I, Pow);
    }

    if (I.isOnlyUserOfAnyOperand()) {
      // pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
      if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
          match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
        auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
        return replaceInstUsesWith(I, NewPow);
      }

      // pow(X, Y) * pow(Z, Y) -> pow(X * Z, Y)
      if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
          match(Op1, m_Intrinsic<Intrinsic::pow>(m_Value(Z), m_Specific(Y)))) {
        auto *XZ = Builder.CreateFMulFMF(X, Z, &I);
        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ, Y, &I);
        return replaceInstUsesWith(I, NewPow);
      }

      // powi(x, y) * powi(x, z) -> powi(x, y + z)
      if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
          match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
          Y->getType() == Z->getType()) {
        auto *YZ = Builder.CreateAdd(Y, Z);
        auto *NewPow = Builder.CreateIntrinsic(
            Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
        return replaceInstUsesWith(I, NewPow);
      }

      // exp(X) * exp(Y) -> exp(X + Y)
      if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
          match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
        Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
        return replaceInstUsesWith(I, Exp);
      }

      // exp2(X) * exp2(Y) -> exp2(X + Y)
      if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
          match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
        Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
        return replaceInstUsesWith(I, Exp2);
      }
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //  1) to form a power expression (of X).
    //  2) potentially shorten the critical path: After transformation, the
    //     latency of the instruction Y is amortized by the expression of X*X,
    //     and therefore Y is in a "less critical" position compared to what it
    //     was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  // Simplify FMUL recurrences starting with 0.0 to 0.0 if nnan and nsz are set.
  // Given a phi node with an entry value of 0.0 that is used in an fmul, we
  // can safely replace the fmul with 0.0 and eliminate the loop operation.
  PHINode *PN = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (matchSimpleRecurrence(&I, PN, Start, Step) && I.hasNoNaNs() &&
      I.hasNoSignedZeros() && match(Start, m_Zero()))
    return replaceInstUsesWith(I, Start);

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with this,
  // early exit.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we found an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Use &Op : BBI->operands()) {
      if (Op == SI) {
        replaceUse(Op, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (Op == SelectCond) {
        replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}
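
// Illustrative IR for this fold (hypothetical names): in
//   %d = select i1 %c, i32 0, i32 %y
//   %r = udiv i32 %x, %d
// the divide would be immediate UB if %c were true, so %c must be false on
// any executing path; the udiv becomes 'udiv i32 %x, %y', and uses of %c in
// preceding instructions of the block may be replaced with 'false'.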

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isZero())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}
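
// Example behavior (illustrative): isMultiple(12, 4, Q, false) sets Q = 3 and
// returns true; isMultiple(10, 4, Q, false) leaves remainder 2 and returns
// false. The INT_MIN / -1 check matters because that signed quotient is not
// representable at the same bit width.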

static Instruction *foldIDivShl(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::UDiv) &&
         "Expected integer divide");

  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();

  Instruction *Ret = nullptr;
  Value *X, *Y, *Z;

  // With appropriate no-wrap constraints, remove a common factor in the
  // dividend and divisor that is disguised as a left-shifted value.
  if (match(Op1, m_Shl(m_Value(X), m_Value(Z))) &&
      match(Op0, m_c_Mul(m_Specific(X), m_Value(Y)))) {
    // Both operands must have the matching no-wrap for this kind of division.
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    auto *Shl = cast<OverflowingBinaryOperator>(Op1);
    bool HasNUW = Mul->hasNoUnsignedWrap() && Shl->hasNoUnsignedWrap();
    bool HasNSW = Mul->hasNoSignedWrap() && Shl->hasNoSignedWrap();

    // (X * Y) u/ (X << Z) --> Y u>> Z
    if (!IsSigned && HasNUW)
      Ret = BinaryOperator::CreateLShr(Y, Z);

    // (X * Y) s/ (X << Z) --> Y s/ (1 << Z)
    if (IsSigned && HasNSW && (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *Shl = Builder.CreateShl(ConstantInt::get(Ty, 1), Z);
      Ret = BinaryOperator::CreateSDiv(Y, Shl);
    }
  }

  // With appropriate no-wrap constraints, remove a common factor in the
  // dividend and divisor that is disguised as a left-shift amount.
  if (match(Op0, m_Shl(m_Value(X), m_Value(Z))) &&
      match(Op1, m_Shl(m_Value(Y), m_Specific(Z)))) {
    auto *Shl0 = cast<OverflowingBinaryOperator>(Op0);
    auto *Shl1 = cast<OverflowingBinaryOperator>(Op1);

    // For unsigned div, we need 'nuw' on both shifts or
    // 'nsw' on both shifts + 'nuw' on the dividend.
    // (X << Z) / (Y << Z) --> X / Y
    if (!IsSigned &&
        ((Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap()) ||
         (Shl0->hasNoUnsignedWrap() && Shl0->hasNoSignedWrap() &&
          Shl1->hasNoSignedWrap())))
      Ret = BinaryOperator::CreateUDiv(X, Y);

    // For signed div, we need 'nsw' on both shifts + 'nuw' on the divisor.
    // (X << Z) / (Y << Z) --> X / Y
    if (IsSigned && Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap() &&
        Shl1->hasNoUnsignedWrap())
      Ret = BinaryOperator::CreateSDiv(X, Y);
  }

  if (!Ret)
    return nullptr;

  Ret->setIsExact(I.isExact());
  return Ret;
}
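
// Quick numeric check of the first fold (example values): with X = 3, Y = 8,
// Z = 1 and nuw on both operands, (3 * 8) u/ (3 << 1) == 24 u/ 6 == 4, which
// equals 8 u>> 1; the common factor X and the power of two cancel exactly
// because neither operand wrapped.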

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all div ops:
  // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2  -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth() - 1)) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth()))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isZero()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }
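
  // Worked constants for the folds above (illustrative): (X u/ 4) u/ 5 folds
  // to X u/ 20 since 4 * 5 does not overflow, and (X *nuw 6) u/ 3 folds to
  // X *nuw 2 because 6 is a multiple of 3 and nuw guarantees the product was
  // computed without wrapping.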

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // 1 / 0 --> undef ; 1 / 1 --> 1 ; 1 / -1 --> -1 ; 1 / anything else --> 0
      // (Op1 + 1) u< 3 ? Op1 : 0
      // Op1 must be frozen because we are increasing its number of uses.
      Value *F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
      Value *Inc = Builder.CreateAdd(F1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, F1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  // (X << Z) / (X * Y) -> (1 << Z) / Y
  // TODO: Handle sdiv.
  if (!IsSigned && Op1->hasOneUse() &&
      match(Op0, m_NUWShl(m_Value(X), m_Value(Z))) &&
      match(Op1, m_c_Mul(m_Specific(X), m_Value(Y))))
    if (cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap()) {
      Instruction *NewDiv = BinaryOperator::CreateUDiv(
          Builder.CreateShl(ConstantInt::get(Ty, 1), Z, "", /*NUW*/ true), Y);
      NewDiv->setIsExact(I.isExact());
      return NewDiv;
    }

  if (Instruction *R = foldIDivShl(I, Builder))
    return R;

  // With the appropriate no-wrap constraint, remove a multiply by the divisor
  // after peeking through another divide:
  // ((Op1 * X) / Y) / Op1 --> X / Y
  if (match(Op0, m_BinOp(I.getOpcode(), m_c_Mul(m_Specific(Op1), m_Value(X)),
                         m_Value(Y)))) {
    auto *InnerDiv = cast<PossiblyExactOperator>(Op0);
    auto *Mul = cast<OverflowingBinaryOperator>(InnerDiv->getOperand(0));
    Instruction *NewDiv = nullptr;
    if (!IsSigned && Mul->hasNoUnsignedWrap())
      NewDiv = BinaryOperator::CreateUDiv(X, Y);
    else if (IsSigned && Mul->hasNoSignedWrap())
      NewDiv = BinaryOperator::CreateSDiv(X, Y);

    // Exact propagates only if both of the original divides are exact.
    if (NewDiv) {
      NewDiv->setIsExact(I.isExact() && InnerDiv->isExact());
      return NewDiv;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

// Take the exact integer log2 of the value. If DoFold is true, create the
// actual instructions, otherwise return a non-null dummy value. Return nullptr
// on failure.
static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
                       bool DoFold) {
  auto IfFold = [DoFold](function_ref<Value *()> Fn) {
    if (!DoFold)
      return reinterpret_cast<Value *>(-1);
    return Fn();
  };

  // FIXME: assert that Op1 isn't/doesn't contain undef.

  // log2(2^C) -> C
  if (match(Op, m_Power2()))
    return IfFold([&]() {
      Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op));
      if (!C)
        llvm_unreachable("Failed to constant fold udiv -> logbase2");
      return C;
    });

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return nullptr;

  // log2(zext X) -> zext log2(X)
  // FIXME: Require one use?
  Value *X, *Y;
  if (match(Op, m_ZExt(m_Value(X))))
    if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
      return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });

  // log2(X << Y) -> log2(X) + Y
  // FIXME: Require one use unless X is 1?
  if (match(Op, m_Shl(m_Value(X), m_Value(Y))))
    if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
      return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });

  // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
  // FIXME: missed optimization: if one of the hands of select is/contains
  //        undef, just directly pick the other one.
  // FIXME: can both hands contain undef?
  // FIXME: Require one use?
  if (SelectInst *SI = dyn_cast<SelectInst>(Op))
    if (Value *LogX = takeLog2(Builder, SI->getOperand(1), Depth, DoFold))
      if (Value *LogY = takeLog2(Builder, SI->getOperand(2), Depth, DoFold))
        return IfFold([&]() {
          return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
        });

  // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
  // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op);
  if (MinMax && MinMax->hasOneUse() && !MinMax->isSigned())
    if (Value *LogX = takeLog2(Builder, MinMax->getLHS(), Depth, DoFold))
      if (Value *LogY = takeLog2(Builder, MinMax->getRHS(), Depth, DoFold))
        return IfFold([&]() {
          return Builder.CreateBinaryIntrinsic(
              MinMax->getIntrinsicID(), LogX, LogY);
        });

  return nullptr;
}
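
// Example recursion (illustrative): takeLog2 of 'shl i32 1, %y' first matches
// the shl rule, recurses into the constant 1 (a power of two with log2 == 0),
// and builds 'add 0, %y', i.e. %y itself; the udiv visitor below can then
// rewrite 'udiv %x, (shl 1, %y)' as 'lshr %x, %y'.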

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if (isa<Instruction>(N) && match(N, m_OneUse(m_ZExt(m_Value(X)))) &&
      match(D, m_Constant(C))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    return new ZExtInst(Builder.CreateBinOp(Opcode, X, TruncC), Ty);
  }
  if (isa<Instruction>(D) && match(D, m_OneUse(m_ZExt(m_Value(X)))) &&
      match(N, m_Constant(C))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    return new ZExtInst(Builder.CreateBinOp(Opcode, TruncC, X), Ty);
  }

  return nullptr;
}
  1070. Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  1071. if (Value *V = simplifyUDivInst(I.getOperand(0), I.getOperand(1), I.isExact(),
  1072. SQ.getWithInstruction(&I)))
  1073. return replaceInstUsesWith(I, V);
  1074. if (Instruction *X = foldVectorBinop(I))
  1075. return X;
  1076. // Handle the integer div common cases
  1077. if (Instruction *Common = commonIDivTransforms(I))
  1078. return Common;
  1079. Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  1080. Value *X;
  1081. const APInt *C1, *C2;
  1082. if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
  1083. // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
  1084. bool Overflow;
  1085. APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
  1086. if (!Overflow) {
  1087. bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
  1088. BinaryOperator *BO = BinaryOperator::CreateUDiv(
  1089. X, ConstantInt::get(X->getType(), C2ShlC1));
  1090. if (IsExact)
  1091. BO->setIsExact();
  1092. return BO;
  1093. }
  1094. }
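
  // For example (constants assumed for illustration):
  //   (%x lshr 2) udiv 10 --> %x udiv 40
  // This is valid because floor(floor(x/4)/10) == floor(x/40): the low bits
  // discarded by the shift cannot change the quotient once the divisor is
  // scaled by the same power of two.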

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
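
  // Illustrative i8 example (values assumed): the quotient of
  // udiv i8 %x, 200 can only be 0 or 1 because the divisor has its sign bit
  // set, so it becomes zext (icmp uge i8 %x, 200).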

  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // Look through a right-shift to find the common factor:
  // ((Op1 *nuw A) >> B) / Op1 --> A >> B
  if (match(Op0, m_LShr(m_NUWMul(m_Specific(Op1), m_Value(A)), m_Value(B))) ||
      match(Op0, m_LShr(m_NUWMul(m_Value(A), m_Specific(Op1)), m_Value(B)))) {
    Instruction *Lshr = BinaryOperator::CreateLShr(A, B);
    if (I.isExact() && cast<PossiblyExactOperator>(Op0)->isExact())
      Lshr->setIsExact();
    return Lshr;
  }

  // Op0 udiv Op1 --> Op0 lshr log2(Op1), if log2(Op1) folds away.
  if (takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/false)) {
    Value *Res = takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/true);
    return replaceInstUsesWith(
        I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
  }
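
  // e.g. (illustrative, i32 assumed):
  //   %s = shl i32 1, %y
  //   %d = udiv i32 %x, %s
  // -->
  //   %d = lshr i32 %x, %y
  // since takeLog2 folds log2(1 << %y) to %y.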

  return nullptr;
}

Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = simplifySDivInst(I.getOperand(0), I.getOperand(1), I.isExact(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);

  if (I.isExact()) {
    // sdiv exact X, 1<<C --> ashr exact X, C  iff  1<<C is non-negative
    if (match(Op1, m_Power2()) && match(Op1, m_NonNegative())) {
      Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op1));
      return BinaryOperator::CreateExactAShr(Op0, C);
    }

    // sdiv exact X, (1<<ShAmt) --> ashr exact X, ShAmt (if shl is non-negative)
    Value *ShAmt;
    if (match(Op1, m_NSWShl(m_One(), m_Value(ShAmt))))
      return BinaryOperator::CreateExactAShr(Op0, ShAmt);

    // sdiv exact X, -1<<C --> -(ashr exact X, C)
    if (match(Op1, m_NegatedPower2())) {
      Constant *NegPow2C = ConstantExpr::getNeg(cast<Constant>(Op1));
      Constant *C = ConstantExpr::getExactLogBase2(NegPow2C);
      Value *Ashr = Builder.CreateAShr(Op0, C, I.getName() + ".neg", true);
      return BinaryOperator::CreateNeg(Ashr);
    }
  }
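
  // Constants assumed for illustration: %r = sdiv exact i32 %x, -8 becomes
  //   %r.neg = ashr exact i32 %x, 3
  //   %r     = sub i32 0, %r.neg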

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // If the dividend is sign-extended and the constant divisor is small
    // enough to fit in the source type, shrink the division to the narrower
    // type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Ty);
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    // checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // abs(X) / X --> X > -1 ? 1 : -1
  // X / abs(X) --> X > -1 ? 1 : -1
  if (match(&I, m_c_BinOp(
                    m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
                    m_Deferred(X)))) {
    Value *Cond = Builder.CreateIsNotNeg(X);
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1),
                              ConstantInt::getAllOnesValue(Ty));
  }
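
  // The abs intrinsic's i1 operand being 1 means INT_MIN is poison, so the
  // quotient is exactly the sign of X. A sketch of the resulting IR (i32
  // assumed):
  //   %cond = icmp sgt i32 %x, -1
  //   %r = select i1 %cond, i32 1, i32 -1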

  KnownBits KnownDividend = computeKnownBits(Op0, 0, &I);
  if (!I.isExact() &&
      (match(Op1, m_Power2(Op1C)) || match(Op1, m_NegatedPower2(Op1C))) &&
      KnownDividend.countMinTrailingZeros() >= Op1C->countTrailingZeros()) {
    I.setIsExact();
    return &I;
  }
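
  // e.g. (constants illustrative): if %x is known to have its low 3 bits
  // clear (say %x = shl i32 %a, 3), then sdiv i32 %x, 8 divides evenly and
  // can be marked 'exact', which enables the shift-based folds above.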

  if (KnownDividend.isNonNegative()) {
    // If both operands are unsigned, turn this into a udiv.
    if (isKnownNonNegative(Op1, DL, 0, &AC, &I, &DT)) {
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      //                    -> -(X udiv (1 << C)) -> -(X u>> C)
      Constant *CNegLog2 = ConstantExpr::getExactLogBase2(
          ConstantExpr::getNeg(cast<Constant>(Op1)));
      Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
      return BinaryOperator::CreateNeg(Shr);
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
Instruction *InstCombinerImpl::foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);

  // nnan X / +0.0 -> copysign(inf, X)
  if (I.hasNoNaNs() && match(I.getOperand(1), m_Zero())) {
    IRBuilder<> B(&I);
    // TODO: nnan nsz X / -0.0 -> copysign(inf, X)
    CallInst *CopySign = B.CreateIntrinsic(
        Intrinsic::copysign, {C->getType()},
        {ConstantFP::getInfinity(I.getType()), I.getOperand(0)}, &I);
    CopySign->takeName(&I);
    return replaceInstUsesWith(I, CopySign);
  }

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantFoldBinaryOpOperands(
      Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
  if (!RecipC || !RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}
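
// For instance (constants illustrative): X / 4.0 becomes X * 0.25
// unconditionally because 4.0 has an exact inverse, while X / 3.0 becomes
// X * (1.0/3.0) only under 'arcp' since that reciprocal rounds.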

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(NegC, X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C2, DL);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FMul, C, C2, DL);
  }

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}
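
// Sketch with assumed constants: under 'reassoc arcp',
//   10.0 / (X * 2.0) --> 5.0 / X   and   10.0 / (X / 2.0) --> 20.0 / X.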

/// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
static Instruction *foldFDivPowDivisor(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
      !I.hasAllowReciprocal())
    return nullptr;

  // Z / pow(X, Y) --> Z * pow(X, -Y)
  // Z / exp{2}(Y) --> Z * exp{2}(-Y)
  // In the general case, this creates an extra instruction, but fmul allows
  // for better canonicalization and optimization than fdiv.
  Intrinsic::ID IID = II->getIntrinsicID();
  SmallVector<Value *> Args;
  switch (IID) {
  case Intrinsic::pow:
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
    break;
  case Intrinsic::powi: {
    // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
    // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
    // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
    // non-standard results, so this corner case should be acceptable if the
    // code rules out INF values.
    if (!I.hasNoInfs())
      return nullptr;
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
    Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
    Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
    return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
  }
  case Intrinsic::exp:
  case Intrinsic::exp2:
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
    break;
  default:
    return nullptr;
  }
  Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
  return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
}
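
// A sketch of the rewrite for the exp case (names and flag placement
// illustrative; the builder copies I's fast-math flags onto the new ops):
//   %e = call double @llvm.exp.f64(double %y)
//   %r = fdiv reassoc arcp double %z, %e
// -->
//   %n = fneg reassoc arcp double %y
//   %e2 = call reassoc arcp double @llvm.exp.f64(double %n)
//   %r = fmul reassoc arcp double %z, %e2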

Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  Module *M = I.getModule();

  if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even with multiple uses of 1.0/Y, the
    // number of instructions remains the same and a division is replaced by a
    // multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }
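
  // e.g. (reassoc+arcp assumed): (%x / %y) / %z --> %x / (%y * %z), trading
  // the second fdiv for an fmul.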

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
                                       LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating so that (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }

  if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
    return Mul;

  // pow(X, Y) / X --> pow(X, Y-1)
  if (I.hasAllowReassoc() &&
      match(Op0, m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Specific(Op1),
                                                      m_Value(Y))))) {
    Value *Y1 =
        Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), -1.0), &I);
    Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &I);
    return replaceInstUsesWith(I, Pow);
  }

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all rem ops:
  // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count; we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }
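
  // e.g. (constants illustrative): urem i32 %x, 16 --> and i32 %x, 15. With a
  // non-constant power-of-two divisor, the fold still applies but emits an
  // extra add to compute the mask.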

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // Op0 urem C -> Op0 < C ? Op0 : Op0 - C, where C >= signbit.
  // Op0 must be frozen because we are increasing its number of uses.
  if (match(Op1, m_Negative())) {
    Value *F0 = Builder.CreateFreeze(Op0, Op0->getName() + ".fr");
    Value *Cmp = Builder.CreateICmpULT(F0, Op1);
    Value *Sub = Builder.CreateSub(F0, Op1);
    return SelectInst::Create(Cmp, F0, Sub);
  }
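
  // Values assumed for illustration (i8): urem i8 %x, 200 becomes
  //   %f = freeze i8 %x
  //   %c = icmp ult i8 %f, 200
  //   %s = sub i8 %f, 200
  //   %r = select i1 %c, i8 %f, i8 %s
  // A single conditional subtraction suffices because the quotient can only
  // be 0 or 1 when the divisor has its sign bit set.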

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }
      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }
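
  // This is valid because srem's result depends only on the divisor's
  // magnitude (the result takes the dividend's sign), e.g. (illustrative):
  //   srem <2 x i32> %x, <i32 -4, i32 8> --> srem <2 x i32> %x, <i32 4, i32 8>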

  return nullptr;
}

Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
  if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  return nullptr;
}