//===- InstCombinePHI.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

static cl::opt<unsigned>
MaxNumPhis("instcombine-max-num-phis", cl::init(512),
           cl::desc("Maximum number of phis to handle in intptr/ptrint folding"));

STATISTIC(NumPHIsOfInsertValues,
          "Number of phi-of-insertvalue turned into insertvalue-of-phis");
STATISTIC(NumPHIsOfExtractValues,
          "Number of phi-of-extractvalue turned into extractvalue-of-phi");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
/// The PHI arguments will be folded into a single operation with a PHI node
/// as input. The debug location of the single operation will be the merged
/// locations of the original PHI node arguments.
void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  Inst->setDebugLoc(FirstInst->getDebugLoc());
  // We do not expect a CallInst here; otherwise, N-way merging of DebugLoc
  // will be inefficient.
  assert(!isa<CallInst>(Inst));

  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = cast<Instruction>(V);
    Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
  }
}

// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
// If there is an existing pointer typed PHI that produces the same value as PN,
// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
// PHI node:
//
// Case-1:
// bb1:
//     int_init = PtrToInt(ptr_init)
//     br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    inc_val_inc = PtrToInt(ptr_val_inc)
//
// ==>
// bb1:
//     br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
//
// Case-2:
// bb1:
//    int_ptr = BitCast(ptr_ptr)
//    int_init = Load(int_ptr)
//    br label %bb2
// bb2:
//    int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
//    ptr_val2 = IntToPtr(int_val)
//    ...
//    use(ptr_val2)
//    ptr_val_inc = ...
//    inc_val_inc = PtrToInt(ptr_val_inc)
// ==>
// bb1:
//    ptr_init = Load(ptr_ptr)
//    br label %bb2
// bb2:
//    ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
//    ...
//    use(ptr_val)
//    ptr_val_inc = ...
// ...
//
Instruction *InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
  if (!PN.getType()->isIntegerTy())
    return nullptr;
  if (!PN.hasOneUse())
    return nullptr;

  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
  if (!IntToPtr)
    return nullptr;

  // Check if the pointer is actually used as pointer:
  auto HasPointerUse = [](Instruction *IIP) {
    for (User *U : IIP->users()) {
      Value *Ptr = nullptr;
      if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
        Ptr = LoadI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        Ptr = SI->getPointerOperand();
      } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
        Ptr = GI->getPointerOperand();
      }

      if (Ptr && Ptr == IIP)
        return true;
    }
    return false;
  };

  if (!HasPointerUse(IntToPtr))
    return nullptr;

  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
      DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
    return nullptr;

  SmallVector<Value *, 4> AvailablePtrVals;
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *Arg = std::get<1>(Incoming);

    // First look backward:
    if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
      AvailablePtrVals.emplace_back(PI->getOperand(0));
      continue;
    }

    // Next look forward:
    Value *ArgIntToPtr = nullptr;
    for (User *U : Arg->users()) {
      if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
          (DT.dominates(cast<Instruction>(U), BB) ||
           cast<Instruction>(U)->getParent() == BB)) {
        ArgIntToPtr = U;
        break;
      }
    }

    if (ArgIntToPtr) {
      AvailablePtrVals.emplace_back(ArgIntToPtr);
      continue;
    }

    // If Arg is defined by a PHI, allow it. This will also create
    // more opportunities iteratively.
    if (isa<PHINode>(Arg)) {
      AvailablePtrVals.emplace_back(Arg);
      continue;
    }

    // For a single use integer load:
    auto *LoadI = dyn_cast<LoadInst>(Arg);
    if (!LoadI)
      return nullptr;

    if (!LoadI->hasOneUse())
      return nullptr;

    // Push the integer typed Load instruction into the available
    // value set, and fix it up later when the pointer typed PHI
    // is synthesized.
    AvailablePtrVals.emplace_back(LoadI);
  }

  // Now search for a matching PHI
  auto *BB = PN.getParent();
  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
         "Not enough available ptr typed incoming values");
  PHINode *MatchingPtrPHI = nullptr;
  unsigned NumPhis = 0;
  for (PHINode &PtrPHI : BB->phis()) {
    // FIXME: consider handling this in AggressiveInstCombine
    if (NumPhis++ > MaxNumPhis)
      return nullptr;
    if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
      continue;
    if (any_of(zip(PN.blocks(), AvailablePtrVals),
               [&](const auto &BlockAndValue) {
                 BasicBlock *BB = std::get<0>(BlockAndValue);
                 Value *V = std::get<1>(BlockAndValue);
                 return PtrPHI.getIncomingValueForBlock(BB) != V;
               }))
      continue;
    MatchingPtrPHI = &PtrPHI;
    break;
  }

  if (MatchingPtrPHI) {
    assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
           "Phi's Type does not match with IntToPtr");
    // The PtrToInt + IntToPtr pair will be simplified later.
    return CastInst::CreateBitOrPointerCast(
        MatchingPtrPHI, IntToPtr->getOperand(0)->getType());
  }

  // If it requires a conversion for every PHI operand, do not do it.
  if (all_of(AvailablePtrVals, [&](Value *V) {
        return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
      }))
    return nullptr;

  // If any of the operands that require casting is a terminator
  // instruction, do not do it. Similarly, do not do the transform if the value
  // is PHI in a block with no insertion point, for example, a catchswitch
  // block, since we will not be able to insert a cast after the PHI.
  if (any_of(AvailablePtrVals, [&](Value *V) {
        if (V->getType() == IntToPtr->getType())
          return false;
        auto *Inst = dyn_cast<Instruction>(V);
        if (!Inst)
          return false;
        if (Inst->isTerminator())
          return true;
        auto *BB = Inst->getParent();
        if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
          return true;
        return false;
      }))
    return nullptr;

  PHINode *NewPtrPHI = PHINode::Create(
      IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");

  InsertNewInstBefore(NewPtrPHI, PN);
  SmallDenseMap<Value *, Instruction *> Casts;
  for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
    auto *IncomingBB = std::get<0>(Incoming);
    auto *IncomingVal = std::get<1>(Incoming);

    if (IncomingVal->getType() == IntToPtr->getType()) {
      NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
      continue;
    }

#ifndef NDEBUG
    LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
    assert((isa<PHINode>(IncomingVal) ||
            IncomingVal->getType()->isPointerTy() ||
            (LoadI && LoadI->hasOneUse())) &&
           "Can not replace LoadInst with multiple uses");
#endif
    // Need to insert a BitCast.
    // For an integer Load instruction with a single use, the load + IntToPtr
    // cast will be simplified into a pointer load:
    // %v = load i64, i64* %a.ip, align 8
    // %v.cast = inttoptr i64 %v to float **
    // ==>
    // %v.ptrp = bitcast i64 * %a.ip to float **
    // %v.cast = load float *, float ** %v.ptrp, align 8
    Instruction *&CI = Casts[IncomingVal];
    if (!CI) {
      CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
                                            IncomingVal->getName() + ".ptr");
      if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
        BasicBlock::iterator InsertPos(IncomingI);
        InsertPos++;
        BasicBlock *BB = IncomingI->getParent();
        if (isa<PHINode>(IncomingI))
          InsertPos = BB->getFirstInsertionPt();
        assert(InsertPos != BB->end() && "should have checked above");
        InsertNewInstBefore(CI, *InsertPos);
      } else {
        auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
        InsertNewInstBefore(CI, *InsertBB->getFirstInsertionPt());
      }
    }
    NewPtrPHI->addIncoming(CI, IncomingBB);
  }

  // The PtrToInt + IntToPtr pair will be simplified later.
  return CastInst::CreateBitOrPointerCast(NewPtrPHI,
                                          IntToPtr->getOperand(0)->getType());
}

// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
// fold Phi-operand to bitcast.
Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
  // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
  // Make sure all uses of phi are ptr2int.
  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
    return nullptr;

  // Iterating over all operands to check presence of target pointers for
  // optimization.
  bool OperandWithRoundTripCast = false;
  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
    if (auto *NewOp =
            simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
      PN.setIncomingValue(OpNum, NewOp);
      OperandWithRoundTripCast = true;
    }
  }
  if (!OperandWithRoundTripCast)
    return nullptr;
  return &PN;
}

/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
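/// Illustrative IR for the transform (a hypothetical example, not from the
/// original source):
///   %iv1 = insertvalue { i32, i32 } %a, i32 %b, 0
///   %iv2 = insertvalue { i32, i32 } %c, i32 %d, 0
///   %phi = phi { i32, i32 } [ %iv1, %bb1 ], [ %iv2, %bb2 ]
/// ==>
///   %agg.pn = phi { i32, i32 } [ %a, %bb1 ], [ %c, %bb2 ]
///   %val.pn = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///   %phi = insertvalue { i32, i32 } %agg.pn, i32 %val.pn, 0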
Instruction *
InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `insertvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<InsertValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
      return nullptr;
  }

  // For each operand of an `insertvalue`
  std::array<PHINode *, 2> NewOperands;
  for (int OpIdx : {0, 1}) {
    auto *&NewOperand = NewOperands[OpIdx];
    // Create a new PHI node to receive the values the operand has in each
    // incoming basic block.
    NewOperand = PHINode::Create(
        FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
        FirstIVI->getOperand(OpIdx)->getName() + ".pn");
    // And populate each operand's PHI with said values.
    for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
      NewOperand->addIncoming(
          cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
          std::get<0>(Incoming));
    InsertNewInstBefore(NewOperand, PN);
  }

  // And finally, create `insertvalue` over the newly-formed PHI nodes.
  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
                                         FirstIVI->getIndices(), PN.getName());
  PHIArgMergedDebugLoc(NewIVI, PN);
  ++NumPHIsOfInsertValues;
  return NewIVI;
}

/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
/// turn this into a phi[a,b] and a single extractvalue.
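/// Illustrative IR for the transform (a hypothetical example, not from the
/// original source):
///   %ev1 = extractvalue { i32, i32 } %a, 0
///   %ev2 = extractvalue { i32, i32 } %b, 0
///   %phi = phi i32 [ %ev1, %bb1 ], [ %ev2, %bb2 ]
/// ==>
///   %agg.pn = phi { i32, i32 } [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi = extractvalue { i32, i32 } %agg.pn, 0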
Instruction *
InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));

  // Scan to see if all operands are `extractvalue`'s with the same indices,
  // and all have a single use.
  for (Value *V : drop_begin(PN.incoming_values())) {
    auto *I = dyn_cast<ExtractValueInst>(V);
    if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
        I->getAggregateOperand()->getType() !=
            FirstEVI->getAggregateOperand()->getType())
      return nullptr;
  }

  // Create a new PHI node to receive the values the aggregate operand has
  // in each incoming basic block.
  auto *NewAggregateOperand = PHINode::Create(
      FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
      FirstEVI->getAggregateOperand()->getName() + ".pn");
  // And populate the PHI with said values.
  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
    NewAggregateOperand->addIncoming(
        cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
        std::get<0>(Incoming));
  InsertNewInstBefore(NewAggregateOperand, PN);

  // And finally, create `extractvalue` over the newly-formed PHI nodes.
  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
                                          FirstEVI->getIndices(), PN.getName());
  PHIArgMergedDebugLoc(NewEVI, PN);
  ++NumPHIsOfExtractValues;
  return NewEVI;
}

/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
/// adds all have a single user, turn this into a phi and a single binop.
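/// Illustrative IR for the transform (a hypothetical example, not from the
/// original source); since the LHS %a is common, only the RHS needs a phi:
///   %x = add i32 %a, %b
///   %y = add i32 %a, %c
///   %phi = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
/// ==>
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %phi = add i32 %a, %b.pn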
Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!
  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      Instruction *InInst = cast<Instruction>(InVal);
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, InBB);
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, InBB);
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
      BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);

  NewBinOp->copyIRFlags(PN.getIncomingValue(0));

  for (Value *V : drop_begin(PN.incoming_values()))
    NewBinOp->andIRFlags(V);

  PHIArgMergedDebugLoc(NewBinOp, PN);
  return NewBinOp;
}

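/// Fold a phi whose incoming values are GEPs of matching shape into a single
/// GEP whose variable operands are themselves phi'd. Illustrative IR for the
/// transform (a hypothetical example, not from the original source):
///   %g1 = getelementptr inbounds i32, i32* %base, i64 %i
///   %g2 = getelementptr inbounds i32, i32* %base, i64 %j
///   %phi = phi i32* [ %g1, %bb1 ], [ %g2, %bb2 ]
/// ==>
///   %i.pn = phi i64 [ %i, %bb1 ], [ %j, %bb2 ]
///   %phi = getelementptr inbounds i32, i32* %base, i64 %i.pn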
Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =
      cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value *, 16> FixedOperands(FirstInst->op_begin(),
                                         FirstInst->op_end());

  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one user.
  for (Value *V : drop_begin(PN.incoming_values())) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
    if (!GEP || !GEP->hasOneUser() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
      if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
          isa<ConstantInt>(GEP->getOperand(Op)))
        return nullptr;

      if (FirstInst->getOperand(Op)->getType() !=
          GEP->getOperand(Op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[Op] = nullptr; // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode *, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
    if (FixedOperands[I])
      continue; // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(I);
    PHINode *NewPN =
        PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[I] = NewPN;
    FixedOperands[I] = NewPN;
    HasAnyPHIs = true;
  }

  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
      BasicBlock *InBB = std::get<0>(Incoming);
      Value *InVal = std::get<1>(Incoming);
      GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);

      for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
        if (PHINode *OpPhi = OperandPhis[Op])
          OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
      GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
                                makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  PHIArgMergedDebugLoc(NewGEP, PN);
  return NewGEP;
}

/// Return true if we know that it is safe to sink the load out of the block
/// that defines it. This means that it must be obvious the value of the load is
/// not changed from the point of the load to the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory()) {
      // Calls that only access inaccessible memory do not block sinking the
      // load.
      if (auto *CB = dyn_cast<CallBase>(BBI))
        if (CB->onlyAccessesInaccessibleMemory())
          continue;
      return false;
    }

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool IsAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      IsAddressTaken = true;
      break;
    }

    if (!IsAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

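/// Fold a phi of loads into a single load of a phi'd pointer, sinking the
/// load into the phi's block. Illustrative IR for the transform (a
/// hypothetical example, not from the original source):
///   %v1 = load i32, i32* %p    ; in %bb1
///   %v2 = load i32, i32* %q    ; in %bb2
///   %phi = phi i32 [ %v1, %bb1 ], [ %v2, %bb2 ]
/// ==>
///   %phi.in = phi i32* [ %p, %bb1 ], [ %q, %bb2 ]
///   %phi = load i32, i32* %phi.in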
Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is.
  bool IsVolatile = FirstLI->isVolatile();
  Align LoadAlignment = FirstLI->getAlign();
  const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (IsVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *InBB = std::get<0>(Incoming);
    Value *InVal = std::get<1>(Incoming);
    LoadInst *LI = dyn_cast<LoadInst>(InVal);
    if (!LI || !LI->hasOneUser() || LI->isAtomic())
      return nullptr;

    // Make sure all arguments are the same type of operation.
    if (LI->isVolatile() != IsVolatile ||
        LI->getPointerAddressSpace() != LoadAddrSpace)
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlign());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
  LoadInst *NewLI =
      new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);

  unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,
      LLVMContext::MD_range,
      LLVMContext::MD_invariant_load,
      LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,
      LLVMContext::MD_nonnull,
      LLVMContext::MD_align,
      LLVMContext::MD_dereferenceable,
      LLVMContext::MD_dereferenceable_or_null,
      LLVMContext::MD_access_group,
  };

  for (unsigned ID : KnownIDs)
    NewLI->setMetadata(ID, FirstLI->getMetadata(ID));

  // Add all operands to the new PHI and combine TBAA metadata.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    LoadInst *LI = cast<LoadInst>(V);
    combineMetadata(NewLI, LI, KnownIDs, true);
    Value *NewInVal = LI->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    NewLI->setOperand(0, InVal);
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (IsVolatile)
    for (Value *IncValue : PN.incoming_values())
      cast<LoadInst>(IncValue)->setVolatile(false);

  PHIArgMergedDebugLoc(NewLI, PN);
  return NewLI;
}

/// TODO: This function could handle other cast types, but then it might
/// require special-casing a cast from the 'i1' type. See the comment in
/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
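/// Illustrative IR for the zext fold (a hypothetical example, not from the
/// original source); note the fold only fires with three or more incoming
/// values, at least two zexts, and at least one shrinkable constant:
///   %z1 = zext i8 %a to i32
///   %z2 = zext i8 %b to i32
///   %phi = phi i32 [ %z1, %bb1 ], [ %z2, %bb2 ], [ 7, %bb3 ]
/// ==>
///   %phi.shrunk = phi i8 [ %a, %bb1 ], [ %b, %bb2 ], [ 7, %bb3 ]
///   %phi = zext i8 %phi.shrunk to i32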
Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = Phi.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  // Early exit for the common case of a phi with two operands. These are
  // handled elsewhere. See the comment below where we check the count of zexts
  // and constants for more details.
  unsigned NumIncomingValues = Phi.getNumIncomingValues();
  if (NumIncomingValues < 3)
    return nullptr;

  // Find the narrower type specified by the first zext.
  Type *NarrowType = nullptr;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      NarrowType = Zext->getSrcTy();
      break;
    }
  }
  if (!NarrowType)
    return nullptr;

  // Walk the phi operands checking that we only have zexts or constants that
  // we can shrink for free. Store the new operands for the new phi.
  SmallVector<Value *, 4> NewIncoming;
  unsigned NumZexts = 0;
  unsigned NumConsts = 0;
  for (Value *V : Phi.incoming_values()) {
    if (auto *Zext = dyn_cast<ZExtInst>(V)) {
      // All zexts must be identical and have one user.
      if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
        return nullptr;
      NewIncoming.push_back(Zext->getOperand(0));
      NumZexts++;
    } else if (auto *C = dyn_cast<Constant>(V)) {
      // Make sure that constants can fit in the new type.
      Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
      if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
        return nullptr;
      NewIncoming.push_back(Trunc);
      NumConsts++;
    } else {
      // If it's not a cast or a constant, bail out.
      return nullptr;
    }
  }

  // The more common cases of a phi with no constant operands or just one
  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
  // performed here. It tries to replicate a cast in the phi operand's basic
  // block to expose other folding opportunities. Thus, InstCombine will
  // infinite loop without this check.
  if (NumConsts == 0 || NumZexts < 2)
    return nullptr;

  // All incoming values are zexts or constants that are safe to truncate.
  // Create a new phi node of the narrow type, phi together all of the new
  // operands, and zext the result back to the original type.
  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
                                    Phi.getName() + ".shrunk");
  for (unsigned I = 0; I != NumIncomingValues; ++I)
    NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));

  InsertNewInstBefore(NewPhi, Phi);
  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
}

/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
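/// Illustrative IR for the cast case (a hypothetical example, not from the
/// original source):
///   %t1 = trunc i64 %a to i32
///   %t2 = trunc i64 %b to i32
///   %phi = phi i32 [ %t1, %bb1 ], [ %t2, %bb2 ]
/// ==>
///   %phi.in = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi = trunc i64 %phi.in to i32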
Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return foldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return foldPHIArgLoadIntoPHI(PN);
  if (isa<InsertValueInst>(FirstInst))
    return foldPHIArgInsertValueInstructionIntoPHI(PN);
  if (isa<ExtractValueInst>(FirstInst))
    return foldPHIArgExtractValueInstructionIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return foldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr; // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (Value *V : drop_begin(PN.incoming_values())) {
    Instruction *I = dyn_cast<Instruction>(V);
    if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName() + ".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
    BasicBlock *BB = std::get<0>(Incoming);
    Value *V = std::get<1>(Incoming);
    Value *NewInVal = cast<Instruction>(V)->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, BB);
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (Value *V : drop_begin(PN.incoming_values()))
      BinOp->andIRFlags(V);

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}

/// Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool isDeadPHICycle(PHINode *PN,
                           SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return isDeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// Return true if this phi node is always equal to NonPhiInVal.
/// This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (Value *Op : PN->incoming_values()) {
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

/// Return an existing non-zero constant if this phi node has one, otherwise
/// return constant 1.
static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
  for (Value *V : PN.operands())
    if (auto *ConstVA = dyn_cast<ConstantInt>(V))
      if (!ConstVA->isZero())
        return ConstVA;
  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
      : PHIId(Pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
      : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
};
} // namespace

namespace llvm {
template <>
struct DenseMapInfo<LoweredPHIRecord> {
  static inline LoweredPHIRecord getEmptyKey() {
    return LoweredPHIRecord(nullptr, 0);
  }
  static inline LoweredPHIRecord getTombstoneKey() {
    return LoweredPHIRecord(nullptr, 1);
  }
  static unsigned getHashValue(const LoweredPHIRecord &Val) {
    return DenseMapInfo<PHINode *>::getHashValue(Val.PN) ^ (Val.Shift >> 3) ^
           (Val.Width >> 3);
  }
  static bool isEqual(const LoweredPHIRecord &LHS,
                      const LoweredPHIRecord &RHS) {
    return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
           LHS.Width == RHS.Width;
  }
};
} // namespace llvm

/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
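/// Illustrative IR for the slicing (a hypothetical example, not from the
/// original source), where each predecessor %bbN computes the low and high
/// pieces %xN.lo and %xN.hi of its input %xN via trunc and lshr+trunc:
///   %phi = phi i64 [ %x1, %bb1 ], [ %x2, %bb2 ]
///   %lo = trunc i64 %phi to i32
///   %hi.shifted = lshr i64 %phi, 32
///   %hi = trunc i64 %hi.shifted to i32
/// ==>
///   %phi.off0 = phi i32 [ %x1.lo, %bb1 ], [ %x2.lo, %bb2 ]
///   %phi.off32 = phi i32 [ %x1.hi, %bb1 ], [ %x2.hi, %bb2 ]
///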
Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsToSlice is an ordered list of PHIs
  // whose uses we still need to check (to ensure they are all extracts), and
  // PHIsInspected is the set we use to avoid revisiting PHIs.
  SmallVector<PHINode *, 8> PHIsToSlice;
  SmallPtrSet<PHINode *, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge which is required to insert a truncate. Because of this,
    // we have to bail out.
    for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
      BasicBlock *BB = std::get<0>(Incoming);
      Value *V = std::get<1>(Incoming);
      InvokeInst *II = dyn_cast<InvokeInst>(V);
      if (!II)
        continue;
      if (II->getParent() != BB)
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      // Bail on out of range shifts.
      unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
      if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must all be self uses; just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
             for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
             << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock *, Value *> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode *> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName() + ".off" + Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
        BasicBlock *Pred = std::get<0>(Incoming);
        Value *InVal = std::get<1>(Incoming);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(
              Res, ConstantInt::get(InVal->getType(), Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(
                PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      LLVM_DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                        << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with poison.
  Value *Poison = PoisonValue::get(FirstPhi.getType());
  for (PHINode *PHI : drop_begin(PHIsToSlice))
    replaceInstUsesWith(*PHI, Poison);
  return replaceInstUsesWith(FirstPhi, Poison);
}

static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
                                       const DominatorTree &DT) {
  // Simplify the following patterns:
  //       if (cond)
  //       /       \
  //      ...     ...
  //       \       /
  //    phi [true] [false]
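  // For example (a hypothetical IR instance, not from the original source):
  //   %phi = phi i1 [ true, %then ], [ false, %else ]
  // becomes the condition %cond of the dominating branch
  //   br i1 %cond, label %then, label %else
  // (or its inversion when the edges are swapped).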
  if (!PN.getType()->isIntegerTy(1))
    return nullptr;

  if (PN.getNumOperands() != 2)
    return nullptr;

  // Make sure all inputs are constants.
  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
    return nullptr;

  BasicBlock *BB = PN.getParent();
  // Do not bother with unreachable instructions.
  if (!DT.isReachableFromEntry(BB))
    return nullptr;

  // Same inputs.
  if (PN.getOperand(0) == PN.getOperand(1))
    return PN.getOperand(0);

  BasicBlock *TruePred = nullptr, *FalsePred = nullptr;
  for (auto *Pred : predecessors(BB)) {
    auto *Input = cast<ConstantInt>(PN.getIncomingValueForBlock(Pred));
    if (Input->isAllOnesValue())
      TruePred = Pred;
    else
      FalsePred = Pred;
  }
  assert(TruePred && FalsePred && "Must be!");

  // Check which edge of the dominator dominates the true input. If it is the
  // false edge, we should invert the condition.
  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
  auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
  if (!BI || BI->isUnconditional())
    return nullptr;

  // Check that edges outgoing from the idom's terminator dominate the
  // respective inputs of the Phi.
  BasicBlockEdge TrueOutEdge(IDom, BI->getSuccessor(0));
  BasicBlockEdge FalseOutEdge(IDom, BI->getSuccessor(1));

  BasicBlockEdge TrueIncEdge(TruePred, BB);
  BasicBlockEdge FalseIncEdge(FalsePred, BB);

  auto *Cond = BI->getCondition();
  if (DT.dominates(TrueOutEdge, TrueIncEdge) &&
      DT.dominates(FalseOutEdge, FalseIncEdge))
    // This Phi is actually equivalent to the branching condition of IDom.
    return Cond;
  if (DT.dominates(TrueOutEdge, FalseIncEdge) &&
      DT.dominates(FalseOutEdge, TrueIncEdge)) {
    // This Phi is actually opposite to the branching condition of IDom. We
    // invert the condition, which will potentially open up some opportunities
    // for sinking.
    auto InsertPt = BB->getFirstInsertionPt();
    if (InsertPt != BB->end()) {
      Self.Builder.SetInsertPoint(&*InsertPt);
      return Self.Builder.CreateNot(Cond);
    }
  }

  return nullptr;
}

// PHINode simplification
//
Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
    return Result;

  if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
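  //
  // Illustrative sketch only (value names invented here): given
  //   %phi = phi i32 [ %add1, %bb1 ], [ %add2, %bb2 ]
  // where %add1 and %add2 are both 'add' instructions, the add can be pulled
  // through the PHI so that its operands are merged by PHIs instead.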
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
          cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      PN.getIncomingValue(0)->hasOneUser())
    if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
      return Result;
  // If the incoming values are pointer casts of the same original value,
  // replace the phi with a single cast iff we can insert a non-PHI instruction.
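  //
  // For illustration (hypothetical values, not from the code below):
  //   %phi = phi i8* [ %c1, %bb1 ], [ %c2, %bb2 ]
  // where %c1 and %c2 both strip back to the same %base becomes a single
  // pointer cast of %base.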
  if (PN.getType()->isPointerTy() &&
      PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
    Value *IV0 = PN.getIncomingValue(0);
    Value *IV0Stripped = IV0->stripPointerCasts();
    // Set to keep track of values known to be equal to IV0Stripped after
    // stripping pointer casts.
    SmallPtrSet<Value *, 4> CheckedIVs;
    CheckedIVs.insert(IV0);
    if (IV0 != IV0Stripped &&
        all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
          return !CheckedIVs.insert(IV).second ||
                 IV0Stripped == IV->stripPointerCasts();
        })) {
      return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
    }
  }
  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
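  //
  // A minimal illustration (names invented): %a = phi [ %b, ... ] and
  // %b = phi [ %a, ... ] with no other users form a dead cycle, so both fold
  // to poison.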
  if (PN.hasOneUse()) {
    if (Instruction *Result = foldIntegerTypedPHI(PN))
      return Result;

    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode *, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
        return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }
    // When a PHI is used only to be compared with zero, it is safe to replace
    // an incoming value that is proven non-zero with any non-zero constant.
    // For example, in the code below, the incoming value %v can be replaced
    // with any non-zero constant because the PHI is only used in a comparison
    // with zero and %v is known to be non-zero:
    //   %v = select %cond, 1, 2
    //   %p = phi [%v, BB] ...
    //   icmp eq, %p, 0
    auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
    // FIXME: To be simple, handle only integer type for now.
    if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
        match(CmpInst->getOperand(1), m_Zero())) {
      ConstantInt *NonZeroConst = nullptr;
      bool MadeChange = false;
      for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
        Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
        Value *VA = PN.getIncomingValue(I);
        if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
          if (!NonZeroConst)
            NonZeroConst = getAnyNonZeroConstInt(PN);
          if (NonZeroConst != VA) {
            replaceOperand(PN, I, NonZeroConst);
            MadeChange = true;
          }
        }
      }
      if (MadeChange)
        return &PN;
    }
  }
  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value;
  // if so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts; if
      // so, there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return replaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }
  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this ordering for
  // correctness, however.
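  //
  // Illustrative sketch (names invented): given
  //   %first = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
  //   %pn    = phi i32 [ %y, %bb1 ], [ %x, %bb0 ]
  // the second PHI is rewritten in place to list %bb0 first:
  //   %pn    = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]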
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned I = 0, E = FirstPN->getNumIncomingValues(); I != E; ++I) {
      BasicBlock *BBA = PN.getIncomingBlock(I);
      BasicBlock *BBB = FirstPN->getIncomingBlock(I);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(I);
        unsigned J = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(J);
        PN.setIncomingBlock(I, BBB);
        PN.setIncomingValue(I, VB);
        PN.setIncomingBlock(J, BBA);
        PN.setIncomingValue(J, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }
  // Is there an identical PHI node in this basic block?
  for (PHINode &IdenticalPN : PN.getParent()->phis()) {
    // Ignore the PHI node itself.
    if (&IdenticalPN == &PN)
      continue;
    // Note that even though we've just canonicalized this PHI, due to the
    // worklist visitation order there is no guarantee that *every* PHI
    // has been canonicalized, so we can't just compare operand ranges.
    if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
      continue;
    // Just use that PHI instead then.
    ++NumPHICSEs;
    return replaceInstUsesWith(PN, &IdenticalPN);
  }
  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
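  //
  // For illustration (assuming i64 is not a legal integer type for the
  // target; names invented):
  //   %p  = phi i64 ...
  //   %lo = trunc i64 %p to i32
  //   %sh = lshr i64 %p, 32
  //   %hi = trunc i64 %sh to i32
  // is sliced into two i32 PHIs that feed %lo and %hi directly.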
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  // Ultimately, try to replace this Phi with a dominating condition.
  if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
    return replaceInstUsesWith(PN, V);

  return nullptr;
}