Evaluator.cpp 25 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688
  1. //===- Evaluator.cpp - LLVM IR evaluator ----------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // Function evaluator for LLVM IR.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "llvm/Transforms/Utils/Evaluator.h"
  13. #include "llvm/ADT/DenseMap.h"
  14. #include "llvm/ADT/STLExtras.h"
  15. #include "llvm/ADT/SmallPtrSet.h"
  16. #include "llvm/ADT/SmallVector.h"
  17. #include "llvm/Analysis/ConstantFolding.h"
  18. #include "llvm/IR/BasicBlock.h"
  19. #include "llvm/IR/Constant.h"
  20. #include "llvm/IR/Constants.h"
  21. #include "llvm/IR/DataLayout.h"
  22. #include "llvm/IR/DerivedTypes.h"
  23. #include "llvm/IR/Function.h"
  24. #include "llvm/IR/GlobalAlias.h"
  25. #include "llvm/IR/GlobalValue.h"
  26. #include "llvm/IR/GlobalVariable.h"
  27. #include "llvm/IR/InstrTypes.h"
  28. #include "llvm/IR/Instruction.h"
  29. #include "llvm/IR/Instructions.h"
  30. #include "llvm/IR/IntrinsicInst.h"
  31. #include "llvm/IR/Operator.h"
  32. #include "llvm/IR/Type.h"
  33. #include "llvm/IR/User.h"
  34. #include "llvm/IR/Value.h"
  35. #include "llvm/Support/Casting.h"
  36. #include "llvm/Support/Debug.h"
  37. #include "llvm/Support/raw_ostream.h"
  38. #define DEBUG_TYPE "evaluator"
  39. using namespace llvm;
// Forward declaration: the recursive helper below re-enters through this
// memoizing wrapper (defined after the helper).
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL);
  44. /// Return true if the specified constant can be handled by the code generator.
  45. /// We don't want to generate something like:
  46. /// void *X = &X/42;
  47. /// because the code generator doesn't have a relocation that can handle that.
  48. ///
  49. /// This function should be called if C was not found (but just got inserted)
  50. /// in SimpleConstants to avoid having to rescan the same constants all the
  51. /// time.
  52. static bool
  53. isSimpleEnoughValueToCommitHelper(Constant *C,
  54. SmallPtrSetImpl<Constant *> &SimpleConstants,
  55. const DataLayout &DL) {
  56. // Simple global addresses are supported, do not allow dllimport or
  57. // thread-local globals.
  58. if (auto *GV = dyn_cast<GlobalValue>(C))
  59. return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();
  60. // Simple integer, undef, constant aggregate zero, etc are all supported.
  61. if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
  62. return true;
  63. // Aggregate values are safe if all their elements are.
  64. if (isa<ConstantAggregate>(C)) {
  65. for (Value *Op : C->operands())
  66. if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
  67. return false;
  68. return true;
  69. }
  70. // We don't know exactly what relocations are allowed in constant expressions,
  71. // so we allow &global+constantoffset, which is safe and uniformly supported
  72. // across targets.
  73. ConstantExpr *CE = cast<ConstantExpr>(C);
  74. switch (CE->getOpcode()) {
  75. case Instruction::BitCast:
  76. // Bitcast is fine if the casted value is fine.
  77. return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  78. case Instruction::IntToPtr:
  79. case Instruction::PtrToInt:
  80. // int <=> ptr is fine if the int type is the same size as the
  81. // pointer type.
  82. if (DL.getTypeSizeInBits(CE->getType()) !=
  83. DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
  84. return false;
  85. return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  86. // GEP is fine if it is simple + constant offset.
  87. case Instruction::GetElementPtr:
  88. for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
  89. if (!isa<ConstantInt>(CE->getOperand(i)))
  90. return false;
  91. return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  92. case Instruction::Add:
  93. // We allow simple+cst.
  94. if (!isa<ConstantInt>(CE->getOperand(1)))
  95. return false;
  96. return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  97. }
  98. return false;
  99. }
  100. static inline bool
  101. isSimpleEnoughValueToCommit(Constant *C,
  102. SmallPtrSetImpl<Constant *> &SimpleConstants,
  103. const DataLayout &DL) {
  104. // If we already checked this constant, we win.
  105. if (!SimpleConstants.insert(C).second)
  106. return true;
  107. // Check the constant.
  108. return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
  109. }
  110. void Evaluator::MutableValue::clear() {
  111. if (auto *Agg = Val.dyn_cast<MutableAggregate *>())
  112. delete Agg;
  113. Val = nullptr;
  114. }
// Read a value of type \p Ty at byte \p Offset from this mutable slot.
// Returns null if the read cannot be represented as a constant.
Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
                                        const DataLayout &DL) const {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  const MutableValue *V = this;
  // Descend through nested MutableAggregate levels until we reach a slot
  // holding a flat Constant.
  while (const auto *Agg = V->Val.dyn_cast<MutableAggregate *>()) {
    Type *AggTy = Agg->Ty;
    // NOTE(review): getGEPIndexForOffset appears to refine AggTy and Offset
    // in place to the selected element — the size check below uses the
    // refined AggTy; confirm against DataLayout's documented contract.
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    // Bail out if no element covers the offset, the index is out of range,
    // or the requested read spills past the element.
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return nullptr;
    V = &Agg->Elements[Index->getZExtValue()];
  }
  // Leaf reached: fold the load out of the flat constant at the remaining
  // offset.
  return ConstantFoldLoadFromConst(V->Val.get<Constant *>(), Ty, Offset, DL);
}
  129. bool Evaluator::MutableValue::makeMutable() {
  130. Constant *C = Val.get<Constant *>();
  131. Type *Ty = C->getType();
  132. unsigned NumElements;
  133. if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
  134. NumElements = VT->getNumElements();
  135. } else if (auto *AT = dyn_cast<ArrayType>(Ty))
  136. NumElements = AT->getNumElements();
  137. else if (auto *ST = dyn_cast<StructType>(Ty))
  138. NumElements = ST->getNumElements();
  139. else
  140. return false;
  141. MutableAggregate *MA = new MutableAggregate(Ty);
  142. MA->Elements.reserve(NumElements);
  143. for (unsigned I = 0; I < NumElements; ++I)
  144. MA->Elements.push_back(C->getAggregateElement(I));
  145. Val = MA;
  146. return true;
  147. }
// Store constant \p V at byte \p Offset into this mutable slot, splitting
// aggregates on demand. Returns false if the store cannot be represented.
bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
                                    const DataLayout &DL) {
  Type *Ty = V->getType();
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  MutableValue *MV = this;
  // Drill down until the store lands at offset 0 of a slot whose type V can
  // be bit-or-noop cast into. Each iteration converts the current slot into
  // a MutableAggregate (if it is still a flat Constant) and steps into the
  // element covering Offset.
  while (Offset != 0 ||
         !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
    if (MV->Val.is<Constant *>() && !MV->makeMutable())
      return false; // Hit a scalar slot that can't be split further.

    MutableAggregate *Agg = MV->Val.get<MutableAggregate *>();
    Type *AggTy = Agg->Ty;
    // NOTE(review): getGEPIndexForOffset appears to refine AggTy and Offset
    // in place to the selected element (mirrors read() above) — confirm
    // against DataLayout's documented contract.
    std::optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return false;

    MV = &Agg->Elements[Index->getZExtValue()];
  }

  // Offset is now zero and the types are cast-compatible; coerce V to the
  // slot's exact type if needed and overwrite the slot.
  Type *MVType = MV->getType();
  MV->clear();
  if (Ty->isIntegerTy() && MVType->isPointerTy())
    MV->Val = ConstantExpr::getIntToPtr(V, MVType);
  else if (Ty->isPointerTy() && MVType->isIntegerTy())
    MV->Val = ConstantExpr::getPtrToInt(V, MVType);
  else if (Ty != MVType)
    MV->Val = ConstantExpr::getBitCast(V, MVType);
  else
    MV->Val = V;
  return true;
}
  177. Constant *Evaluator::MutableAggregate::toConstant() const {
  178. SmallVector<Constant *, 32> Consts;
  179. for (const MutableValue &MV : Elements)
  180. Consts.push_back(MV.toConstant());
  181. if (auto *ST = dyn_cast<StructType>(Ty))
  182. return ConstantStruct::get(ST, Consts);
  183. if (auto *AT = dyn_cast<ArrayType>(Ty))
  184. return ConstantArray::get(AT, Consts);
  185. assert(isa<FixedVectorType>(Ty) && "Must be vector");
  186. return ConstantVector::get(Consts);
  187. }
  188. /// Return the value that would be computed by a load from P after the stores
  189. /// reflected by 'memory' have been performed. If we can't decide, return null.
  190. Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
  191. APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
  192. P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
  193. DL, Offset, /* AllowNonInbounds */ true));
  194. Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
  195. if (auto *GV = dyn_cast<GlobalVariable>(P))
  196. return ComputeLoadResult(GV, Ty, Offset);
  197. return nullptr;
  198. }
  199. Constant *Evaluator::ComputeLoadResult(GlobalVariable *GV, Type *Ty,
  200. const APInt &Offset) {
  201. auto It = MutatedMemory.find(GV);
  202. if (It != MutatedMemory.end())
  203. return It->second.read(Ty, Offset, DL);
  204. if (!GV->hasDefinitiveInitializer())
  205. return nullptr;
  206. return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
  207. }
  208. static Function *getFunction(Constant *C) {
  209. if (auto *Fn = dyn_cast<Function>(C))
  210. return Fn;
  211. if (auto *Alias = dyn_cast<GlobalAlias>(C))
  212. if (auto *Fn = dyn_cast<Function>(Alias->getAliasee()))
  213. return Fn;
  214. return nullptr;
  215. }
  216. Function *
  217. Evaluator::getCalleeWithFormalArgs(CallBase &CB,
  218. SmallVectorImpl<Constant *> &Formals) {
  219. auto *V = CB.getCalledOperand()->stripPointerCasts();
  220. if (auto *Fn = getFunction(getVal(V)))
  221. return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
  222. return nullptr;
  223. }
  224. bool Evaluator::getFormalParams(CallBase &CB, Function *F,
  225. SmallVectorImpl<Constant *> &Formals) {
  226. if (!F)
  227. return false;
  228. auto *FTy = F->getFunctionType();
  229. if (FTy->getNumParams() > CB.arg_size()) {
  230. LLVM_DEBUG(dbgs() << "Too few arguments for function.\n");
  231. return false;
  232. }
  233. auto ArgI = CB.arg_begin();
  234. for (Type *PTy : FTy->params()) {
  235. auto *ArgC = ConstantFoldLoadThroughBitcast(getVal(*ArgI), PTy, DL);
  236. if (!ArgC) {
  237. LLVM_DEBUG(dbgs() << "Can not convert function argument.\n");
  238. return false;
  239. }
  240. Formals.push_back(ArgC);
  241. ++ArgI;
  242. }
  243. return true;
  244. }
  245. /// If call expression contains bitcast then we may need to cast
  246. /// evaluated return value to a type of the call expression.
  247. Constant *Evaluator::castCallResultIfNeeded(Type *ReturnType, Constant *RV) {
  248. if (!RV || RV->getType() == ReturnType)
  249. return RV;
  250. RV = ConstantFoldLoadThroughBitcast(RV, ReturnType, DL);
  251. if (!RV)
  252. LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
  253. return RV;
  254. }
/// Evaluate all instructions in block BB, returning true if successful, false
/// if we can't evaluate it. NextBB returns the next BB that control flows into,
/// or null upon return. StrippedPointerCastsForAliasAnalysis is set to true if
/// we looked through pointer casts to evaluate something.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                              bool &StrippedPointerCastsForAliasAnalysis) {
  // This is the main evaluation loop.
  while (true) {
    Constant *InstResult = nullptr;

    LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) {
        LLVM_DEBUG(dbgs() << "Store is volatile! Can not evaluate.\n");
        return false; // no volatile accesses.
      }
      Constant *Ptr = getVal(SI->getOperand(1));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
      }

      // Normalize the destination pointer to (global, byte offset).
      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
      auto *GV = dyn_cast<GlobalVariable>(Ptr);
      if (!GV || !GV->hasUniqueInitializer()) {
        LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
                          << *Ptr << "\n");
        return false;
      }

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      Constant *Val = getVal(SI->getOperand(0));
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
        LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
                          << *Val << "\n");
        return false;
      }

      // Apply the store to the mutable image of the global; the image is
      // seeded from the initializer the first time the global is touched.
      auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
      if (!Res.first->second.write(Val, Offset, DL))
        return false;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) {
        LLVM_DEBUG(
            dbgs() << "Found a Load! Volatile load, can not evaluate.\n");
        return false; // no volatile accesses.
      }

      Constant *Ptr = getVal(LI->getOperand(0));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
                             "folding: "
                          << *Ptr << "\n");
      }
      // Resolve the load against mutated memory / initializers.
      InstResult = ComputeLoadResult(Ptr, LI->getType());
      if (!InstResult) {
        LLVM_DEBUG(
            dbgs() << "Failed to compute load result. Can not evaluate load."
                      "\n");
        return false; // Could not evaluate load.
      }
      LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) {
        LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false; // Cannot handle array allocs.
      }
      // Model the alloca as a fresh internal global initialized to undef;
      // AllocaTmps owns its lifetime.
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(std::make_unique<GlobalVariable>(
          Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
          AI->getName(), /*TLMode=*/GlobalValue::NotThreadLocal,
          AI->getType()->getPointerAddressSpace()));
      InstResult = AllocaTmps.back().get();
      LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallBase &CB = *cast<CallBase>(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CB)) {
        LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CB)) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
                              << "intrinsic.\n");
            return false;
          }

          auto *LenC = dyn_cast<ConstantInt>(getVal(MSI->getLength()));
          if (!LenC) {
            LLVM_DEBUG(dbgs() << "Memset with unknown length.\n");
            return false;
          }

          Constant *Ptr = getVal(MSI->getDest());
          APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
          Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
              DL, Offset, /* AllowNonInbounds */ true));
          auto *GV = dyn_cast<GlobalVariable>(Ptr);
          if (!GV) {
            LLVM_DEBUG(dbgs() << "Memset with unknown base.\n");
            return false;
          }

          // A memset is only skippable when it is a no-op: each byte it
          // would write must already hold that value. Check byte-by-byte.
          Constant *Val = getVal(MSI->getValue());
          APInt Len = LenC->getValue();
          while (Len != 0) {
            Constant *DestVal = ComputeLoadResult(GV, Val->getType(), Offset);
            if (DestVal != Val) {
              LLVM_DEBUG(dbgs() << "Memset is not a no-op at offset "
                                << Offset << " of " << *GV << ".\n");
              return false;
            }
            ++Offset;
            --Len;
          }
          LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
          ++CurInst;
          continue;
        }

        if (II->isLifetimeStartOrEnd()) {
          LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          // NOTE(review): this branch fires when the intrinsic *has* uses
          // (!use_empty()), yet the debug text says "unused" — the wording
          // looks inverted; confirm against upstream before changing it.
          if (!II->use_empty()) {
            LLVM_DEBUG(dbgs()
                       << "Found unused invariant_start. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            Type *ElemTy = GV->getValueType();
            // Only record the invariant when the declared size covers the
            // whole global (and is not the -1 "unknown size" sentinel).
            if (!Size->isMinusOne() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
              LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
                                << *GV << "\n");
            } else {
              LLVM_DEBUG(dbgs()
                         << "Found a global var, but can not treat it as an "
                            "invariant.\n");
            }
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::assume) {
          LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
          LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::pseudoprobe) {
          LLVM_DEBUG(dbgs() << "Skipping pseudoprobe intrinsic.\n");
          ++CurInst;
          continue;
        } else {
          Value *Stripped = CurInst->stripPointerCastsForAliasAnalysis();
          // Only attempt to getVal() if we've actually managed to strip
          // anything away, or else we'll call getVal() on the current
          // instruction.
          if (Stripped != &*CurInst) {
            InstResult = getVal(Stripped);
          }
          if (InstResult) {
            LLVM_DEBUG(dbgs()
                       << "Stripped pointer casts for alias analysis for "
                          "intrinsic call.\n");
            StrippedPointerCastsForAliasAnalysis = true;
            InstResult = ConstantExpr::getBitCast(InstResult, II->getType());
          } else {
            LLVM_DEBUG(dbgs() << "Unknown intrinsic. Cannot evaluate.\n");
            return false;
          }
        }
      }

      // Not an intrinsic we handled above: try to evaluate it as an ordinary
      // call (constant-fold a declaration, or recursively interpret a body).
      if (!InstResult) {
        // Resolve function pointers.
        SmallVector<Constant *, 8> Formals;
        Function *Callee = getCalleeWithFormalArgs(CB, Formals);
        if (!Callee || Callee->isInterposable()) {
          LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
          return false; // Cannot resolve.
        }

        if (Callee->isDeclaration()) {
          // If this is a function we can constant fold, do it.
          if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
            InstResult = castCallResultIfNeeded(CB.getType(), C);
            if (!InstResult)
              return false;
            LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
                              << *InstResult << "\n");
          } else {
            LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
            return false;
          }
        } else {
          if (Callee->getFunctionType()->isVarArg()) {
            LLVM_DEBUG(dbgs()
                       << "Can not constant fold vararg function call.\n");
            return false;
          }

          Constant *RetVal = nullptr;
          // Execute the call, if successful, use the return value.
          // A fresh value scope is pushed for the callee's SSA values.
          ValueStack.emplace_back();
          if (!EvaluateFunction(Callee, RetVal, Formals)) {
            LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
            return false;
          }
          ValueStack.pop_back();
          InstResult = castCallResultIfNeeded(CB.getType(), RetVal);
          // A non-null return value that fails the cast is an error; a null
          // RetVal (void call) is fine.
          if (RetVal && !InstResult)
            return false;

          if (InstResult) {
            LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
                              << *InstResult << "\n\n");
          } else {
            LLVM_DEBUG(dbgs()
                       << "Successfully evaluated function. Result: 0\n\n");
          }
        }
      }
    } else if (CurInst->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");

      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
              dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false; // Cannot determine.

          // Successor 0 is the true edge, so index by the negated condition.
          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
            dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false; // Cannot determine.
        NextBB = SI->findCaseValue(Val)->getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false; // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        // Null NextBB signals "function returned" to the caller.
        NextBB = nullptr;
      } else {
        // invoke, unwind, resume, unreachable.
        LLVM_DEBUG(dbgs() << "Can not handle terminator.");
        return false; // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      // Any other instruction: fold it from its already-known constant
      // operands.
      SmallVector<Constant *> Ops;
      for (Value *Op : CurInst->operands())
        Ops.push_back(getVal(Op));
      InstResult = ConstantFoldInstOperands(&*CurInst, Ops, DL, TLI);
      if (!InstResult) {
        LLVM_DEBUG(dbgs() << "Cannot fold instruction: " << *CurInst << "\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Folded instruction " << *CurInst << " to "
                        << *InstResult << "\n");
    }

    // Record the instruction's value so later uses can look it up.
    if (!CurInst->use_empty()) {
      InstResult = ConstantFoldConstant(InstResult, DL, TLI);
      setVal(&*CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}
/// Evaluate a call to function F, returning true if successful, false if we
/// can't evaluate it. ActualArgs contains the formal arguments for the
/// function.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  assert(ActualArgs.size() == F->arg_size() && "wrong number of arguments");

  // Check to see if this function is already executing (recursion). If so,
  // bail out. TODO: we might want to accept limited recursion.
  if (is_contained(CallStack, F))
    return false;

  // NOTE(review): failure paths below return without popping F off
  // CallStack; callers appear to abandon the whole evaluation on failure —
  // confirm before relying on CallStack state after a failed call.
  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  for (const auto &[ArgNo, Arg] : llvm::enumerate(F->args()))
    setVal(&Arg, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
  // we can only evaluate any one basic block at most once. This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = &F->front();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (true) {
    BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
    LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    bool StrippedPointerCastsForAliasAnalysis = false;

    if (!EvaluateBlock(CurInst, NextBB, StrippedPointerCastsForAliasAnalysis))
      return false;

    if (!NextBB) {
      // Successfully running until there's no next block means that we found
      // the return. Fill it the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands()) {
        // The Evaluator can look through pointer casts as long as alias
        // analysis holds because it's just a simple interpreter and doesn't
        // skip memory accesses due to invariant group metadata, but we can't
        // let users of Evaluator use a value that's been gleaned looking
        // through stripping pointer casts.
        if (StrippedPointerCastsForAliasAnalysis &&
            !RI->getReturnValue()->getType()->isVoidTy()) {
          return false;
        }
        RetVal = getVal(RI->getOperand(0));
      }
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow. See if we have
    // executed the new block before. If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB).second)
      return false; // looped!

    // Okay, we have never been in this block before. Check to see if there
    // are any PHI nodes. If so, evaluate them with information about where
    // we came from.
    PHINode *PN = nullptr;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}