Instruction.cpp 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919
  1. //===-- Instruction.cpp - Implement the Instruction class -----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the Instruction class for the IR library.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "llvm/IR/Instruction.h"
  13. #include "llvm/ADT/DenseSet.h"
  14. #include "llvm/IR/Constants.h"
  15. #include "llvm/IR/Instructions.h"
  16. #include "llvm/IR/IntrinsicInst.h"
  17. #include "llvm/IR/Intrinsics.h"
  18. #include "llvm/IR/Operator.h"
  19. #include "llvm/IR/ProfDataUtils.h"
  20. #include "llvm/IR/Type.h"
  21. using namespace llvm;
/// Construct an instruction of type \p ty with opcode offset \p it and the
/// given operand storage. If \p InsertBefore is non-null, the new instruction
/// is linked into InsertBefore's basic block immediately before it;
/// InsertBefore must already be in a block.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore->getIterator());
  }
}
/// Construct an instruction and append it to the end of \p InsertAtEnd,
/// which must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  insertInto(InsertAtEnd, InsertAtEnd->end());
}
/// Destroy an instruction. The instruction must already have been unlinked
/// from its parent block (see removeFromParent/eraseFromParent).
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
/// Update the cached parent-block pointer. This only records \p P; it does
/// not splice the instruction into P's instruction list.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}
/// Return the module this instruction belongs to, by walking up through the
/// parent basic block. The instruction must be linked into a block.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

/// Return the function this instruction belongs to; requires a parent block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}
/// Unlink this instruction from its containing basic block without deleting
/// it; the caller becomes responsible for the detached instruction.
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

/// Unlink this instruction from its containing block and delete it.
/// Returns an iterator to the instruction that followed this one.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction. InsertPos must already be linked into a block.
void Instruction::insertBefore(Instruction *InsertPos) {
  insertInto(InsertPos->getParent(), InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction. InsertPos must already be linked into a block.
void Instruction::insertAfter(Instruction *InsertPos) {
  insertInto(InsertPos->getParent(), std::next(InsertPos->getIterator()));
}
/// Insert this (currently detached) instruction into \p ParentBB at position
/// \p It, which must be ParentBB->end() or an iterator into ParentBB.
/// Returns an iterator pointing at the newly inserted instruction.
BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  return ParentBB->getInstList().insert(It, this);
}
  88. /// Unlink this instruction from its current basic block and insert it into the
  89. /// basic block that MovePos lives in, right before MovePos.
  90. void Instruction::moveBefore(Instruction *MovePos) {
  91. moveBefore(*MovePos->getParent(), MovePos->getIterator());
  92. }
  93. void Instruction::moveAfter(Instruction *MovePos) {
  94. moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
  95. }
/// Move this instruction (already linked into some block) so that it appears
/// immediately before position \p I in block \p BB. Implemented as a list
/// splice, so no instructions are allocated, copied, or freed.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.splice(I, getParent(), getIterator());
}
/// Return true if this instruction appears strictly before \p Other in their
/// (shared) basic block. Both instructions must be in the same block.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  // Instruction order numbers are computed lazily and invalidated on
  // modification; renumber on demand before comparing.
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
/// Return the first legal position at which an instruction that uses this
/// instruction's value could be inserted, or nullptr if no such position
/// exists. PHIs are skipped; for invoke/callbr the insertion point is in the
/// normal/default successor block.
Instruction *Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    // Uses of a PHI must come after the whole PHI group.
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    // The value is only defined on the normal path.
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *CB = dyn_cast<CallBrInst>(this)) {
    InsertBB = CB->getDefaultDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return nullptr;
  return &*InsertPt;
}
/// Return true if this instruction is the sole user of at least one of its
/// operands.
bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}
// Wrap/exactness flag accessors. Each cast<> below asserts that the
// instruction is of the matching operator class, so callers may only use
// these on instructions that can carry the flag.

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
/// Return true if this instruction carries any flag (nuw/nsw, exact,
/// inbounds, nnan/ninf, ...) whose violation produces poison. Must be kept
/// in sync with dropPoisonGeneratingFlags().
bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}
/// Clear every flag on this instruction that could generate poison. The
/// trailing assert enforces that this stays in sync with
/// hasPoisonGeneratingFlags().
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    // Overflow flags.
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }

  // Of the fast-math flags, only nnan and ninf generate poison.
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
  179. bool Instruction::hasPoisonGeneratingMetadata() const {
  180. return hasMetadata(LLVMContext::MD_range) ||
  181. hasMetadata(LLVMContext::MD_nonnull) ||
  182. hasMetadata(LLVMContext::MD_align);
  183. }
  184. void Instruction::dropPoisonGeneratingMetadata() {
  185. eraseMetadata(LLVMContext::MD_range);
  186. eraseMetadata(LLVMContext::MD_nonnull);
  187. eraseMetadata(LLVMContext::MD_align);
  188. }
/// Drop all metadata not in \p KnownIDs (debug metadata excluded), and, for
/// calls, drop parameter/return attributes whose violation is UB. Used when
/// an instruction is moved to a location where its assumptions may no longer
/// hold.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where
  // the attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
/// Return the 'exact' flag; only valid on possibly-exact operators
/// (udiv/sdiv/lshr/ashr) — the cast asserts otherwise.
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}
// Fast-math flag setters. Each of these is only valid on an FPMathOperator;
// the assert (and the cast) fire otherwise.

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

// Replace all fast-math flags at once.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

// OR the given flags into the current ones (union semantics).
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
// Fast-math flag getters. Only valid on an FPMathOperator; the assert (and
// the cast) fire otherwise.

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

/// Copy all fast-math flags from \p I onto this instruction.
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
/// Copy IR-level optimization flags (nuw/nsw, exact, fast-math, inbounds)
/// from \p V onto this instruction, for each flag kind both sides support.
/// Wrap flags are only copied when \p IncludeWrapFlags is true.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // Note: inbounds is OR'ed with the destination's existing flag rather than
  // overwritten.
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}
/// Intersect the IR-level optimization flags of this instruction with those
/// of \p V: a flag survives only if both instructions have it. Used when two
/// instructions are merged into one.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      // Fast-math flags are intersected as a bitmask.
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
/// Return the textual (assembly) name for the given opcode, or
/// "<Invalid operator> " for an unrecognized value.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// "Special state" is everything beyond opcode, type, and operands: flags
/// like volatility, alignment (unless \p IgnoreAlignment), atomic orderings,
/// predicates, calling conventions, attributes, and indices. Both
/// instructions must have the same opcode; the cast<>s assume this.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  // Instructions with no special state are trivially equal here.
  return true;
}
/// Return true if the two instructions are fully identical, including
/// optional flags (nuw/nsw, exact, fast-math) carried in
/// SubclassOptionalData.
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}
/// Return true if the two instructions are identical modulo optional flags:
/// same opcode, type, operand count, operands, special state, and (for PHIs)
/// the same incoming blocks in the same order.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Return true if \p I performs the same operation as this instruction:
// same opcode, matching (possibly scalarized) types, and same special state.
// Unlike isIdenticalTo, the operands themselves need not be the same values,
// only the same types. \p flags is a bitmask of CompareIgnoringAlignment and
// CompareUsingScalarTypes.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
/// Return true if any use of this instruction is effectively outside of block
/// \p BB. A use in a PHI counts as a use in the corresponding incoming block,
/// not in the PHI's own block.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block. For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}
/// Return true if this instruction may read memory. Conservative for calls
/// (reads unless the callee only writes) and for ordered/volatile stores.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    // A store is only considered memory-reading when it is not unordered
    // (i.e. it is volatile or atomic with ordering).
    return !cast<StoreInst>(this)->isUnordered();
  }
}
/// Return true if this instruction may modify memory. Conservative for calls
/// (writes unless the callee only reads) and for ordered/volatile loads.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    // A load is only considered memory-writing when it is not unordered
    // (i.e. it is volatile or atomic with ordering).
    return !cast<LoadInst>(this)->isUnordered();
  }
}
/// Return true if this instruction has an atomic ordering effect:
/// cmpxchg, atomicrmw, fence, or a load/store with non-NotAtomic ordering.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
/// Return true if this atomic instruction loads from memory
/// (cmpxchg, atomicrmw, or an atomic load). Requires isAtomic().
bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    // e.g. fence: atomic but performs no load.
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

/// Return true if this atomic instruction stores to memory
/// (cmpxchg, atomicrmw, or an atomic store). Requires isAtomic().
bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}
/// Return true if this instruction has a volatile memory access: a volatile
/// load/store/cmpxchg/atomicrmw, a volatile mem intrinsic, or one of the
/// matrix intrinsics with its volatile argument set.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      // The volatile flag is an i1 constant argument of these intrinsics.
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
  639. bool Instruction::mayThrow() const {
  640. if (const CallInst *CI = dyn_cast<CallInst>(this))
  641. return !CI->doesNotThrow();
  642. if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
  643. return CRI->unwindsToCaller();
  644. if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
  645. return CatchSwitch->unwindsToCaller();
  646. return isa<ResumeInst>(this);
  647. }
/// Return true if this instruction may have any effect observable beyond its
/// result value: writing memory, throwing, or possibly not returning.
bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

/// Return true if this instruction can be deleted without changing program
/// behavior: not a terminator, not an EH pad, and not a side-effecting call.
bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator() && !this->isEHPad();
}
/// Return true if this instruction is guaranteed to transfer control to its
/// successor (i.e. it always "returns" in the LangRef sense).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  // Calls are only guaranteed to return when marked willreturn.
  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
  return true;
}
  663. bool Instruction::isLifetimeStartOrEnd() const {
  664. auto *II = dyn_cast<IntrinsicInst>(this);
  665. if (!II)
  666. return false;
  667. Intrinsic::ID ID = II->getIntrinsicID();
  668. return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
  669. }
  670. bool Instruction::isLaunderOrStripInvariantGroup() const {
  671. auto *II = dyn_cast<IntrinsicInst>(this);
  672. if (!II)
  673. return false;
  674. Intrinsic::ID ID = II->getIntrinsicID();
  675. return ID == Intrinsic::launder_invariant_group ||
  676. ID == Intrinsic::strip_invariant_group;
  677. }
  678. bool Instruction::isDebugOrPseudoInst() const {
  679. return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
  680. }
  681. const Instruction *
  682. Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  683. for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
  684. if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
  685. return I;
  686. return nullptr;
  687. }
  688. const Instruction *
  689. Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  690. for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
  691. if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
  692. return I;
  693. return nullptr;
  694. }
  695. bool Instruction::isAssociative() const {
  696. unsigned Opcode = getOpcode();
  697. if (isAssociative(Opcode))
  698. return true;
  699. switch (Opcode) {
  700. case FMul:
  701. case FAdd:
  702. return cast<FPMathOperator>(this)->hasAllowReassoc() &&
  703. cast<FPMathOperator>(this)->hasNoSignedZeros();
  704. default:
  705. return false;
  706. }
  707. }
  708. bool Instruction::isCommutative() const {
  709. if (auto *II = dyn_cast<IntrinsicInst>(this))
  710. return II->isCommutative();
  711. // TODO: Should allow icmp/fcmp?
  712. return isCommutative(getOpcode());
  713. }
/// Return the number of successor basic blocks of this terminator.
/// Dispatches to the concrete terminator subclass; the switch cases are
/// generated from Instruction.def via the HANDLE_TERM_INST x-macro.
/// Calling this on a non-terminator is a fatal error.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
  // One case per terminator opcode, forwarding to the subclass method.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
/// Return the successor block at index \p idx of this terminator.
/// Dispatches to the concrete terminator subclass; the switch cases are
/// generated from Instruction.def via the HANDLE_TERM_INST x-macro.
/// Calling this on a non-terminator is a fatal error.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
  // One case per terminator opcode, forwarding to the subclass method.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
/// Set the successor block at index \p idx of this terminator to \p B.
/// Dispatches to the concrete terminator subclass; the switch cases are
/// generated from Instruction.def via the HANDLE_TERM_INST x-macro.
/// Calling this on a non-terminator is a fatal error.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
  // One case per terminator opcode, forwarding to the subclass method.
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
  747. void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  748. for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
  749. Idx != NumSuccessors; ++Idx)
  750. if (getSuccessor(Idx) == OldBB)
  751. setSuccessor(Idx, NewBB);
  752. }
/// Fallback cloneImpl. Every concrete instruction class is expected to
/// override this; reaching this body means a subclass forgot to.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
  756. void Instruction::swapProfMetadata() {
  757. MDNode *ProfileData = getBranchWeightMDNode(*this);
  758. if (!ProfileData || ProfileData->getNumOperands() != 3)
  759. return;
  760. // The first operand is the name. Fetch them backwards and build a new one.
  761. Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
  762. ProfileData->getOperand(1)};
  763. setMetadata(LLVMContext::MD_prof,
  764. MDNode::get(ProfileData->getContext(), Ops));
  765. }
  766. void Instruction::copyMetadata(const Instruction &SrcInst,
  767. ArrayRef<unsigned> WL) {
  768. if (!SrcInst.hasMetadata())
  769. return;
  770. DenseSet<unsigned> WLS;
  771. for (unsigned M : WL)
  772. WLS.insert(M);
  773. // Otherwise, enumerate and copy over metadata from the old instruction to the
  774. // new one.
  775. SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  776. SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  777. for (const auto &MD : TheMDs) {
  778. if (WL.empty() || WLS.count(MD.first))
  779. setMetadata(MD.first, MD.second);
  780. }
  781. if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
  782. setDebugLoc(SrcInst.getDebugLoc());
  783. }
/// Create a copy of this instruction that is identical in all ways except
/// it has no parent and no name. Dispatches to the subclass cloneImpl via
/// the HANDLE_INST x-macro from Instruction.def, then copies over the
/// optional-flag bits and all metadata (including the debug location).
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
  // One case per instruction opcode, forwarding to the subclass cloneImpl.
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }
  // cloneImpl copies only the core operands/flags; carry over the
  // optional-data bits and metadata explicitly.
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}