Instruction.cpp 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875
  1. //===-- Instruction.cpp - Implement the Instruction class -----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the Instruction class for the IR library.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "llvm/IR/Instruction.h"
  13. #include "llvm/ADT/DenseSet.h"
  14. #include "llvm/IR/Constants.h"
  15. #include "llvm/IR/Instructions.h"
  16. #include "llvm/IR/IntrinsicInst.h"
  17. #include "llvm/IR/Intrinsics.h"
  18. #include "llvm/IR/Operator.h"
  19. #include "llvm/IR/Type.h"
  20. using namespace llvm;
// Construct an instruction and, if InsertBefore is non-null, link it into
// InsertBefore's basic block immediately ahead of InsertBefore.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

// Construct an instruction and append it to the end of InsertAtEnd, which
// must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}
// Destructor. The instruction must already be unlinked from its parent basic
// block; any metadata still referring to it is redirected to undef.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}
  52. void Instruction::setParent(BasicBlock *P) {
  53. Parent = P;
  54. }
  55. const Module *Instruction::getModule() const {
  56. return getParent()->getModule();
  57. }
  58. const Function *Instruction::getFunction() const {
  59. return getParent()->getParent();
  60. }
  61. void Instruction::removeFromParent() {
  62. getParent()->getInstList().remove(getIterator());
  63. }
  64. iplist<Instruction>::iterator Instruction::eraseFromParent() {
  65. return getParent()->getInstList().erase(getIterator());
  66. }
  67. /// Insert an unlinked instruction into a basic block immediately before the
  68. /// specified instruction.
  69. void Instruction::insertBefore(Instruction *InsertPos) {
  70. InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
  71. }
  72. /// Insert an unlinked instruction into a basic block immediately after the
  73. /// specified instruction.
  74. void Instruction::insertAfter(Instruction *InsertPos) {
  75. InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
  76. this);
  77. }
  78. /// Unlink this instruction from its current basic block and insert it into the
  79. /// basic block that MovePos lives in, right before MovePos.
  80. void Instruction::moveBefore(Instruction *MovePos) {
  81. moveBefore(*MovePos->getParent(), MovePos->getIterator());
  82. }
  83. void Instruction::moveAfter(Instruction *MovePos) {
  84. moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
  85. }
  86. void Instruction::moveBefore(BasicBlock &BB,
  87. SymbolTableList<Instruction>::iterator I) {
  88. assert(I == BB.end() || I->getParent() == &BB);
  89. BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
  90. }
// Return true if this instruction appears strictly before Other in their
// (shared) basic block. Relies on the block's cached per-instruction Order
// numbers, lazily renumbering the block when that cache has been invalidated.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
  99. bool Instruction::isOnlyUserOfAnyOperand() {
  100. return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
  101. }
// The flag accessors below forward to the matching Operator subclass; each
// cast<> asserts that this instruction's opcode can actually carry the flag.

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

// True if this instruction currently carries any flag whose presence can make
// the result poison (e.g. nuw/nsw, exact, inbounds, some fast-math flags).
bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}
// Clear every flag on this instruction that could cause it to produce poison:
// nuw/nsw on overflowing binary ops, exact on division/right-shift, inbounds
// on GEPs, and nnan/ninf on FP ops.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }

  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  // This list of flags must cover everything hasPoisonGeneratingFlags()
  // reports, otherwise the two functions have drifted apart.
  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
// Strip metadata kinds not listed in KnownIDs (debug metadata is preserved),
// and for calls additionally strip parameter/return attributes whose
// violation would be immediate UB — making the instruction safe to hoist or
// speculate to a location where those facts may no longer hold.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return attributes
  // that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

// True if this instruction carries the 'exact' flag (udiv/sdiv/lshr/ashr).
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}
// Fast-math flag accessors. All of them are only valid on instructions that
// are FPMathOperators; the assert gives a readable failure before the cast<>
// would abort. Setters set/clear a single flag (or the whole set); getters
// read the corresponding bit.

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

// Replace the whole fast-math flag set at once.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

// OR the given flags into this instruction's existing fast-math flags.
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

// Convenience overload: copy the fast-math flags from another instruction.
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
// Copy IR-level optimization flags from V onto this instruction, for every
// flag kind both values support: nuw/nsw (unless IncludeWrapFlags is false),
// exact, fast-math flags, and GEP inbounds. Note inbounds is OR'ed with the
// destination's existing setting rather than overwritten.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}
  265. void Instruction::andIRFlags(const Value *V) {
  266. if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
  267. if (isa<OverflowingBinaryOperator>(this)) {
  268. setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
  269. setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
  270. }
  271. }
  272. if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
  273. if (isa<PossiblyExactOperator>(this))
  274. setIsExact(isExact() && PE->isExact());
  275. if (auto *FP = dyn_cast<FPMathOperator>(V)) {
  276. if (isa<FPMathOperator>(this)) {
  277. FastMathFlags FM = getFastMathFlags();
  278. FM &= FP->getFastMathFlags();
  279. copyFastMathFlags(FM);
  280. }
  281. }
  282. if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
  283. if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
  284. DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
  285. }
// Map an opcode value to its textual IR mnemonic (e.g. Add -> "add").
// Unknown values yield a sentinel string rather than asserting.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// "Special state" is everything outside the operand list that contributes to
/// an instruction's semantics: alignment, volatility, atomic orderings,
/// predicates, calling conventions, attributes, indices, shuffle masks, etc.
/// Both instructions must already have the same opcode. If IgnoreAlignment is
/// set, alignment differences on alloca/load/store are tolerated.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  // Opcodes with no extra state compare equal by construction.
  return true;
}
// Full identity: same operation, operands, and special state, plus identical
// optional flag bits (SubclassOptionalData, e.g. nuw/nsw/fast-math).
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

// Identity ignoring the optional poison/FP flag bits: two instructions that
// agree on opcode, type, operands, and special state are "identical when
// defined" even if their flags differ.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // PHI operand equality alone is not enough: the incoming blocks must also
  // match pairwise.
  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Weaker than isIdenticalTo: compares the *kind* of operation (opcode, result
// and operand types, special state) without requiring the operands to be the
// same values. 'flags' may relax the comparison to ignore alignment and/or
// compare only scalar element types.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
  473. bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  474. for (const Use &U : uses()) {
  475. // PHI nodes uses values in the corresponding predecessor block. For other
  476. // instructions, just check to see whether the parent of the use matches up.
  477. const Instruction *I = cast<Instruction>(U.getUser());
  478. const PHINode *PN = dyn_cast<PHINode>(I);
  479. if (!PN) {
  480. if (I->getParent() != BB)
  481. return true;
  482. continue;
  483. }
  484. if (PN->getIncomingBlock(U) != BB)
  485. return true;
  486. }
  487. return false;
  488. }
// Conservatively report whether executing this instruction may read memory.
// Calls defer to their memory attributes; atomic/ordered stores count as
// reads because of their synchronization effects.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

// Conservatively report whether executing this instruction may write memory.
// Mirror image of mayReadFromMemory: atomic/ordered loads count as writes
// because of their synchronization effects.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
// True if this instruction has non-trivial atomic semantics: cmpxchg,
// atomicrmw and fence always do; loads and stores only when they carry an
// ordering other than NotAtomic.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
  541. bool Instruction::hasAtomicLoad() const {
  542. assert(isAtomic());
  543. switch (getOpcode()) {
  544. default:
  545. return false;
  546. case Instruction::AtomicCmpXchg:
  547. case Instruction::AtomicRMW:
  548. case Instruction::Load:
  549. return true;
  550. }
  551. }
  552. bool Instruction::hasAtomicStore() const {
  553. assert(isAtomic());
  554. switch (getOpcode()) {
  555. default:
  556. return false;
  557. case Instruction::AtomicCmpXchg:
  558. case Instruction::AtomicRMW:
  559. case Instruction::Store:
  560. return true;
  561. }
  562. }
// True if this instruction performs a volatile memory access. For calls, only
// a handful of intrinsics can be volatile: the mem* family, and the matrix
// load/store intrinsics whose volatility is an immediate argument (operand 2
// for loads, operand 3 for stores).
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
// True if executing this instruction may raise an exception that unwinds out
// of it: a call not marked nounwind, an EH terminator that unwinds to the
// caller, or a resume.
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

// An instruction has side effects if it may write memory, may unwind, or may
// never return (divergence is observable behavior).
bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

// True if this instruction can be removed when its result is unused:
// terminators and side-effecting calls must be kept.
bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}
// Conservatively determine whether this instruction is guaranteed to transfer
// execution to its successor (i.e. cannot hang, abort, or otherwise diverge).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}
  620. bool Instruction::isLifetimeStartOrEnd() const {
  621. auto *II = dyn_cast<IntrinsicInst>(this);
  622. if (!II)
  623. return false;
  624. Intrinsic::ID ID = II->getIntrinsicID();
  625. return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
  626. }
  627. bool Instruction::isLaunderOrStripInvariantGroup() const {
  628. auto *II = dyn_cast<IntrinsicInst>(this);
  629. if (!II)
  630. return false;
  631. Intrinsic::ID ID = II->getIntrinsicID();
  632. return ID == Intrinsic::launder_invariant_group ||
  633. ID == Intrinsic::strip_invariant_group;
  634. }
  635. bool Instruction::isDebugOrPseudoInst() const {
  636. return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
  637. }
  638. const Instruction *
  639. Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  640. for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
  641. if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
  642. return I;
  643. return nullptr;
  644. }
  645. const Instruction *
  646. Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  647. for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
  648. if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
  649. return I;
  650. return nullptr;
  651. }
// An operation is associative if (A op B) op C == A op (B op C). The static
// per-opcode overload covers the always-associative integer ops; FP add/mul
// additionally qualify only when reassociation is allowed and signed zeros
// may be ignored.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

// An operation is commutative if A op B == B op A. Intrinsic calls report
// their own commutativity; everything else is decided per-opcode.
bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}
// The three successor accessors below dispatch to the concrete terminator
// subclass via the HANDLE_TERM_INST table in Instruction.def; calling any of
// them on a non-terminator is a fatal error.

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
  704. void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  705. for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
  706. Idx != NumSuccessors; ++Idx)
  707. if (getSuccessor(Idx) == OldBB)
  708. setSuccessor(Idx, NewBB);
  709. }
// Fallback cloneImpl: every concrete subclass must override this; reaching
// the base implementation is a bug.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
// Swap the two weights of !prof branch_weights metadata (used when the
// condition or successor order of a branch is inverted). Metadata of any
// other shape or name is left untouched.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  // Expect exactly: !{!"branch_weights", w_true, w_false}.
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
  727. void Instruction::copyMetadata(const Instruction &SrcInst,
  728. ArrayRef<unsigned> WL) {
  729. if (!SrcInst.hasMetadata())
  730. return;
  731. DenseSet<unsigned> WLS;
  732. for (unsigned M : WL)
  733. WLS.insert(M);
  734. // Otherwise, enumerate and copy over metadata from the old instruction to the
  735. // new one.
  736. SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  737. SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  738. for (const auto &MD : TheMDs) {
  739. if (WL.empty() || WLS.count(MD.first))
  740. setMetadata(MD.first, MD.second);
  741. }
  742. if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
  743. setDebugLoc(SrcInst.getDebugLoc());
  744. }
// Create an unlinked copy of this instruction: dispatch to the concrete
// subclass's cloneImpl() via the HANDLE_INST table in Instruction.def, then
// copy the optional flag bits and all metadata (including the debug loc).
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}