//===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass statically checks for common and easily-identified constructs
// which produce undefined or likely unintended behavior in LLVM IR.
//
// It is not a guarantee of correctness, in two ways. First, it isn't
// comprehensive. There are checks which could be done statically which are
// not yet implemented. Some of these are indicated by TODO comments, but
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
//
//===----------------------------------------------------------------------===//
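// As a concrete example of the kind of construct flagged here: a reachable
// store through a null pointer, such as
//
//   store i32 0, i32* null
//
// passes the Verifier but is reported by this pass as
// "Undefined behavior: Null pointer dereference".
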
#include "llvm/Analysis/Lint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>

using namespace llvm;

namespace {
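
// Bit flags describing how an instruction may use a memory location; they are
// combined and passed to Lint::visitMemoryReference below.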
namespace MemRef {
static const unsigned Read = 1;
static const unsigned Write = 2;
static const unsigned Callee = 4;
static const unsigned Branchee = 8;
} // end namespace MemRef

class Lint : public InstVisitor<Lint> {
  friend class InstVisitor<Lint>;

  void visitFunction(Function &F);

  void visitCallBase(CallBase &CB);
  void visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                            MaybeAlign Alignment, Type *Ty, unsigned Flags);
  void visitEHBeginCatch(IntrinsicInst *II);
  void visitEHEndCatch(IntrinsicInst *II);
  void visitReturnInst(ReturnInst &I);
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitXor(BinaryOperator &I);
  void visitSub(BinaryOperator &I);
  void visitLShr(BinaryOperator &I);
  void visitAShr(BinaryOperator &I);
  void visitShl(BinaryOperator &I);
  void visitSDiv(BinaryOperator &I);
  void visitUDiv(BinaryOperator &I);
  void visitSRem(BinaryOperator &I);
  void visitURem(BinaryOperator &I);
  void visitAllocaInst(AllocaInst &I);
  void visitVAArgInst(VAArgInst &I);
  void visitIndirectBrInst(IndirectBrInst &I);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitUnreachableInst(UnreachableInst &I);

  Value *findValue(Value *V, bool OffsetOk) const;
  Value *findValueImpl(Value *V, bool OffsetOk,
                       SmallPtrSetImpl<Value *> &Visited) const;

public:
  Module *Mod;
  const DataLayout *DL;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  DominatorTree *DT;
  TargetLibraryInfo *TLI;

  std::string Messages;
  raw_string_ostream MessagesStr;

  Lint(Module *Mod, const DataLayout *DL, AliasAnalysis *AA,
       AssumptionCache *AC, DominatorTree *DT, TargetLibraryInfo *TLI)
      : Mod(Mod), DL(DL), AA(AA), AC(AC), DT(DT), TLI(TLI),
        MessagesStr(Messages) {}

  void WriteValues(ArrayRef<const Value *> Vs) {
    for (const Value *V : Vs) {
      if (!V)
        continue;
      if (isa<Instruction>(V)) {
        MessagesStr << *V << '\n';
      } else {
        V->printAsOperand(MessagesStr, true, Mod);
        MessagesStr << '\n';
      }
    }
  }
  /// A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }

  /// A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set
  /// a breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    WriteValues({V1, Vs...});
  }
};
} // end anonymous namespace

// Assert - We know that cond should be true; if not, print an error message.
#define Assert(C, ...)                                                         \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

void Lint::visitFunction(Function &F) {
  // This isn't undefined behavior, it's just a little unusual, and it's a
  // fairly common mistake to neglect to name a function.
  Assert(F.hasName() || F.hasLocalLinkage(),
         "Unusual: Unnamed function with non-local linkage", &F);

  // TODO: Check for irreducible control flow.
}

void Lint::visitCallBase(CallBase &I) {
  Value *Callee = I.getCalledOperand();

  visitMemoryReference(I, MemoryLocation::getAfter(Callee), None, nullptr,
                       MemRef::Callee);

  if (Function *F = dyn_cast<Function>(findValue(Callee,
                                                 /*OffsetOk=*/false))) {
    Assert(I.getCallingConv() == F->getCallingConv(),
           "Undefined behavior: Caller and callee calling convention differ",
           &I);

    FunctionType *FT = F->getFunctionType();
    unsigned NumActualArgs = I.arg_size();

    Assert(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
                          : FT->getNumParams() == NumActualArgs,
           "Undefined behavior: Call argument count mismatches callee "
           "argument count",
           &I);

    Assert(FT->getReturnType() == I.getType(),
           "Undefined behavior: Call return type mismatches "
           "callee return type",
           &I);

    // Check argument types (in case the callee was cast) and attributes.
    // TODO: Verify that caller and callee attributes are compatible.
    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
    auto AI = I.arg_begin(), AE = I.arg_end();
    for (; AI != AE; ++AI) {
      Value *Actual = *AI;
      if (PI != PE) {
        Argument *Formal = &*PI++;
        Assert(Formal->getType() == Actual->getType(),
               "Undefined behavior: Call argument type mismatches "
               "callee parameter type",
               &I);

        // Check that noalias arguments don't alias other arguments. This is
        // not fully precise because we don't know the sizes of the
        // dereferenced memory regions.
        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) {
          AttributeList PAL = I.getAttributes();
          unsigned ArgNo = 0;
          for (auto BI = I.arg_begin(); BI != AE; ++BI, ++ArgNo) {
            // Skip ByVal arguments since they will be memcpy'd to the callee's
            // stack so we're not really passing the pointer anyway.
            if (PAL.hasParamAttr(ArgNo, Attribute::ByVal))
              continue;
            // If both arguments are readonly, they have no dependence.
            if (Formal->onlyReadsMemory() && I.onlyReadsMemory(ArgNo))
              continue;
            if (AI != BI && (*BI)->getType()->isPointerTy()) {
              AliasResult Result = AA->alias(*AI, *BI);
              Assert(Result != AliasResult::MustAlias &&
                         Result != AliasResult::PartialAlias,
                     "Unusual: noalias argument aliases another argument", &I);
            }
          }
        }

        // Check that an sret argument points to valid memory.
        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
          Type *Ty = Formal->getParamStructRetType();
          MemoryLocation Loc(
              Actual, LocationSize::precise(DL->getTypeStoreSize(Ty)));
          visitMemoryReference(I, Loc, DL->getABITypeAlign(Ty), Ty,
                               MemRef::Read | MemRef::Write);
        }
      }
    }
  }

  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (CI->isTailCall()) {
      const AttributeList &PAL = CI->getAttributes();
      unsigned ArgNo = 0;
      for (Value *Arg : I.args()) {
        // Skip ByVal arguments since they will be memcpy'd to the callee's
        // stack anyway.
        if (PAL.hasParamAttr(ArgNo++, Attribute::ByVal))
          continue;
        Value *Obj = findValue(Arg, /*OffsetOk=*/true);
        Assert(!isa<AllocaInst>(Obj),
               "Undefined behavior: Call with \"tail\" keyword references "
               "alloca",
               &I);
      }
    }
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
    switch (II->getIntrinsicID()) {
    default:
      break;

    // TODO: Check more intrinsics

    case Intrinsic::memcpy: {
      MemCpyInst *MCI = cast<MemCpyInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MCI),
                           MCI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MCI),
                           MCI->getSourceAlign(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      auto Size = LocationSize::afterPointer();
      if (const ConstantInt *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                              /*OffsetOk=*/false)))
        if (Len->getValue().isIntN(32))
          Size = LocationSize::precise(Len->getValue().getZExtValue());
      Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                 AliasResult::MustAlias,
             "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memcpy_inline: {
      MemCpyInlineInst *MCII = cast<MemCpyInlineInst>(&I);
      const uint64_t Size = MCII->getLength()->getValue().getLimitedValue();
      visitMemoryReference(I, MemoryLocation::getForDest(MCII),
                           MCII->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MCII),
                           MCII->getSourceAlign(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      const LocationSize LS = LocationSize::precise(Size);
      Assert(AA->alias(MCII->getSource(), LS, MCII->getDest(), LS) !=
                 AliasResult::MustAlias,
             "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MMI),
                           MMI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MMI),
                           MMI->getSourceAlign(), nullptr, MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MSI),
                           MSI->getDestAlign(), nullptr, MemRef::Write);
      break;
    }

    case Intrinsic::vastart:
      Assert(I.getParent()->getParent()->isVarArg(),
             "Undefined behavior: va_start called in a non-varargs function",
             &I);

      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
                           nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
                           nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI), None,
                           nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
                           nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI), None,
                           nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::get_active_lane_mask:
      if (auto *TripCount = dyn_cast<ConstantInt>(I.getArgOperand(1)))
        Assert(!TripCount->isZero(), "get_active_lane_mask: operand #2 "
                                     "must be greater than 0", &I);
      break;
    }
}

void Lint::visitReturnInst(ReturnInst &I) {
  Function *F = I.getParent()->getParent();
  Assert(!F->doesNotReturn(),
         "Unusual: Return statement in function with noreturn attribute", &I);

  if (Value *V = I.getReturnValue()) {
    Value *Obj = findValue(V, /*OffsetOk=*/true);
    Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
  }
}

// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
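//
// As an illustration of the object-size check below: an 8-byte load from a
// 4-byte alloca, e.g.
//
//   %p = alloca i32
//   %q = bitcast i32* %p to i64*
//   %v = load i64, i64* %q
//
// is flagged as "Undefined behavior: Buffer overflow".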
void Lint::visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                                MaybeAlign Align, Type *Ty, unsigned Flags) {
  // If no memory is being referenced, it doesn't matter if the pointer
  // is valid.
  if (Loc.Size.isZero())
    return;

  Value *Ptr = const_cast<Value *>(Loc.Ptr);
  Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
  Assert(!isa<ConstantPointerNull>(UnderlyingObject),
         "Undefined behavior: Null pointer dereference", &I);
  Assert(!isa<UndefValue>(UnderlyingObject),
         "Undefined behavior: Undef pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
         "Unusual: All-ones pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isOne(),
         "Unusual: Address one pointer dereference", &I);

  if (Flags & MemRef::Write) {
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
      Assert(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
             &I);
    Assert(!isa<Function>(UnderlyingObject) &&
               !isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Write to text section", &I);
  }
  if (Flags & MemRef::Read) {
    Assert(!isa<Function>(UnderlyingObject), "Unusual: Load from function body",
           &I);
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Load from block address", &I);
  }
  if (Flags & MemRef::Callee) {
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Call to block address", &I);
  }
  if (Flags & MemRef::Branchee) {
    Assert(!isa<Constant>(UnderlyingObject) ||
               isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Branch to non-blockaddress", &I);
  }

  // Check for buffer overflows and misalignment.
  // Only handles memory references that read/write something simple like an
  // alloca instruction or a global variable.
  int64_t Offset = 0;
  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *DL)) {
    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
    // something we can handle and if so extract the size of this base object
    // along with its alignment.
    uint64_t BaseSize = MemoryLocation::UnknownSize;
    MaybeAlign BaseAlign;

    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
      Type *ATy = AI->getAllocatedType();
      if (!AI->isArrayAllocation() && ATy->isSized())
        BaseSize = DL->getTypeAllocSize(ATy);
      BaseAlign = AI->getAlign();
    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
      // If the global may be defined differently in another compilation unit
      // then don't warn about funky memory accesses.
      if (GV->hasDefinitiveInitializer()) {
        Type *GTy = GV->getValueType();
        if (GTy->isSized())
          BaseSize = DL->getTypeAllocSize(GTy);
        BaseAlign = GV->getAlign();
        if (!BaseAlign && GTy->isSized())
          BaseAlign = DL->getABITypeAlign(GTy);
      }
    }

    // Accesses from before the start or after the end of the object are not
    // defined.
    Assert(!Loc.Size.hasValue() || BaseSize == MemoryLocation::UnknownSize ||
               (Offset >= 0 && Offset + Loc.Size.getValue() <= BaseSize),
           "Undefined behavior: Buffer overflow", &I);

    // Accesses that say that the memory is more aligned than it is are not
    // defined.
    if (!Align && Ty && Ty->isSized())
      Align = DL->getABITypeAlign(Ty);
    if (BaseAlign && Align)
      Assert(*Align <= commonAlignment(*BaseAlign, Offset),
             "Undefined behavior: Memory reference address is misaligned", &I);
  }
}

void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(), I.getType(),
                       MemRef::Read);
}

void Lint::visitStoreInst(StoreInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
                       I.getOperand(0)->getType(), MemRef::Write);
}

void Lint::visitXor(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: xor(undef, undef)", &I);
}

void Lint::visitSub(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: sub(undef, undef)", &I);
}

void Lint::visitLShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
                                                        /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    KnownBits Known =
        computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
    return Known.isZero();
  }

  // Per-component check doesn't work with zeroinitializer
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component
  for (unsigned I = 0, N = cast<FixedVectorType>(VecTy)->getNumElements();
       I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    KnownBits Known = computeKnownBits(Elem, DL);
    if (Known.isZero())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Assert(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
           "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}

void Lint::visitVAArgInst(VAArgInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), None, nullptr,
                       MemRef::Read | MemRef::Write);
}

void Lint::visitIndirectBrInst(IndirectBrInst &I) {
  visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()), None,
                       nullptr, MemRef::Branchee);

  Assert(I.getNumDestinations() != 0,
         "Undefined behavior: indirectbr with no destinations", &I);
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
                                                        /*OffsetOk=*/false)))
    Assert(
        CI->getValue().ult(
            cast<FixedVectorType>(I.getVectorOperandType())->getNumElements()),
        "Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
                                                        /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(
               cast<FixedVectorType>(I.getType())->getNumElements()),
           "Undefined result: insertelement index out of range", &I);
}

void Lint::visitUnreachableInst(UnreachableInst &I) {
  // This isn't undefined behavior, it's merely suspicious.
  Assert(&I == &I.getParent()->front() ||
             std::prev(I.getIterator())->mayHaveSideEffects(),
         "Unusual: unreachable immediately preceded by instruction without "
         "side effects",
         &I);
}

/// findValue - Look through bitcasts and simple memory reference patterns
/// to identify an equivalent, but more informative, value. If OffsetOk
/// is true, look through getelementptrs with non-zero offsets too.
///
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
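///
/// For example, a load preceded by a store to the same location resolves to
/// the stored value (via FindAvailableLoadedValue), no-op casts are stripped,
/// and remaining values are handed to SimplifyInstruction or constant folding.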
Value *Lint::findValue(Value *V, bool OffsetOk) const {
  SmallPtrSet<Value *, 4> Visited;
  return findValueImpl(V, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return UndefValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? getUnderlyingObject(V) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L->getIterator();
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U =
              FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, AA))
        return findValueImpl(U, OffsetOk, Visited);
      if (BBI != BB->begin())
        break;
      BB = BB->getUniquePredecessor();
      if (!BB)
        break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(*DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W =
            FindInsertedValue(Ex->getAggregateOperand(), Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               *DL))
        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
    } else if (CE->getOpcode() == Instruction::ExtractValue) {
      ArrayRef<unsigned> Indices = CE->getIndices();
      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
        if (W != V)
          return findValueImpl(W, OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = SimplifyInstruction(Inst, {*DL, TLI, DT, AC}))
      return findValueImpl(W, OffsetOk, Visited);
  } else if (auto *C = dyn_cast<Constant>(V)) {
    Value *W = ConstantFoldConstant(C, *DL, TLI);
    if (W != V)
      return findValueImpl(W, OffsetOk, Visited);
  }

  return V;
}

PreservedAnalyses LintPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *Mod = F.getParent();
  auto *DL = &F.getParent()->getDataLayout();
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  Lint L(Mod, DL, AA, AC, DT, TLI);
  L.visit(F);
  dbgs() << L.MessagesStr.str();
  return PreservedAnalyses::all();
}
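
// Note: under the new pass manager, the LintPass::run entry point above is
// what e.g. `opt -passes=lint` invokes; the legacy pass below performs the
// same checks under the legacy pass manager.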

namespace {
class LintLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  LintLegacyPass() : FunctionPass(ID) {
    initializeLintLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }
  void print(raw_ostream &O, const Module *M) const override {}
};
} // namespace

char LintLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LintLegacyPass, "lint", "Statically lint-checks LLVM IR",
                      false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(LintLegacyPass, "lint", "Statically lint-checks LLVM IR",
                    false, true)

bool LintLegacyPass::runOnFunction(Function &F) {
  auto *Mod = F.getParent();
  auto *DL = &F.getParent()->getDataLayout();
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  Lint L(Mod, DL, AA, AC, DT, TLI);
  L.visit(F);
  dbgs() << L.MessagesStr.str();
  return false;
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createLintLegacyPassPass() { return new LintLegacyPass(); }

/// lintFunction - Check a function for errors, printing messages on stderr.
///
void llvm::lintFunction(const Function &f) {
  Function &F = const_cast<Function &>(f);
  assert(!F.isDeclaration() && "Cannot lint external functions");

  legacy::FunctionPassManager FPM(F.getParent());
  auto *V = new LintLegacyPass();
  FPM.add(V);
  FPM.run(F);
}

/// lintModule - Check a module for errors, printing messages on stderr.
///
void llvm::lintModule(const Module &M) {
  legacy::PassManager PM;
  auto *V = new LintLegacyPass();
  PM.add(V);
  PM.run(const_cast<Module &>(M));
}
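
// Example (illustrative only): client code can drive the helpers above to
// lint every defined function in a module.
//
//   #include "llvm/Analysis/Lint.h"
//
//   void lintAllFunctions(llvm::Module &M) {
//     for (llvm::Function &F : M)
//       if (!F.isDeclaration())
//         llvm::lintFunction(F); // prints any warnings to stderr
//     // or, for the whole module at once: llvm::lintModule(M);
//   }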