//===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass statically checks for common and easily-identified constructs
// which produce undefined or likely unintended behavior in LLVM IR.
//
// It is not a guarantee of correctness, in two ways. First, it isn't
// comprehensive. There are checks which could be done statically which are
// not yet implemented. Some of these are indicated by TODO comments, but
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Lint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h" // dbgs() is used below to emit the messages.
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>

using namespace llvm;

namespace {
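/// Flags describing how an instruction references memory; visitMemoryReference
/// takes a bitwise OR of these (e.g. MemRef::Read | MemRef::Write).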
namespace MemRef {
static const unsigned Read = 1;
static const unsigned Write = 2;
static const unsigned Callee = 4;
static const unsigned Branchee = 8;
} // end namespace MemRef
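
/// The lint visitor: walks every instruction in a function and appends any
/// diagnostics to MessagesStr rather than reporting them immediately; the
/// caller decides what to do with the accumulated text.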
class Lint : public InstVisitor<Lint> {
  friend class InstVisitor<Lint>;

  void visitFunction(Function &F);

  void visitCallBase(CallBase &CB);
  void visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                            MaybeAlign Alignment, Type *Ty, unsigned Flags);
  void visitEHBeginCatch(IntrinsicInst *II);
  void visitEHEndCatch(IntrinsicInst *II);
  void visitReturnInst(ReturnInst &I);
  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitXor(BinaryOperator &I);
  void visitSub(BinaryOperator &I);
  void visitLShr(BinaryOperator &I);
  void visitAShr(BinaryOperator &I);
  void visitShl(BinaryOperator &I);
  void visitSDiv(BinaryOperator &I);
  void visitUDiv(BinaryOperator &I);
  void visitSRem(BinaryOperator &I);
  void visitURem(BinaryOperator &I);
  void visitAllocaInst(AllocaInst &I);
  void visitVAArgInst(VAArgInst &I);
  void visitIndirectBrInst(IndirectBrInst &I);
  void visitExtractElementInst(ExtractElementInst &I);
  void visitInsertElementInst(InsertElementInst &I);
  void visitUnreachableInst(UnreachableInst &I);

  Value *findValue(Value *V, bool OffsetOk) const;
  Value *findValueImpl(Value *V, bool OffsetOk,
                       SmallPtrSetImpl<Value *> &Visited) const;

public:
  Module *Mod;
  const DataLayout *DL;
  AliasAnalysis *AA;
  AssumptionCache *AC;
  DominatorTree *DT;
  TargetLibraryInfo *TLI;

  std::string Messages;
  raw_string_ostream MessagesStr;

  Lint(Module *Mod, const DataLayout *DL, AliasAnalysis *AA,
       AssumptionCache *AC, DominatorTree *DT, TargetLibraryInfo *TLI)
      : Mod(Mod), DL(DL), AA(AA), AC(AC), DT(DT), TLI(TLI),
        MessagesStr(Messages) {}

  void WriteValues(ArrayRef<const Value *> Vs) {
    for (const Value *V : Vs) {
      if (!V)
        continue;
      if (isa<Instruction>(V)) {
        MessagesStr << *V << '\n';
      } else {
        V->printAsOperand(MessagesStr, true, Mod);
        MessagesStr << '\n';
      }
    }
  }

  /// A check failed, so print out the condition and the message.
  ///
  /// This provides a nice place to put a breakpoint if you want to see why
  /// something is not correct.
  void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }

  /// A check failed (with values to print).
  ///
  /// This calls the Message-only version so that the above is easier to set
  /// a breakpoint on.
  template <typename T1, typename... Ts>
  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
    CheckFailed(Message);
    WriteValues({V1, Vs...});
  }
};
} // end anonymous namespace

// Check - We know that cond should be true; if not, print an error message
// and return from the current visit method.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)

void Lint::visitFunction(Function &F) {
  // This isn't undefined behavior, it's just a little unusual, and it's a
  // fairly common mistake to neglect to name a function.
  Check(F.hasName() || F.hasLocalLinkage(),
        "Unusual: Unnamed function with non-local linkage", &F);

  // TODO: Check for irreducible control flow.
}

void Lint::visitCallBase(CallBase &I) {
  Value *Callee = I.getCalledOperand();

  visitMemoryReference(I, MemoryLocation::getAfter(Callee), std::nullopt,
                       nullptr, MemRef::Callee);

  if (Function *F = dyn_cast<Function>(findValue(Callee,
                                                 /*OffsetOk=*/false))) {
    Check(I.getCallingConv() == F->getCallingConv(),
          "Undefined behavior: Caller and callee calling convention differ",
          &I);

    FunctionType *FT = F->getFunctionType();
    unsigned NumActualArgs = I.arg_size();

    Check(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
                         : FT->getNumParams() == NumActualArgs,
          "Undefined behavior: Call argument count mismatches callee "
          "argument count",
          &I);

    Check(FT->getReturnType() == I.getType(),
          "Undefined behavior: Call return type mismatches "
          "callee return type",
          &I);

    // Check argument types (in case the callee was cast) and attributes.
    // TODO: Verify that caller and callee attributes are compatible.
    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
    auto AI = I.arg_begin(), AE = I.arg_end();
    for (; AI != AE; ++AI) {
      Value *Actual = *AI;
      if (PI != PE) {
        Argument *Formal = &*PI++;
        Check(Formal->getType() == Actual->getType(),
              "Undefined behavior: Call argument type mismatches "
              "callee parameter type",
              &I);

        // Check that noalias arguments don't alias other arguments. This is
        // not fully precise because we don't know the sizes of the dereferenced
        // memory regions.
        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) {
          AttributeList PAL = I.getAttributes();
          unsigned ArgNo = 0;
          for (auto *BI = I.arg_begin(); BI != AE; ++BI, ++ArgNo) {
            // Skip ByVal arguments since they will be memcpy'd to the callee's
            // stack so we're not really passing the pointer anyway.
            if (PAL.hasParamAttr(ArgNo, Attribute::ByVal))
              continue;
            // If both arguments are readonly, they have no dependence.
            if (Formal->onlyReadsMemory() && I.onlyReadsMemory(ArgNo))
              continue;
            if (AI != BI && (*BI)->getType()->isPointerTy()) {
              AliasResult Result = AA->alias(*AI, *BI);
              Check(Result != AliasResult::MustAlias &&
                        Result != AliasResult::PartialAlias,
                    "Unusual: noalias argument aliases another argument", &I);
            }
          }
        }

        // Check that an sret argument points to valid memory.
        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
          Type *Ty = Formal->getParamStructRetType();
          MemoryLocation Loc(
              Actual, LocationSize::precise(DL->getTypeStoreSize(Ty)));
          visitMemoryReference(I, Loc, DL->getABITypeAlign(Ty), Ty,
                               MemRef::Read | MemRef::Write);
        }
      }
    }
  }

  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (CI->isTailCall()) {
      const AttributeList &PAL = CI->getAttributes();
      unsigned ArgNo = 0;
      for (Value *Arg : I.args()) {
        // Skip ByVal arguments since they will be memcpy'd to the callee's
        // stack anyway.
        if (PAL.hasParamAttr(ArgNo++, Attribute::ByVal))
          continue;
        Value *Obj = findValue(Arg, /*OffsetOk=*/true);
        Check(!isa<AllocaInst>(Obj),
              "Undefined behavior: Call with \"tail\" keyword references "
              "alloca",
              &I);
      }
    }
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
    switch (II->getIntrinsicID()) {
    default:
      break;

    // TODO: Check more intrinsics

    case Intrinsic::memcpy: {
      MemCpyInst *MCI = cast<MemCpyInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MCI),
                           MCI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MCI),
                           MCI->getSourceAlign(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      auto Size = LocationSize::afterPointer();
      if (const ConstantInt *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                              /*OffsetOk=*/false)))
        if (Len->getValue().isIntN(32))
          Size = LocationSize::precise(Len->getValue().getZExtValue());
      Check(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                AliasResult::MustAlias,
            "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memcpy_inline: {
      MemCpyInlineInst *MCII = cast<MemCpyInlineInst>(&I);
      const uint64_t Size = MCII->getLength()->getValue().getLimitedValue();
      visitMemoryReference(I, MemoryLocation::getForDest(MCII),
                           MCII->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MCII),
                           MCII->getSourceAlign(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      const LocationSize LS = LocationSize::precise(Size);
      Check(AA->alias(MCII->getSource(), LS, MCII->getDest(), LS) !=
                AliasResult::MustAlias,
            "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MMI),
                           MMI->getDestAlign(), nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForSource(MMI),
                           MMI->getSourceAlign(), nullptr, MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MSI),
                           MSI->getDestAlign(), nullptr, MemRef::Write);
      break;
    }
    case Intrinsic::memset_inline: {
      MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
      visitMemoryReference(I, MemoryLocation::getForDest(MSII),
                           MSII->getDestAlign(), nullptr, MemRef::Write);
      break;
    }

    case Intrinsic::vastart:
      Check(I.getParent()->getParent()->isVarArg(),
            "Undefined behavior: va_start called in a non-varargs function",
            &I);

      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Write);
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI),
                           std::nullopt, nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
                           std::nullopt, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::get_active_lane_mask:
      if (auto *TripCount = dyn_cast<ConstantInt>(I.getArgOperand(1)))
        Check(!TripCount->isZero(),
              "get_active_lane_mask: operand #2 "
              "must be greater than 0",
              &I);
      break;
    }
}

void Lint::visitReturnInst(ReturnInst &I) {
  Function *F = I.getParent()->getParent();
  Check(!F->doesNotReturn(),
        "Unusual: Return statement in function with noreturn attribute", &I);

  if (Value *V = I.getReturnValue()) {
    Value *Obj = findValue(V, /*OffsetOk=*/true);
    Check(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
  }
}

// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
                                MaybeAlign Align, Type *Ty, unsigned Flags) {
  // If no memory is being referenced, it doesn't matter if the pointer
  // is valid.
  if (Loc.Size.isZero())
    return;

  Value *Ptr = const_cast<Value *>(Loc.Ptr);
  Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
  Check(!isa<ConstantPointerNull>(UnderlyingObject),
        "Undefined behavior: Null pointer dereference", &I);
  Check(!isa<UndefValue>(UnderlyingObject),
        "Undefined behavior: Undef pointer dereference", &I);
  Check(!isa<ConstantInt>(UnderlyingObject) ||
            !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
        "Unusual: All-ones pointer dereference", &I);
  Check(!isa<ConstantInt>(UnderlyingObject) ||
            !cast<ConstantInt>(UnderlyingObject)->isOne(),
        "Unusual: Address one pointer dereference", &I);

  if (Flags & MemRef::Write) {
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
      Check(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
            &I);
    Check(!isa<Function>(UnderlyingObject) &&
              !isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Write to text section", &I);
  }
  if (Flags & MemRef::Read) {
    Check(!isa<Function>(UnderlyingObject), "Unusual: Load from function body",
          &I);
    Check(!isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Load from block address", &I);
  }
  if (Flags & MemRef::Callee) {
    Check(!isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Call to block address", &I);
  }
  if (Flags & MemRef::Branchee) {
    Check(!isa<Constant>(UnderlyingObject) ||
              isa<BlockAddress>(UnderlyingObject),
          "Undefined behavior: Branch to non-blockaddress", &I);
  }

  // Check for buffer overflows and misalignment.
  // Only handles memory references that read/write something simple like an
  // alloca instruction or a global variable.
  int64_t Offset = 0;
  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *DL)) {
    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
    // something we can handle and if so extract the size of this base object
    // along with its alignment.
    uint64_t BaseSize = MemoryLocation::UnknownSize;
    MaybeAlign BaseAlign;

    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
      Type *ATy = AI->getAllocatedType();
      if (!AI->isArrayAllocation() && ATy->isSized())
        BaseSize = DL->getTypeAllocSize(ATy);
      BaseAlign = AI->getAlign();
    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
      // If the global may be defined differently in another compilation unit
      // then don't warn about funky memory accesses.
      if (GV->hasDefinitiveInitializer()) {
        Type *GTy = GV->getValueType();
        if (GTy->isSized())
          BaseSize = DL->getTypeAllocSize(GTy);
        BaseAlign = GV->getAlign();
        if (!BaseAlign && GTy->isSized())
          BaseAlign = DL->getABITypeAlign(GTy);
      }
    }

    // Accesses from before the start or after the end of the object are not
    // defined.
    Check(!Loc.Size.hasValue() || BaseSize == MemoryLocation::UnknownSize ||
              (Offset >= 0 && Offset + Loc.Size.getValue() <= BaseSize),
          "Undefined behavior: Buffer overflow", &I);

    // Accesses that say that the memory is more aligned than it is are not
    // defined.
    if (!Align && Ty && Ty->isSized())
      Align = DL->getABITypeAlign(Ty);
    if (BaseAlign && Align)
      Check(*Align <= commonAlignment(*BaseAlign, Offset),
            "Undefined behavior: Memory reference address is misaligned", &I);
  }
}

void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(), I.getType(),
                       MemRef::Read);
}

void Lint::visitStoreInst(StoreInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
                       I.getOperand(0)->getType(), MemRef::Write);
}

void Lint::visitXor(BinaryOperator &I) {
  Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
        "Undefined result: xor(undef, undef)", &I);
}

void Lint::visitSub(BinaryOperator &I) {
  Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
        "Undefined result: sub(undef, undef)", &I);
}

void Lint::visitLShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
                                                        /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
          "Undefined result: Shift count out of range", &I);
}
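
/// Conservatively determine whether the divisor may be zero: true if V is
/// undef, a scalar known to be zero, or a vector with any element that is
/// undef or known to be zero.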
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    KnownBits Known =
        computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
    return Known.isZero();
  }

  // Per-component check doesn't work with zeroinitializer
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component
  for (unsigned I = 0, N = cast<FixedVectorType>(VecTy)->getNumElements();
       I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    KnownBits Known = computeKnownBits(Elem, DL);
    if (Known.isZero())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Check(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
        "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Check(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
          "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}

void Lint::visitVAArgInst(VAArgInst &I) {
  visitMemoryReference(I, MemoryLocation::get(&I), std::nullopt, nullptr,
                       MemRef::Read | MemRef::Write);
}

void Lint::visitIndirectBrInst(IndirectBrInst &I) {
  visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()),
                       std::nullopt, nullptr, MemRef::Branchee);

  Check(I.getNumDestinations() != 0,
        "Undefined behavior: indirectbr with no destinations", &I);
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
                                                        /*OffsetOk=*/false)))
    Check(
        CI->getValue().ult(
            cast<FixedVectorType>(I.getVectorOperandType())->getNumElements()),
        "Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
                                                        /*OffsetOk=*/false)))
    Check(CI->getValue().ult(
              cast<FixedVectorType>(I.getType())->getNumElements()),
          "Undefined result: insertelement index out of range", &I);
}

void Lint::visitUnreachableInst(UnreachableInst &I) {
  // This isn't undefined behavior, it's merely suspicious.
  Check(&I == &I.getParent()->front() ||
            std::prev(I.getIterator())->mayHaveSideEffects(),
        "Unusual: unreachable immediately preceded by instruction without "
        "side effects",
        &I);
}

/// findValue - Look through bitcasts and simple memory reference patterns
/// to identify an equivalent, but more informative, value. If OffsetOk
/// is true, look through getelementptrs with non-zero offsets too.
///
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, bool OffsetOk) const {
  SmallPtrSet<Value *, 4> Visited;
  return findValueImpl(V, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return UndefValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? getUnderlyingObject(V) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L->getIterator();
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
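    // Scan backwards through this block and, via unique predecessors, earlier
    // blocks, looking for a value the load is already known to produce.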
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U =
              FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, AA))
        return findValueImpl(U, OffsetOk, Visited);
      if (BBI != BB->begin())
        break;
      BB = BB->getUniquePredecessor();
      if (!BB)
        break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(*DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W =
            FindInsertedValue(Ex->getAggregateOperand(), Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               *DL))
        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = simplifyInstruction(Inst, {*DL, TLI, DT, AC}))
      return findValueImpl(W, OffsetOk, Visited);
  } else if (auto *C = dyn_cast<Constant>(V)) {
    Value *W = ConstantFoldConstant(C, *DL, TLI);
    if (W != V)
      return findValueImpl(W, OffsetOk, Visited);
  }

  return V;
}
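
// New pass manager entry point. (With the usual pass registration, this is
// what `opt -passes=lint` runs; that registration lives outside this file.)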
PreservedAnalyses LintPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *Mod = F.getParent();
  auto *DL = &F.getParent()->getDataLayout();
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  Lint L(Mod, DL, AA, AC, DT, TLI);
  L.visit(F);
  dbgs() << L.MessagesStr.str();
  return PreservedAnalyses::all();
}

namespace {
class LintLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  LintLegacyPass() : FunctionPass(ID) {
    initializeLintLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }
  void print(raw_ostream &O, const Module *M) const override {}
};
} // namespace

char LintLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LintLegacyPass, "lint", "Statically lint-checks LLVM IR",
                      false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(LintLegacyPass, "lint", "Statically lint-checks LLVM IR",
                    false, true)

bool LintLegacyPass::runOnFunction(Function &F) {
  auto *Mod = F.getParent();
  auto *DL = &F.getParent()->getDataLayout();
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  Lint L(Mod, DL, AA, AC, DT, TLI);
  L.visit(F);
  dbgs() << L.MessagesStr.str();
  return false;
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createLintLegacyPassPass() { return new LintLegacyPass(); }

/// lintFunction - Check a function for errors, printing messages on stderr.
///
void llvm::lintFunction(const Function &f) {
  Function &F = const_cast<Function &>(f);
  assert(!F.isDeclaration() && "Cannot lint external functions");

  legacy::FunctionPassManager FPM(F.getParent());
  auto *V = new LintLegacyPass();
  FPM.add(V);
  FPM.run(F);
}

/// lintModule - Check a module for errors, printing messages on stderr.
///
void llvm::lintModule(const Module &M) {
  legacy::PassManager PM;
  auto *V = new LintLegacyPass();
  PM.add(V);
  PM.run(const_cast<Module &>(M));
}
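
// Illustrative usage sketch from client code (not part of this file; assumes
// an existing Function F or Module M):
//
//   if (!F.isDeclaration())
//     llvm::lintFunction(F); // any diagnostics are printed to stderr
//
//   llvm::lintModule(M);     // or lint every function in a module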