FunctionComparator.cpp 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982
//===- FunctionComparator.cpp - Function Comparator -----------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the FunctionComparator and GlobalNumberState classes
  10. // which are used by the MergeFunctions pass for comparing functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/Transforms/Utils/FunctionComparator.h"
  14. #include "llvm/ADT/APFloat.h"
  15. #include "llvm/ADT/APInt.h"
  16. #include "llvm/ADT/ArrayRef.h"
  17. #include "llvm/ADT/Hashing.h"
  18. #include "llvm/ADT/SmallPtrSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/IR/Attributes.h"
  21. #include "llvm/IR/BasicBlock.h"
  22. #include "llvm/IR/Constant.h"
  23. #include "llvm/IR/Constants.h"
  24. #include "llvm/IR/DataLayout.h"
  25. #include "llvm/IR/DerivedTypes.h"
  26. #include "llvm/IR/Function.h"
  27. #include "llvm/IR/GlobalValue.h"
  28. #include "llvm/IR/InlineAsm.h"
  29. #include "llvm/IR/InstrTypes.h"
  30. #include "llvm/IR/Instruction.h"
  31. #include "llvm/IR/Instructions.h"
  32. #include "llvm/IR/LLVMContext.h"
  33. #include "llvm/IR/Metadata.h"
  34. #include "llvm/IR/Module.h"
  35. #include "llvm/IR/Operator.h"
  36. #include "llvm/IR/Type.h"
  37. #include "llvm/IR/Value.h"
  38. #include "llvm/Support/Casting.h"
  39. #include "llvm/Support/Compiler.h"
  40. #include "llvm/Support/Debug.h"
  41. #include "llvm/Support/ErrorHandling.h"
  42. #include "llvm/Support/raw_ostream.h"
  43. #include <cassert>
  44. #include <cstddef>
  45. #include <cstdint>
  46. #include <utility>
  47. using namespace llvm;
  48. #define DEBUG_TYPE "functioncomparator"
  49. int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  50. if (L < R)
  51. return -1;
  52. if (L > R)
  53. return 1;
  54. return 0;
  55. }
  56. int FunctionComparator::cmpAligns(Align L, Align R) const {
  57. if (L.value() < R.value())
  58. return -1;
  59. if (L.value() > R.value())
  60. return 1;
  61. return 0;
  62. }
  63. int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
  64. if ((int)L < (int)R)
  65. return -1;
  66. if ((int)L > (int)R)
  67. return 1;
  68. return 0;
  69. }
  70. int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  71. if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
  72. return Res;
  73. if (L.ugt(R))
  74. return 1;
  75. if (R.ugt(L))
  76. return -1;
  77. return 0;
  78. }
  79. int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  80. // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  81. // then by value interpreted as a bitstring (aka APInt).
  82. const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  83. if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
  84. APFloat::semanticsPrecision(SR)))
  85. return Res;
  86. if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
  87. APFloat::semanticsMaxExponent(SR)))
  88. return Res;
  89. if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
  90. APFloat::semanticsMinExponent(SR)))
  91. return Res;
  92. if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
  93. APFloat::semanticsSizeInBits(SR)))
  94. return Res;
  95. return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
  96. }
  97. int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  98. // Prevent heavy comparison, compare sizes first.
  99. if (int Res = cmpNumbers(L.size(), R.size()))
  100. return Res;
  101. // Compare strings lexicographically only when it is necessary: only when
  102. // strings are equal in size.
  103. return L.compare(R);
  104. }
// Order two attribute lists, set by set and attribute by attribute. Type
// attributes (e.g. byval/sret) are ordered via cmpTypes so that structurally
// equivalent types still compare equal; all other attributes use Attribute's
// own operator<.
int FunctionComparator::cmpAttrs(const AttributeList L,
                                 const AttributeList R) const {
  if (int Res = cmpNumbers(L.getNumAttrSets(), R.getNumAttrSets()))
    return Res;
  for (unsigned i : L.indexes()) {
    AttributeSet LAS = L.getAttributes(i);
    AttributeSet RAS = R.getAttributes(i);
    AttributeSet::iterator LI = LAS.begin(), LE = LAS.end();
    AttributeSet::iterator RI = RAS.begin(), RE = RAS.end();
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA.isTypeAttribute() && RA.isTypeAttribute()) {
        if (LA.getKindAsEnum() != RA.getKindAsEnum())
          return cmpNumbers(LA.getKindAsEnum(), RA.getKindAsEnum());
        Type *TyL = LA.getValueAsType();
        Type *TyR = RA.getValueAsType();
        // Both types present: order them structurally.
        if (TyL && TyR) {
          if (int Res = cmpTypes(TyL, TyR))
            return Res;
          continue;
        }
        // Two pointers, at least one null, so the comparison result is
        // independent of the value of a real pointer.
        if (int Res = cmpNumbers((uint64_t)TyL, (uint64_t)TyR))
          return Res;
        continue;
      }
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    // One set is a strict prefix of the other; the set with leftover
    // attributes compares greater.
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}
  145. int FunctionComparator::cmpRangeMetadata(const MDNode *L,
  146. const MDNode *R) const {
  147. if (L == R)
  148. return 0;
  149. if (!L)
  150. return -1;
  151. if (!R)
  152. return 1;
  153. // Range metadata is a sequence of numbers. Make sure they are the same
  154. // sequence.
  155. // TODO: Note that as this is metadata, it is possible to drop and/or merge
  156. // this data when considering functions to merge. Thus this comparison would
  157. // return 0 (i.e. equivalent), but merging would become more complicated
  158. // because the ranges would need to be unioned. It is not likely that
  159. // functions differ ONLY in this metadata if they are actually the same
  160. // function semantically.
  161. if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
  162. return Res;
  163. for (size_t I = 0; I < L->getNumOperands(); ++I) {
  164. ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
  165. ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
  166. if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
  167. return Res;
  168. }
  169. return 0;
  170. }
  171. int FunctionComparator::cmpOperandBundlesSchema(const CallBase &LCS,
  172. const CallBase &RCS) const {
  173. assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!");
  174. if (int Res =
  175. cmpNumbers(LCS.getNumOperandBundles(), RCS.getNumOperandBundles()))
  176. return Res;
  177. for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) {
  178. auto OBL = LCS.getOperandBundleAt(I);
  179. auto OBR = RCS.getOperandBundleAt(I);
  180. if (int Res = OBL.getTagName().compare(OBR.getTagName()))
  181. return Res;
  182. if (int Res = cmpNumbers(OBL.Inputs.size(), OBR.Inputs.size()))
  183. return Res;
  184. }
  185. return 0;
  186. }
/// Constants comparison:
/// 1. Check whether type of L constant could be losslessly bitcasted to R
/// type.
/// 2. Compare constant contents.
/// For more details see declaration comments.
int FunctionComparator::cmpConstants(const Constant *L,
                                     const Constant *R) const {
  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }
    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;
    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedSize();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedSize();
    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);
    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      // A lone pointer orders after the lone non-pointer.
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;
      // TyL and TyR aren't vectors, nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }
  // OK, types are bitcastable, now check constant contents.
  // Null values of bitcastable types are ordered purely by the type result;
  // a null value orders after a non-null one.
  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  // Globals are compared by their stable GlobalNumberState numbers, not by
  // content, so identical bodies referencing different globals differ.
  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }
  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;
  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the host
    // endianness. This isn't a problem though, because the endianness of a
    // module will affect the order of the constants, but this order is the same
    // for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }
  // Same ValueID from here on; dispatch on the concrete constant kind.
  switch (L->getValueID()) {
  case Value::UndefValueVal:
  case Value::PoisonValueVal:
  case Value::ConstantTokenNoneVal:
    return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<FixedVectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<FixedVectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : F->getBasicBlockList()) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  default: // Unknown constant, abort.
    LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}
  377. int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
  378. uint64_t LNumber = GlobalNumbers->getNumber(L);
  379. uint64_t RNumber = GlobalNumbers->getNumber(R);
  380. return cmpNumbers(LNumber, RNumber);
  381. }
/// cmpType - compares two types,
/// defines total ordering among the types set.
/// See method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  // Default-address-space pointers are treated as interchangeable with the
  // DataLayout's equivalent integer-pointer type before comparing.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  // Different type kinds order by their TypeID.
  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  // Same TypeID: compare the kind-specific structure.
  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  // TyL == TyR would have returned true earlier, because types are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID:
    // Only non-default address spaces reach here (addrspace-0 pointers were
    // converted to integers above).
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());

  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID: {
    auto *STyL = cast<ArrayType>(TyL);
    auto *STyR = cast<ArrayType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
    auto *STyL = cast<VectorType>(TyL);
    auto *STyR = cast<VectorType>(TyR);
    // Scalable vectors order apart from fixed ones before counts compare.
    if (STyL->getElementCount().isScalable() !=
        STyR->getElementCount().isScalable())
      return cmpNumbers(STyL->getElementCount().isScalable(),
                        STyR->getElementCount().isScalable());
    if (STyL->getElementCount() != STyR->getElementCount())
      return cmpNumbers(STyL->getElementCount().getKnownMinValue(),
                        STyR->getElementCount().getKnownMinValue());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  }
}
// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R,
                                      bool &needToCmpOperands) const {
  needToCmpOperands = true;
  if (int Res = cmpValues(L, R))
    return Res;

  // Differences from Instruction::isSameOperationAs:
  //  * replace type comparison with calls to cmpTypes.
  //  * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
  //  * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(L)) {
    // GEPs are compared wholesale by cmpGEPs (which may reduce them to a
    // constant byte offset), so tell the caller to skip the generic
    // operand-by-operand walk.
    needToCmpOperands = false;
    const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(R);
    if (int Res =
            cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
      return Res;
    return cmpGEPs(GEPL, GEPR);
  }

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    return cmpAligns(AI->getAlign(), cast<AllocaInst>(R)->getAlign());
  }
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(LI->getAlign(), cast<LoadInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res = cmpNumbers(LI->getSyncScopeID(),
                             cast<LoadInst>(R)->getSyncScopeID()))
      return Res;
    return cmpRangeMetadata(
        LI->getMetadata(LLVMContext::MD_range),
        cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(SI->getAlign(), cast<StoreInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSyncScopeID(),
                      cast<StoreInst>(R)->getSyncScopeID());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (auto *CBL = dyn_cast<CallBase>(L)) {
    auto *CBR = cast<CallBase>(R);
    if (int Res = cmpNumbers(CBL->getCallingConv(), CBR->getCallingConv()))
      return Res;
    if (int Res = cmpAttrs(CBL->getAttributes(), CBR->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(*CBL, *CBR))
      return Res;
    if (const CallInst *CI = dyn_cast<CallInst>(L))
      if (int Res = cmpNumbers(CI->getTailCallKind(),
                               cast<CallInst>(R)->getTailCallKind()))
        return Res;
    return cmpRangeMetadata(L->getMetadata(LLVMContext::MD_range),
                            R->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    return 0;
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    // No early return here: an extractvalue matches none of the remaining
    // dyn_casts below, so control falls through to the trailing `return 0`.
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSyncScopeID(),
                      cast<FenceInst>(R)->getSyncScopeID());
  }
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getSuccessOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getFailureOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSyncScopeID(),
                      cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpOrderings(RMWI->getOrdering(),
                               cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSyncScopeID(),
                      cast<AtomicRMWInst>(R)->getSyncScopeID());
  }
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(L)) {
    ArrayRef<int> LMask = SVI->getShuffleMask();
    ArrayRef<int> RMask = cast<ShuffleVectorInst>(R)->getShuffleMask();
    if (int Res = cmpNumbers(LMask.size(), RMask.size()))
      return Res;
    for (size_t i = 0, e = LMask.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LMask[i], RMask[i]))
        return Res;
    }
    // Like extractvalue above, this falls through to the final `return 0`.
  }
  if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
    const PHINode *PNR = cast<PHINode>(R);
    // Ensure that in addition to the incoming values being identical
    // (checked by the caller of this function), the incoming blocks
    // are also identical.
    for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
      if (int Res =
              cmpValues(PNL->getIncomingBlock(i), PNR->getIncomingBlock(i)))
        return Res;
    }
  }
  return 0;
}
  638. // Determine whether two GEP operations perform the same underlying arithmetic.
  639. // Read method declaration comments for more details.
  640. int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
  641. const GEPOperator *GEPR) const {
  642. unsigned int ASL = GEPL->getPointerAddressSpace();
  643. unsigned int ASR = GEPR->getPointerAddressSpace();
  644. if (int Res = cmpNumbers(ASL, ASR))
  645. return Res;
  646. // When we have target data, we can reduce the GEP down to the value in bytes
  647. // added to the address.
  648. const DataLayout &DL = FnL->getParent()->getDataLayout();
  649. unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  650. APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  651. if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
  652. GEPR->accumulateConstantOffset(DL, OffsetR))
  653. return cmpAPInts(OffsetL, OffsetR);
  654. if (int Res =
  655. cmpTypes(GEPL->getSourceElementType(), GEPR->getSourceElementType()))
  656. return Res;
  657. if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
  658. return Res;
  659. for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
  660. if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
  661. return Res;
  662. }
  663. return 0;
  664. }
// Order two inline-asm blobs by comparing their observable fields.
int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  // the same, otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
  // Because InlineAsm values are uniqued, two distinct pointers whose fields
  // all compared equal above can only happen when the underlying function
  // types differ as pointers while cmpTypes treats them as equivalent.
  assert(L->getFunctionType() != R->getFunctionType());
  return 0;
}
/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
  // Catch self-reference case: the functions being compared reference
  // themselves; they match only if each references its own function.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  // A constant orders after any non-constant value.
  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  // Inline asm orders after any remaining kind of value.
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

  // Serial-number scheme: a previously-unseen value is assigned the next
  // serial number in its function's map (the pre-insert size()); a repeat
  // lookup leaves the map untouched and reuses the existing serial. Two
  // values therefore compare equal iff they were first encountered at the
  // same point of the pair-wise walk.
  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}
  725. // Test whether two basic blocks have equivalent behaviour.
  726. int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
  727. const BasicBlock *BBR) const {
  728. BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  729. BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
  730. do {
  731. bool needToCmpOperands = true;
  732. if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
  733. return Res;
  734. if (needToCmpOperands) {
  735. assert(InstL->getNumOperands() == InstR->getNumOperands());
  736. for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
  737. Value *OpL = InstL->getOperand(i);
  738. Value *OpR = InstR->getOperand(i);
  739. if (int Res = cmpValues(OpL, OpR))
  740. return Res;
  741. // cmpValues should ensure this is true.
  742. assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
  743. }
  744. }
  745. ++InstL;
  746. ++InstR;
  747. } while (InstL != InstLE && InstR != InstRE);
  748. if (InstL != InstLE && InstR == InstRE)
  749. return 1;
  750. if (InstL == InstLE && InstR != InstRE)
  751. return -1;
  752. return 0;
  753. }
  754. int FunctionComparator::compareSignature() const {
  755. if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
  756. return Res;
  757. if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
  758. return Res;
  759. if (FnL->hasGC()) {
  760. if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
  761. return Res;
  762. }
  763. if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
  764. return Res;
  765. if (FnL->hasSection()) {
  766. if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
  767. return Res;
  768. }
  769. if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
  770. return Res;
  771. // TODO: if it's internal and only used in direct calls, we could handle this
  772. // case too.
  773. if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
  774. return Res;
  775. if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
  776. return Res;
  777. assert(FnL->arg_size() == FnR->arg_size() &&
  778. "Identically typed functions have different numbers of args!");
  779. // Visit the arguments so that they get enumerated in the order they're
  780. // passed in.
  781. for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
  782. ArgRI = FnR->arg_begin(),
  783. ArgLE = FnL->arg_end();
  784. ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
  785. if (cmpValues(&*ArgLI, &*ArgRI) != 0)
  786. llvm_unreachable("Arguments repeat!");
  787. }
  788. return 0;
  789. }
  790. // Test whether the two functions have equivalent behaviour.
  791. int FunctionComparator::compare() {
  792. beginCompare();
  793. if (int Res = compareSignature())
  794. return Res;
  795. // We do a CFG-ordered walk since the actual ordering of the blocks in the
  796. // linked list is immaterial. Our walk starts at the entry block for both
  797. // functions, then takes each block from each terminator in order. As an
  798. // artifact, this also means that unreachable blocks are ignored.
  799. SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  800. SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.
  801. FnLBBs.push_back(&FnL->getEntryBlock());
  802. FnRBBs.push_back(&FnR->getEntryBlock());
  803. VisitedBBs.insert(FnLBBs[0]);
  804. while (!FnLBBs.empty()) {
  805. const BasicBlock *BBL = FnLBBs.pop_back_val();
  806. const BasicBlock *BBR = FnRBBs.pop_back_val();
  807. if (int Res = cmpValues(BBL, BBR))
  808. return Res;
  809. if (int Res = cmpBasicBlocks(BBL, BBR))
  810. return Res;
  811. const Instruction *TermL = BBL->getTerminator();
  812. const Instruction *TermR = BBR->getTerminator();
  813. assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
  814. for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
  815. if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
  816. continue;
  817. FnLBBs.push_back(TermL->getSuccessor(i));
  818. FnRBBs.push_back(TermR->getSuccessor(i));
  819. }
  820. }
  821. return 0;
  822. }
  823. namespace {
  824. // Accumulate the hash of a sequence of 64-bit integers. This is similar to a
  825. // hash of a sequence of 64bit ints, but the entire input does not need to be
  826. // available at once. This interface is necessary for functionHash because it
  827. // needs to accumulate the hash as the structure of the function is traversed
  828. // without saving these values to an intermediate buffer. This form of hashing
  829. // is not often needed, as usually the object to hash is just read from a
  830. // buffer.
  831. class HashAccumulator64 {
  832. uint64_t Hash;
  833. public:
  834. // Initialize to random constant, so the state isn't zero.
  835. HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }
  836. void add(uint64_t V) { Hash = hashing::detail::hash_16_bytes(Hash, V); }
  837. // No finishing is required, because the entire hash value is used.
  838. uint64_t getHash() { return Hash; }
  839. };
  840. } // end anonymous namespace
  841. // A function hash is calculated by considering only the number of arguments and
  842. // whether a function is varargs, the order of basic blocks (given by the
  843. // successors of each basic block in depth first order), and the order of
  844. // opcodes of each instruction within each of these basic blocks. This mirrors
  845. // the strategy compare() uses to compare functions by walking the BBs in depth
  846. // first order and comparing each instruction in sequence. Because this hash
  847. // does not look at the operands, it is insensitive to things such as the
  848. // target of calls and the constants used in the function, which makes it useful
  849. // when possibly merging functions which are the same modulo constants and call
  850. // targets.
  851. FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  852. HashAccumulator64 H;
  853. H.add(F.isVarArg());
  854. H.add(F.arg_size());
  855. SmallVector<const BasicBlock *, 8> BBs;
  856. SmallPtrSet<const BasicBlock *, 16> VisitedBBs;
  857. // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  858. // accumulating the hash of the function "structure." (BB and opcode sequence)
  859. BBs.push_back(&F.getEntryBlock());
  860. VisitedBBs.insert(BBs[0]);
  861. while (!BBs.empty()) {
  862. const BasicBlock *BB = BBs.pop_back_val();
  863. // This random value acts as a block header, as otherwise the partition of
  864. // opcodes into BBs wouldn't affect the hash, only the order of the opcodes
  865. H.add(45798);
  866. for (auto &Inst : *BB) {
  867. H.add(Inst.getOpcode());
  868. }
  869. const Instruction *Term = BB->getTerminator();
  870. for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
  871. if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
  872. continue;
  873. BBs.push_back(Term->getSuccessor(i));
  874. }
  875. }
  876. return H.getHash();
  877. }