FunctionComparator.cpp 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976
//===- FunctionComparator.cpp - Function Comparator -----------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the FunctionComparator and GlobalNumberState classes
  10. // which are used by the MergeFunctions pass for comparing functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/Transforms/Utils/FunctionComparator.h"
  14. #include "llvm/ADT/APFloat.h"
  15. #include "llvm/ADT/APInt.h"
  16. #include "llvm/ADT/ArrayRef.h"
  17. #include "llvm/ADT/Hashing.h"
  18. #include "llvm/ADT/SmallPtrSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/IR/Attributes.h"
  21. #include "llvm/IR/BasicBlock.h"
  22. #include "llvm/IR/Constant.h"
  23. #include "llvm/IR/Constants.h"
  24. #include "llvm/IR/DataLayout.h"
  25. #include "llvm/IR/DerivedTypes.h"
  26. #include "llvm/IR/Function.h"
  27. #include "llvm/IR/GlobalValue.h"
  28. #include "llvm/IR/InlineAsm.h"
  29. #include "llvm/IR/InstrTypes.h"
  30. #include "llvm/IR/Instruction.h"
  31. #include "llvm/IR/Instructions.h"
  32. #include "llvm/IR/LLVMContext.h"
  33. #include "llvm/IR/Metadata.h"
  34. #include "llvm/IR/Module.h"
  35. #include "llvm/IR/Operator.h"
  36. #include "llvm/IR/Type.h"
  37. #include "llvm/IR/Value.h"
  38. #include "llvm/Support/Casting.h"
  39. #include "llvm/Support/Compiler.h"
  40. #include "llvm/Support/Debug.h"
  41. #include "llvm/Support/ErrorHandling.h"
  42. #include "llvm/Support/raw_ostream.h"
  43. #include <cassert>
  44. #include <cstddef>
  45. #include <cstdint>
  46. #include <utility>
  47. using namespace llvm;
  48. #define DEBUG_TYPE "functioncomparator"
  49. int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  50. if (L < R)
  51. return -1;
  52. if (L > R)
  53. return 1;
  54. return 0;
  55. }
  56. int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
  57. if ((int)L < (int)R)
  58. return -1;
  59. if ((int)L > (int)R)
  60. return 1;
  61. return 0;
  62. }
  63. int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  64. if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
  65. return Res;
  66. if (L.ugt(R))
  67. return 1;
  68. if (R.ugt(L))
  69. return -1;
  70. return 0;
  71. }
/// Three-way comparison of two arbitrary-precision floats.
int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  // then by value interpreted as a bitstring (aka APInt).
  const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  // Compare the defining properties of the two semantics; any difference in
  // precision, exponent range, or size yields a deterministic ordering
  // between distinct floating-point kinds.
  if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
                           APFloat::semanticsPrecision(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
                           APFloat::semanticsMaxExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
                           APFloat::semanticsMinExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
                           APFloat::semanticsSizeInBits(SR)))
    return Res;
  // Identical semantics: compare the raw bit patterns. Bit comparison gives a
  // total order even for values (e.g. NaNs) that IEEE comparison can't order.
  return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
}
  90. int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  91. // Prevent heavy comparison, compare sizes first.
  92. if (int Res = cmpNumbers(L.size(), R.size()))
  93. return Res;
  94. // Compare strings lexicographically only when it is necessary: only when
  95. // strings are equal in size.
  96. return L.compare(R);
  97. }
/// Three-way comparison of two attribute lists.
/// Lists with fewer attribute sets order first. Type attributes are compared
/// structurally through cmpTypes; all other attributes use Attribute's own
/// operator<.
int FunctionComparator::cmpAttrs(const AttributeList L,
                                 const AttributeList R) const {
  if (int Res = cmpNumbers(L.getNumAttrSets(), R.getNumAttrSets()))
    return Res;
  // Walk the attribute sets index by index (function attrs, return attrs,
  // then each parameter's attrs).
  for (unsigned i = L.index_begin(), e = L.index_end(); i != e; ++i) {
    AttributeSet LAS = L.getAttributes(i);
    AttributeSet RAS = R.getAttributes(i);
    AttributeSet::iterator LI = LAS.begin(), LE = LAS.end();
    AttributeSet::iterator RI = RAS.begin(), RE = RAS.end();
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA.isTypeAttribute() && RA.isTypeAttribute()) {
        // Same attribute kind required before comparing the attached types.
        if (LA.getKindAsEnum() != RA.getKindAsEnum())
          return cmpNumbers(LA.getKindAsEnum(), RA.getKindAsEnum());
        Type *TyL = LA.getValueAsType();
        Type *TyR = RA.getValueAsType();
        if (TyL && TyR) {
          if (int Res = cmpTypes(TyL, TyR))
            return Res;
          continue;
        }
        // Two pointers, at least one null, so the comparison result is
        // independent of the value of a real pointer.
        if (int Res = cmpNumbers((uint64_t)TyL, (uint64_t)TyR))
          return Res;
        continue;
      }
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    // One set ran out before the other: the list with leftover attributes on
    // the left orders after, leftovers on the right order before.
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}
  138. int FunctionComparator::cmpRangeMetadata(const MDNode *L,
  139. const MDNode *R) const {
  140. if (L == R)
  141. return 0;
  142. if (!L)
  143. return -1;
  144. if (!R)
  145. return 1;
  146. // Range metadata is a sequence of numbers. Make sure they are the same
  147. // sequence.
  148. // TODO: Note that as this is metadata, it is possible to drop and/or merge
  149. // this data when considering functions to merge. Thus this comparison would
  150. // return 0 (i.e. equivalent), but merging would become more complicated
  151. // because the ranges would need to be unioned. It is not likely that
  152. // functions differ ONLY in this metadata if they are actually the same
  153. // function semantically.
  154. if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
  155. return Res;
  156. for (size_t I = 0; I < L->getNumOperands(); ++I) {
  157. ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
  158. ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
  159. if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
  160. return Res;
  161. }
  162. return 0;
  163. }
  164. int FunctionComparator::cmpOperandBundlesSchema(const CallBase &LCS,
  165. const CallBase &RCS) const {
  166. assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!");
  167. if (int Res =
  168. cmpNumbers(LCS.getNumOperandBundles(), RCS.getNumOperandBundles()))
  169. return Res;
  170. for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) {
  171. auto OBL = LCS.getOperandBundleAt(I);
  172. auto OBR = RCS.getOperandBundleAt(I);
  173. if (int Res = OBL.getTagName().compare(OBR.getTagName()))
  174. return Res;
  175. if (int Res = cmpNumbers(OBL.Inputs.size(), OBR.Inputs.size()))
  176. return Res;
  177. }
  178. return 0;
  179. }
/// Constants comparison:
/// 1. Check whether type of L constant could be losslessly bitcasted to R
/// type.
/// 2. Compare constant contents.
/// For more details see declaration comments.
int FunctionComparator::cmpConstants(const Constant *L,
                                     const Constant *R) const {
  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }

    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;

    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedSize();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedSize();

    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);

    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        // Pointers are only bitcastable within the same address space.
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;

      // TyL and TyR aren't vectors, nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }

  // OK, types are bitcastable, now check constant contents.

  // Null values of both types: fall back to the type ordering. A null value
  // on only one side orders the null after the non-null.
  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    // Globals are ordered by their GlobalNumberState number, not by content.
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }

  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;

  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the host
    // endianness. This isn't a problem though, because the endianness of a
    // module will affect the order of the constants, but this order is the same
    // for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }

  // Same ValueID on both sides from here on, so single-sided casts are safe.
  switch (L->getValueID()) {
  case Value::UndefValueVal:
  case Value::PoisonValueVal:
  case Value::ConstantTokenNoneVal:
    return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<FixedVectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<FixedVectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : F->getBasicBlockList()) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  default: // Unknown constant, abort.
    LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}
  370. int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
  371. uint64_t LNumber = GlobalNumbers->getNumber(L);
  372. uint64_t RNumber = GlobalNumbers->getNumber(R);
  373. return cmpNumbers(LNumber, RNumber);
  374. }
/// cmpType - compares two types,
/// defines total ordering among the types set.
/// See method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  // Canonicalize default-address-space pointers to the target's integer
  // pointer type, so that bitcast-equivalent pointers compare equal.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  // Same TypeID from here on; compare kind-specific structure.
  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  // TyL == TyR would have returned true earlier, because types are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID:
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());

  case Type::StructTyID: {
    // Structs: element count, packedness, then element types in order.
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    // Functions: arity, vararg-ness, return type, then parameter types.
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID: {
    auto *STyL = cast<ArrayType>(TyL);
    auto *STyR = cast<ArrayType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
    // Vectors: scalability first, then (known-min) element count, then
    // element type.
    auto *STyL = cast<VectorType>(TyL);
    auto *STyR = cast<VectorType>(TyR);
    if (STyL->getElementCount().isScalable() !=
        STyR->getElementCount().isScalable())
      return cmpNumbers(STyL->getElementCount().isScalable(),
                        STyR->getElementCount().isScalable());
    if (STyL->getElementCount() != STyR->getElementCount())
      return cmpNumbers(STyL->getElementCount().getKnownMinValue(),
                        STyR->getElementCount().getKnownMinValue());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  }
}
// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R,
                                      bool &needToCmpOperands) const {
  needToCmpOperands = true;
  if (int Res = cmpValues(L, R))
    return Res;

  // Differences from Instruction::isSameOperationAs:
  //  * replace type comparison with calls to cmpTypes.
  //  * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
  //  * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(L)) {
    // GEPs are compared fully here (pointer operand plus cmpGEPs); tell the
    // caller not to compare operands again.
    needToCmpOperands = false;
    const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(R);
    if (int Res =
            cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
      return Res;
    return cmpGEPs(GEPL, GEPR);
  }

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    return cmpNumbers(AI->getAlignment(), cast<AllocaInst>(R)->getAlignment());
  }
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    // Loads: volatility, alignment, atomic ordering, sync scope, and
    // attached !range metadata.
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res = cmpNumbers(LI->getSyncScopeID(),
                             cast<LoadInst>(R)->getSyncScopeID()))
      return Res;
    return cmpRangeMetadata(
        LI->getMetadata(LLVMContext::MD_range),
        cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSyncScopeID(),
                      cast<StoreInst>(R)->getSyncScopeID());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (auto *CBL = dyn_cast<CallBase>(L)) {
    // Calls/invokes/callbrs: calling convention, attributes, operand-bundle
    // schema, tail-call kind (plain calls only), and !range metadata.
    auto *CBR = cast<CallBase>(R);
    if (int Res = cmpNumbers(CBL->getCallingConv(), CBR->getCallingConv()))
      return Res;
    if (int Res = cmpAttrs(CBL->getAttributes(), CBR->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(*CBL, *CBR))
      return Res;
    if (const CallInst *CI = dyn_cast<CallInst>(L))
      if (int Res = cmpNumbers(CI->getTailCallKind(),
                               cast<CallInst>(R)->getTailCallKind()))
        return Res;
    return cmpRangeMetadata(L->getMetadata(LLVMContext::MD_range),
                            R->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    return 0;
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    // Note: intentionally falls through to the final return 0 when the
    // indices all match.
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSyncScopeID(),
                      cast<FenceInst>(R)->getSyncScopeID());
  }
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getSuccessOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getFailureOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSyncScopeID(),
                      cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpOrderings(RMWI->getOrdering(),
                               cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSyncScopeID(),
                      cast<AtomicRMWInst>(R)->getSyncScopeID());
  }
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(L)) {
    // Note: intentionally falls through to the final return 0 when the
    // shuffle masks all match.
    ArrayRef<int> LMask = SVI->getShuffleMask();
    ArrayRef<int> RMask = cast<ShuffleVectorInst>(R)->getShuffleMask();
    if (int Res = cmpNumbers(LMask.size(), RMask.size()))
      return Res;
    for (size_t i = 0, e = LMask.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LMask[i], RMask[i]))
        return Res;
    }
  }
  if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
    const PHINode *PNR = cast<PHINode>(R);
    // Ensure that in addition to the incoming values being identical
    // (checked by the caller of this function), the incoming blocks
    // are also identical.
    for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
      if (int Res =
              cmpValues(PNL->getIncomingBlock(i), PNR->getIncomingBlock(i)))
        return Res;
    }
  }
  return 0;
}
  633. // Determine whether two GEP operations perform the same underlying arithmetic.
  634. // Read method declaration comments for more details.
  635. int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
  636. const GEPOperator *GEPR) const {
  637. unsigned int ASL = GEPL->getPointerAddressSpace();
  638. unsigned int ASR = GEPR->getPointerAddressSpace();
  639. if (int Res = cmpNumbers(ASL, ASR))
  640. return Res;
  641. // When we have target data, we can reduce the GEP down to the value in bytes
  642. // added to the address.
  643. const DataLayout &DL = FnL->getParent()->getDataLayout();
  644. unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  645. APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  646. if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
  647. GEPR->accumulateConstantOffset(DL, OffsetR))
  648. return cmpAPInts(OffsetL, OffsetR);
  649. if (int Res =
  650. cmpTypes(GEPL->getSourceElementType(), GEPR->getSourceElementType()))
  651. return Res;
  652. if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
  653. return Res;
  654. for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
  655. if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
  656. return Res;
  657. }
  658. return 0;
  659. }
/// Three-way comparison of two inline-asm values: function type, asm string,
/// constraint string, then the side-effect / align-stack / dialect flags.
int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  // the same, otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
  // All compared fields matched yet the pointers differ. Since InlineAsm is
  // uniqued, the only way that can happen is if the function types are
  // distinct types that cmpTypes nevertheless considered equivalent
  // (cmpTypes is coarser than pointer identity for uniqued types).
  assert(L->getFunctionType() != R->getFunctionType());
  return 0;
}
/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
  // Catch self-reference case: each function compares equal only to its own
  // counterpart on the other side.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  // A constant on only one side orders the constant side after the other.
  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

  // Local values: assign each value a serial number on first sight (insert is
  // a no-op if already present, keeping the original number). Equivalent
  // functions visit their values in the same order, so matching values get
  // matching serial numbers; comparing the numbers detects any divergence in
  // how the values were first encountered.
  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}
  720. // Test whether two basic blocks have equivalent behaviour.
  721. int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
  722. const BasicBlock *BBR) const {
  723. BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  724. BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
  725. do {
  726. bool needToCmpOperands = true;
  727. if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
  728. return Res;
  729. if (needToCmpOperands) {
  730. assert(InstL->getNumOperands() == InstR->getNumOperands());
  731. for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
  732. Value *OpL = InstL->getOperand(i);
  733. Value *OpR = InstR->getOperand(i);
  734. if (int Res = cmpValues(OpL, OpR))
  735. return Res;
  736. // cmpValues should ensure this is true.
  737. assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
  738. }
  739. }
  740. ++InstL;
  741. ++InstR;
  742. } while (InstL != InstLE && InstR != InstRE);
  743. if (InstL != InstLE && InstR == InstRE)
  744. return 1;
  745. if (InstL == InstLE && InstR != InstRE)
  746. return -1;
  747. return 0;
  748. }
  749. int FunctionComparator::compareSignature() const {
  750. if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
  751. return Res;
  752. if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
  753. return Res;
  754. if (FnL->hasGC()) {
  755. if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
  756. return Res;
  757. }
  758. if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
  759. return Res;
  760. if (FnL->hasSection()) {
  761. if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
  762. return Res;
  763. }
  764. if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
  765. return Res;
  766. // TODO: if it's internal and only used in direct calls, we could handle this
  767. // case too.
  768. if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
  769. return Res;
  770. if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
  771. return Res;
  772. assert(FnL->arg_size() == FnR->arg_size() &&
  773. "Identically typed functions have different numbers of args!");
  774. // Visit the arguments so that they get enumerated in the order they're
  775. // passed in.
  776. for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
  777. ArgRI = FnR->arg_begin(),
  778. ArgLE = FnL->arg_end();
  779. ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
  780. if (cmpValues(&*ArgLI, &*ArgRI) != 0)
  781. llvm_unreachable("Arguments repeat!");
  782. }
  783. return 0;
  784. }
  785. // Test whether the two functions have equivalent behaviour.
  786. int FunctionComparator::compare() {
  787. beginCompare();
  788. if (int Res = compareSignature())
  789. return Res;
  790. // We do a CFG-ordered walk since the actual ordering of the blocks in the
  791. // linked list is immaterial. Our walk starts at the entry block for both
  792. // functions, then takes each block from each terminator in order. As an
  793. // artifact, this also means that unreachable blocks are ignored.
  794. SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  795. SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.
  796. FnLBBs.push_back(&FnL->getEntryBlock());
  797. FnRBBs.push_back(&FnR->getEntryBlock());
  798. VisitedBBs.insert(FnLBBs[0]);
  799. while (!FnLBBs.empty()) {
  800. const BasicBlock *BBL = FnLBBs.pop_back_val();
  801. const BasicBlock *BBR = FnRBBs.pop_back_val();
  802. if (int Res = cmpValues(BBL, BBR))
  803. return Res;
  804. if (int Res = cmpBasicBlocks(BBL, BBR))
  805. return Res;
  806. const Instruction *TermL = BBL->getTerminator();
  807. const Instruction *TermR = BBR->getTerminator();
  808. assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
  809. for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
  810. if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
  811. continue;
  812. FnLBBs.push_back(TermL->getSuccessor(i));
  813. FnRBBs.push_back(TermR->getSuccessor(i));
  814. }
  815. }
  816. return 0;
  817. }
  818. namespace {
  819. // Accumulate the hash of a sequence of 64-bit integers. This is similar to a
  820. // hash of a sequence of 64bit ints, but the entire input does not need to be
  821. // available at once. This interface is necessary for functionHash because it
  822. // needs to accumulate the hash as the structure of the function is traversed
  823. // without saving these values to an intermediate buffer. This form of hashing
  824. // is not often needed, as usually the object to hash is just read from a
  825. // buffer.
  826. class HashAccumulator64 {
  827. uint64_t Hash;
  828. public:
  829. // Initialize to random constant, so the state isn't zero.
  830. HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }
  831. void add(uint64_t V) { Hash = hashing::detail::hash_16_bytes(Hash, V); }
  832. // No finishing is required, because the entire hash value is used.
  833. uint64_t getHash() { return Hash; }
  834. };
  835. } // end anonymous namespace
  836. // A function hash is calculated by considering only the number of arguments and
  837. // whether a function is varargs, the order of basic blocks (given by the
  838. // successors of each basic block in depth first order), and the order of
  839. // opcodes of each instruction within each of these basic blocks. This mirrors
  840. // the strategy compare() uses to compare functions by walking the BBs in depth
  841. // first order and comparing each instruction in sequence. Because this hash
  842. // does not look at the operands, it is insensitive to things such as the
  843. // target of calls and the constants used in the function, which makes it useful
  844. // when possibly merging functions which are the same modulo constants and call
  845. // targets.
  846. FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  847. HashAccumulator64 H;
  848. H.add(F.isVarArg());
  849. H.add(F.arg_size());
  850. SmallVector<const BasicBlock *, 8> BBs;
  851. SmallPtrSet<const BasicBlock *, 16> VisitedBBs;
  852. // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  853. // accumulating the hash of the function "structure." (BB and opcode sequence)
  854. BBs.push_back(&F.getEntryBlock());
  855. VisitedBBs.insert(BBs[0]);
  856. while (!BBs.empty()) {
  857. const BasicBlock *BB = BBs.pop_back_val();
  858. // This random value acts as a block header, as otherwise the partition of
  859. // opcodes into BBs wouldn't affect the hash, only the order of the opcodes
  860. H.add(45798);
  861. for (auto &Inst : *BB) {
  862. H.add(Inst.getOpcode());
  863. }
  864. const Instruction *Term = BB->getTerminator();
  865. for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
  866. if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
  867. continue;
  868. BBs.push_back(Term->getSuccessor(i));
  869. }
  870. }
  871. return H.getHash();
  872. }