// FunctionComparator.cpp
  1. //===- FunctionComparator.h - Function Comparator -------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the FunctionComparator and GlobalNumberState classes
  10. // which are used by the MergeFunctions pass for comparing functions.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/Transforms/Utils/FunctionComparator.h"
  14. #include "llvm/ADT/APFloat.h"
  15. #include "llvm/ADT/APInt.h"
  16. #include "llvm/ADT/ArrayRef.h"
  17. #include "llvm/ADT/Hashing.h"
  18. #include "llvm/ADT/SmallPtrSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/IR/Attributes.h"
  21. #include "llvm/IR/BasicBlock.h"
  22. #include "llvm/IR/Constant.h"
  23. #include "llvm/IR/Constants.h"
  24. #include "llvm/IR/DataLayout.h"
  25. #include "llvm/IR/DerivedTypes.h"
  26. #include "llvm/IR/Function.h"
  27. #include "llvm/IR/GlobalValue.h"
  28. #include "llvm/IR/InlineAsm.h"
  29. #include "llvm/IR/InstrTypes.h"
  30. #include "llvm/IR/Instruction.h"
  31. #include "llvm/IR/Instructions.h"
  32. #include "llvm/IR/LLVMContext.h"
  33. #include "llvm/IR/Metadata.h"
  34. #include "llvm/IR/Module.h"
  35. #include "llvm/IR/Operator.h"
  36. #include "llvm/IR/Type.h"
  37. #include "llvm/IR/Value.h"
  38. #include "llvm/Support/Casting.h"
  39. #include "llvm/Support/Compiler.h"
  40. #include "llvm/Support/Debug.h"
  41. #include "llvm/Support/ErrorHandling.h"
  42. #include "llvm/Support/raw_ostream.h"
  43. #include <cassert>
  44. #include <cstddef>
  45. #include <cstdint>
  46. #include <utility>
  47. using namespace llvm;
  48. #define DEBUG_TYPE "functioncomparator"
  49. int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  50. if (L < R)
  51. return -1;
  52. if (L > R)
  53. return 1;
  54. return 0;
  55. }
  56. int FunctionComparator::cmpAligns(Align L, Align R) const {
  57. if (L.value() < R.value())
  58. return -1;
  59. if (L.value() > R.value())
  60. return 1;
  61. return 0;
  62. }
  63. int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
  64. if ((int)L < (int)R)
  65. return -1;
  66. if ((int)L > (int)R)
  67. return 1;
  68. return 0;
  69. }
  70. int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  71. if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
  72. return Res;
  73. if (L.ugt(R))
  74. return 1;
  75. if (R.ugt(L))
  76. return -1;
  77. return 0;
  78. }
  79. int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  80. // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  81. // then by value interpreted as a bitstring (aka APInt).
  82. const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  83. if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
  84. APFloat::semanticsPrecision(SR)))
  85. return Res;
  86. if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
  87. APFloat::semanticsMaxExponent(SR)))
  88. return Res;
  89. if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
  90. APFloat::semanticsMinExponent(SR)))
  91. return Res;
  92. if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
  93. APFloat::semanticsSizeInBits(SR)))
  94. return Res;
  95. return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
  96. }
  97. int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  98. // Prevent heavy comparison, compare sizes first.
  99. if (int Res = cmpNumbers(L.size(), R.size()))
  100. return Res;
  101. // Compare strings lexicographically only when it is necessary: only when
  102. // strings are equal in size.
  103. return std::clamp(L.compare(R), -1, 1);
  104. }
// Three-way comparison of two attribute lists. Lists are ordered first by the
// number of attribute sets, then set-by-set; within a set, type attributes
// (e.g. byval/sret) compare by kind and then by the attached type, while all
// other attributes fall back to Attribute's own operator<.
int FunctionComparator::cmpAttrs(const AttributeList L,
                                 const AttributeList R) const {
  if (int Res = cmpNumbers(L.getNumAttrSets(), R.getNumAttrSets()))
    return Res;
  for (unsigned i : L.indexes()) {
    AttributeSet LAS = L.getAttributes(i);
    AttributeSet RAS = R.getAttributes(i);
    AttributeSet::iterator LI = LAS.begin(), LE = LAS.end();
    AttributeSet::iterator RI = RAS.begin(), RE = RAS.end();
    // Walk both sets in lockstep until one side runs out.
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA.isTypeAttribute() && RA.isTypeAttribute()) {
        if (LA.getKindAsEnum() != RA.getKindAsEnum())
          return cmpNumbers(LA.getKindAsEnum(), RA.getKindAsEnum());
        Type *TyL = LA.getValueAsType();
        Type *TyR = RA.getValueAsType();
        if (TyL && TyR) {
          if (int Res = cmpTypes(TyL, TyR))
            return Res;
          continue;
        }
        // Two pointers, at least one null, so the comparison result is
        // independent of the value of a real pointer.
        if (int Res = cmpNumbers((uint64_t)TyL, (uint64_t)TyR))
          return Res;
        continue;
      }
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    // A longer set orders after a shorter one with the same prefix.
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}
  145. int FunctionComparator::cmpRangeMetadata(const MDNode *L,
  146. const MDNode *R) const {
  147. if (L == R)
  148. return 0;
  149. if (!L)
  150. return -1;
  151. if (!R)
  152. return 1;
  153. // Range metadata is a sequence of numbers. Make sure they are the same
  154. // sequence.
  155. // TODO: Note that as this is metadata, it is possible to drop and/or merge
  156. // this data when considering functions to merge. Thus this comparison would
  157. // return 0 (i.e. equivalent), but merging would become more complicated
  158. // because the ranges would need to be unioned. It is not likely that
  159. // functions differ ONLY in this metadata if they are actually the same
  160. // function semantically.
  161. if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
  162. return Res;
  163. for (size_t I = 0; I < L->getNumOperands(); ++I) {
  164. ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
  165. ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
  166. if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
  167. return Res;
  168. }
  169. return 0;
  170. }
  171. int FunctionComparator::cmpOperandBundlesSchema(const CallBase &LCS,
  172. const CallBase &RCS) const {
  173. assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!");
  174. if (int Res =
  175. cmpNumbers(LCS.getNumOperandBundles(), RCS.getNumOperandBundles()))
  176. return Res;
  177. for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) {
  178. auto OBL = LCS.getOperandBundleAt(I);
  179. auto OBR = RCS.getOperandBundleAt(I);
  180. if (int Res = OBL.getTagName().compare(OBR.getTagName()))
  181. return Res;
  182. if (int Res = cmpNumbers(OBL.Inputs.size(), OBR.Inputs.size()))
  183. return Res;
  184. }
  185. return 0;
  186. }
/// Constants comparison:
/// 1. Check whether type of L constant could be losslessly bitcasted to R
/// type.
/// 2. Compare constant contents.
/// For more details see declaration comments.
int FunctionComparator::cmpConstants(const Constant *L,
                                     const Constant *R) const {
  Type *TyL = L->getType();
  Type *TyR = R->getType();
  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }
    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;
    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedValue();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedValue();
    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);
    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      // A pointer orders after a non-pointer when only one side is a pointer.
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;
      // TyL and TyR aren't vectors, nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }
  // OK, types are bitcastable, now check constant contents.
  // Null constants order after non-null ones; two nulls fall back to the
  // type comparison result.
  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;
  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }
  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;
  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the host
    // endianness. This isn't a problem though, because the endiness of a module
    // will affect the order of the constants, but this order is the same
    // for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }
  // From here on L and R have the same ValueID, so the casts below are safe.
  switch (L->getValueID()) {
  case Value::UndefValueVal:
  case Value::PoisonValueVal:
  case Value::ConstantTokenNoneVal:
    return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<FixedVectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<FixedVectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : *F) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  case Value::DSOLocalEquivalentVal: {
    // dso_local_equivalent is functionally equivalent to whatever it points to.
    // This means the behavior of the IR should be the exact same as if the
    // function was referenced directly rather than through a
    // dso_local_equivalent.
    const auto *LEquiv = cast<DSOLocalEquivalent>(L);
    const auto *REquiv = cast<DSOLocalEquivalent>(R);
    return cmpGlobalValues(LEquiv->getGlobalValue(), REquiv->getGlobalValue());
  }
  default: // Unknown constant, abort.
    LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}
  386. int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
  387. uint64_t LNumber = GlobalNumbers->getNumber(L);
  388. uint64_t RNumber = GlobalNumbers->getNumber(R);
  389. return cmpNumbers(LNumber, RNumber);
  390. }
/// cmpType - compares two types,
/// defines total ordering among the types set.
/// See method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);
  // Canonicalize address-space-0 pointers to the DataLayout's pointer-sized
  // integer type before comparing, so such pointers and same-width integers
  // compare equal.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);
  if (TyL == TyR)
    return 0;
  // Different type kinds order by their TypeID.
  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;
  // Same TypeID on both sides from here on; compare kind-specific detail.
  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  // TyL == TyR would have returned true earlier, because types are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;
  case Type::PointerTyID:
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());
    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }
  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());
    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());
    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;
    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }
  case Type::ArrayTyID: {
    auto *STyL = cast<ArrayType>(TyL);
    auto *STyR = cast<ArrayType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
    auto *STyL = cast<VectorType>(TyL);
    auto *STyR = cast<VectorType>(TyR);
    // Fixed vectors order before scalable ones, then by element count, then
    // recursively by element type.
    if (STyL->getElementCount().isScalable() !=
        STyR->getElementCount().isScalable())
      return cmpNumbers(STyL->getElementCount().isScalable(),
                        STyR->getElementCount().isScalable());
    if (STyL->getElementCount() != STyR->getElementCount())
      return cmpNumbers(STyL->getElementCount().getKnownMinValue(),
                        STyR->getElementCount().getKnownMinValue());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  }
}
// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R,
                                      bool &needToCmpOperands) const {
  needToCmpOperands = true;
  if (int Res = cmpValues(L, R))
    return Res;
  // Differences from Instruction::isSameOperationAs:
  // * replace type comparison with calls to cmpTypes.
  // * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
  // * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;
  if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(L)) {
    // GEPs are handled entirely here (pointer operand + cmpGEPs), so tell
    // the caller not to compare the operands again.
    needToCmpOperands = false;
    const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(R);
    if (int Res =
            cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
      return Res;
    return cmpGEPs(GEPL, GEPR);
  }
  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;
  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;
  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;
  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }
  // Check special state that is a part of some instructions.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    return cmpAligns(AI->getAlign(), cast<AllocaInst>(R)->getAlign());
  }
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(LI->getAlign(), cast<LoadInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res = cmpNumbers(LI->getSyncScopeID(),
                             cast<LoadInst>(R)->getSyncScopeID()))
      return Res;
    return cmpRangeMetadata(
        LI->getMetadata(LLVMContext::MD_range),
        cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(SI->getAlign(), cast<StoreInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSyncScopeID(),
                      cast<StoreInst>(R)->getSyncScopeID());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (auto *CBL = dyn_cast<CallBase>(L)) {
    auto *CBR = cast<CallBase>(R);
    if (int Res = cmpNumbers(CBL->getCallingConv(), CBR->getCallingConv()))
      return Res;
    if (int Res = cmpAttrs(CBL->getAttributes(), CBR->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(*CBL, *CBR))
      return Res;
    if (const CallInst *CI = dyn_cast<CallInst>(L))
      if (int Res = cmpNumbers(CI->getTailCallKind(),
                               cast<CallInst>(R)->getTailCallKind()))
        return Res;
    return cmpRangeMetadata(L->getMetadata(LLVMContext::MD_range),
                            R->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    return 0;
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    // Falls through to the common `return 0` at the end of the function.
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSyncScopeID(),
                      cast<FenceInst>(R)->getSyncScopeID());
  }
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getSuccessOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getFailureOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSyncScopeID(),
                      cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpOrderings(RMWI->getOrdering(),
                               cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSyncScopeID(),
                      cast<AtomicRMWInst>(R)->getSyncScopeID());
  }
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(L)) {
    ArrayRef<int> LMask = SVI->getShuffleMask();
    ArrayRef<int> RMask = cast<ShuffleVectorInst>(R)->getShuffleMask();
    if (int Res = cmpNumbers(LMask.size(), RMask.size()))
      return Res;
    for (size_t i = 0, e = LMask.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LMask[i], RMask[i]))
        return Res;
    }
    // Falls through to the common `return 0` at the end of the function.
  }
  if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
    const PHINode *PNR = cast<PHINode>(R);
    // Ensure that in addition to the incoming values being identical
    // (checked by the caller of this function), the incoming blocks
    // are also identical.
    for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
      if (int Res =
              cmpValues(PNL->getIncomingBlock(i), PNR->getIncomingBlock(i)))
        return Res;
    }
  }
  return 0;
}
  647. // Determine whether two GEP operations perform the same underlying arithmetic.
  648. // Read method declaration comments for more details.
  649. int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
  650. const GEPOperator *GEPR) const {
  651. unsigned int ASL = GEPL->getPointerAddressSpace();
  652. unsigned int ASR = GEPR->getPointerAddressSpace();
  653. if (int Res = cmpNumbers(ASL, ASR))
  654. return Res;
  655. // When we have target data, we can reduce the GEP down to the value in bytes
  656. // added to the address.
  657. const DataLayout &DL = FnL->getParent()->getDataLayout();
  658. unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  659. APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  660. if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
  661. GEPR->accumulateConstantOffset(DL, OffsetR))
  662. return cmpAPInts(OffsetL, OffsetR);
  663. if (int Res =
  664. cmpTypes(GEPL->getSourceElementType(), GEPR->getSourceElementType()))
  665. return Res;
  666. if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
  667. return Res;
  668. for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
  669. if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
  670. return Res;
  671. }
  672. return 0;
  673. }
  674. int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
  675. const InlineAsm *R) const {
  676. // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  677. // the same, otherwise compare the fields.
  678. if (L == R)
  679. return 0;
  680. if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
  681. return Res;
  682. if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
  683. return Res;
  684. if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
  685. return Res;
  686. if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
  687. return Res;
  688. if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
  689. return Res;
  690. if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
  691. return Res;
  692. assert(L->getFunctionType() != R->getFunctionType());
  693. return 0;
  694. }
/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
  // Catch self-reference case: a reference to the function being compared
  // only matches the corresponding self-reference on the other side.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  // Constants are compared structurally via cmpConstants (not by serial
  // number), so equivalent constants used by the two functions match.
  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  // A constant on one side only: constants order after non-constants.
  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  // Inline asm is likewise compared by content, with asm ordering after
  // ordinary values when only one side is asm.
  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

  // Everything else gets a serial number in its own function's map, assigned
  // in order of first appearance (insert is a no-op returning the existing
  // entry if the value was numbered already). Two values match iff they were
  // first encountered at the same point of the pair-wise walk.
  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}
  734. // Test whether two basic blocks have equivalent behaviour.
  735. int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
  736. const BasicBlock *BBR) const {
  737. BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  738. BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
  739. do {
  740. bool needToCmpOperands = true;
  741. if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
  742. return Res;
  743. if (needToCmpOperands) {
  744. assert(InstL->getNumOperands() == InstR->getNumOperands());
  745. for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
  746. Value *OpL = InstL->getOperand(i);
  747. Value *OpR = InstR->getOperand(i);
  748. if (int Res = cmpValues(OpL, OpR))
  749. return Res;
  750. // cmpValues should ensure this is true.
  751. assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
  752. }
  753. }
  754. ++InstL;
  755. ++InstR;
  756. } while (InstL != InstLE && InstR != InstRE);
  757. if (InstL != InstLE && InstR == InstRE)
  758. return 1;
  759. if (InstL == InstLE && InstR != InstRE)
  760. return -1;
  761. return 0;
  762. }
  763. int FunctionComparator::compareSignature() const {
  764. if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
  765. return Res;
  766. if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
  767. return Res;
  768. if (FnL->hasGC()) {
  769. if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
  770. return Res;
  771. }
  772. if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
  773. return Res;
  774. if (FnL->hasSection()) {
  775. if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
  776. return Res;
  777. }
  778. if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
  779. return Res;
  780. // TODO: if it's internal and only used in direct calls, we could handle this
  781. // case too.
  782. if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
  783. return Res;
  784. if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
  785. return Res;
  786. assert(FnL->arg_size() == FnR->arg_size() &&
  787. "Identically typed functions have different numbers of args!");
  788. // Visit the arguments so that they get enumerated in the order they're
  789. // passed in.
  790. for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
  791. ArgRI = FnR->arg_begin(),
  792. ArgLE = FnL->arg_end();
  793. ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
  794. if (cmpValues(&*ArgLI, &*ArgRI) != 0)
  795. llvm_unreachable("Arguments repeat!");
  796. }
  797. return 0;
  798. }
// Test whether the two functions have equivalent behaviour.
int FunctionComparator::compare() {
  beginCompare();

  if (int Res = compareSignature())
    return Res;

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  // The visited set is keyed on FnL's blocks only; the paired pushes below
  // keep the two worklists in sync, so FnR needs no separate set.
  SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.

  FnLBBs.push_back(&FnL->getEntryBlock());
  FnRBBs.push_back(&FnR->getEntryBlock());

  VisitedBBs.insert(FnLBBs[0]);
  while (!FnLBBs.empty()) {
    const BasicBlock *BBL = FnLBBs.pop_back_val();
    const BasicBlock *BBR = FnRBBs.pop_back_val();

    // Number the block pair itself (mismatched numbering is a difference)
    // before comparing the instructions inside.
    if (int Res = cmpValues(BBL, BBR))
      return Res;

    if (int Res = cmpBasicBlocks(BBL, BBR))
      return Res;

    const Instruction *TermL = BBL->getTerminator();
    const Instruction *TermR = BBR->getTerminator();

    // cmpBasicBlocks returned 0, so the terminators compared equivalent and
    // must agree on successor count.
    assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
    for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
        continue;

      // Push successor pairs at matching positions so corresponding blocks
      // are popped together on a later iteration.
      FnLBBs.push_back(TermL->getSuccessor(i));
      FnRBBs.push_back(TermR->getSuccessor(i));
    }
  }
  return 0;
}
  832. namespace {
  833. // Accumulate the hash of a sequence of 64-bit integers. This is similar to a
  834. // hash of a sequence of 64bit ints, but the entire input does not need to be
  835. // available at once. This interface is necessary for functionHash because it
  836. // needs to accumulate the hash as the structure of the function is traversed
  837. // without saving these values to an intermediate buffer. This form of hashing
  838. // is not often needed, as usually the object to hash is just read from a
  839. // buffer.
  840. class HashAccumulator64 {
  841. uint64_t Hash;
  842. public:
  843. // Initialize to random constant, so the state isn't zero.
  844. HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }
  845. void add(uint64_t V) { Hash = hashing::detail::hash_16_bytes(Hash, V); }
  846. // No finishing is required, because the entire hash value is used.
  847. uint64_t getHash() { return Hash; }
  848. };
  849. } // end anonymous namespace
  850. // A function hash is calculated by considering only the number of arguments and
  851. // whether a function is varargs, the order of basic blocks (given by the
  852. // successors of each basic block in depth first order), and the order of
  853. // opcodes of each instruction within each of these basic blocks. This mirrors
  854. // the strategy compare() uses to compare functions by walking the BBs in depth
  855. // first order and comparing each instruction in sequence. Because this hash
  856. // does not look at the operands, it is insensitive to things such as the
  857. // target of calls and the constants used in the function, which makes it useful
  858. // when possibly merging functions which are the same modulo constants and call
  859. // targets.
  860. FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  861. HashAccumulator64 H;
  862. H.add(F.isVarArg());
  863. H.add(F.arg_size());
  864. SmallVector<const BasicBlock *, 8> BBs;
  865. SmallPtrSet<const BasicBlock *, 16> VisitedBBs;
  866. // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  867. // accumulating the hash of the function "structure." (BB and opcode sequence)
  868. BBs.push_back(&F.getEntryBlock());
  869. VisitedBBs.insert(BBs[0]);
  870. while (!BBs.empty()) {
  871. const BasicBlock *BB = BBs.pop_back_val();
  872. // This random value acts as a block header, as otherwise the partition of
  873. // opcodes into BBs wouldn't affect the hash, only the order of the opcodes
  874. H.add(45798);
  875. for (const auto &Inst : *BB) {
  876. H.add(Inst.getOpcode());
  877. }
  878. const Instruction *Term = BB->getTerminator();
  879. for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
  880. if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
  881. continue;
  882. BBs.push_back(Term->getSuccessor(i));
  883. }
  884. }
  885. return H.getHash();
  886. }