//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"
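// The hidden flag below defaults to true; passing -x86-use-base-pointer=false
// (for example to llc) disables use of a dedicated base pointer register,
// which can be handy when debugging frame lowering.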
static cl::opt<bool>
    EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
                      cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be in EBX before calls made through the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = !TT.isX32();
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}
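// Note: Win64 SEH unwind info identifies registers by their hardware encoding,
// which is exactly what getEncodingValue() returns above.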
const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
      // If AVX-512 isn't supported we should only inflate to these classes.
      if (!Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // If VLX isn't supported we should only inflate to these classes.
      if (!Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::VR128XRegClassID:
    case X86::VR256XRegClassID:
      // If VLX isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasVLX() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::FR32XRegClassID:
    case X86::FR64XRegClassID:
      // If AVX-512 isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasAVX512() &&
          getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
      break;
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR512_0_15RegClassID:
    case X86::VR512RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    // If the target is 64-bit but we have been told to use 32-bit addresses,
    // we can still use a 64-bit register as long as we know the high bits
    // are zero.
    // Reflect that in the returned register class.
    if (Is64Bit) {
      // When the target also allows a 64-bit frame pointer and we do have a
      // frame, it is fine to use it for the address accesses as well.
      const X86FrameLowering *TFI = getFrameLowering(MF);
      return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
                 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
                 : &X86::LOW32_ADDR_ACCESSRegClass;
    }
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}

bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                           unsigned DefSubReg,
                                           const TargetRegisterClass *SrcRC,
                                           unsigned SrcSubReg) const {
  // Prevent rewriting a copy where the destination size is larger than the
  // input size. See PR41619.
  // FIXME: Should this be factored into the base implementation somehow?
  if (DefRC->hasSuperClassEq(&X86::GR64RegClass) && DefSubReg == 0 &&
      SrcRC->hasSuperClassEq(&X86::GR64RegClass) && SrcSubReg == X86::sub_32bit)
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
  if (hasHipeCC)
    return &X86::GR32RegClass;

  return &X86::GR32_TCRegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  const Function &F = MF->getFunction();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->callsEHReturn();

  CallingConv::ID CC = F.getCallingConv();
  // If the no_caller_saved_registers attribute is present, use the X86_INTR
  // calling convention, since it has the right CSR list.
  if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
    CC = CallingConv::X86_INTR;

  // If the no_callee_saved_registers attribute is specified, override the CSRs
  // normally specified by the calling convention and use the empty set instead.
  if (MF->getFunction().hasFnAttribute("no_callee_saved_registers"))
    return CSR_NoRegs_SaveList;
  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_SaveList;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_SaveList :
                         CSR_Win64_RegCall_NoSSE_SaveList);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_SaveList :
                         CSR_SysV64_RegCall_NoSSE_SaveList);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_SaveList :
                       CSR_32_RegCall_NoSSE_SaveList);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList
                   : CSR_Win32_CFGuard_Check_NoSSE_SaveList);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::Win64:
    if (!HasSSE)
      return CSR_Win64_NoSSE_SaveList;
    return CSR_Win64_SaveList;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_SaveList;
    return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_64_AllRegs_SaveList;
      return CSR_64_AllRegs_NoSSE_SaveList;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_SaveList
                     : CSR_64_SwiftError_SaveList;

    if (IsWin64)
      return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;

    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }

  return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
}

const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_RegMask;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_RegMask :
                         CSR_Win64_RegCall_NoSSE_RegMask);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_RegMask :
                         CSR_SysV64_RegCall_NoSSE_RegMask);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_RegMask :
                       CSR_32_RegCall_NoSSE_RegMask);
    }
  case CallingConv::CFGuard_Check:
    assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");
    return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask
                   : CSR_Win32_CFGuard_Check_NoSSE_RegMask);
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::Win64:
    return CSR_Win64_RegMask;
  case CallingConv::SwiftTail:
    if (!Is64Bit)
      return CSR_32_RegMask;
    return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_64_AllRegs_RegMask;
      return CSR_64_AllRegs_NoSSE_RegMask;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    const Function &F = MF.getFunction();
    bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
                     F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
    if (IsSwiftCC)
      return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;

    return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
  }

  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the floating point control register as reserved.
  Reserved.set(X86::FPCW);

  // Set the floating point status register as reserved.
  Reserved.set(X86::FPSW);

  // Set the SIMD floating point control register as reserved.
  Reserved.set(X86::MXCSR);

  // Set the stack-pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))
    Reserved.set(SubReg);

  // Set the Shadow Stack Pointer as reserved.
  Reserved.set(X86::SSP);

  // Set the instruction pointer register and its aliases as reserved.
  for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))
    Reserved.set(SubReg);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))
      Reserved.set(SubReg);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction().getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");
    Register BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))
      Reserved.set(SubReg);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the legacy 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);
    Reserved.set(X86::SIH);
    Reserved.set(X86::DIH);
    Reserved.set(X86::BPH);
    Reserved.set(X86::SPH);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  assert(checkAllSuperRegsMarked(Reserved,
                                 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
                                  X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
  return Reserved;
}

bool X86RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                         MCRegister Reg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();
  auto IsSubReg = [&](MCRegister RegA, MCRegister RegB) {
    return TRI.isSuperOrSubRegisterEq(RegA, RegB);
  };

  if (!ST.is64Bit())
    return llvm::any_of(
               SmallVector<MCRegister>{X86::EAX, X86::ECX, X86::EDX},
               [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||
           (ST.hasMMX() && X86::VR64RegClass.contains(Reg));

  CallingConv::ID CC = MF.getFunction().getCallingConv();

  if (CC == CallingConv::X86_64_SysV && IsSubReg(X86::RAX, Reg))
    return true;

  if (llvm::any_of(
          SmallVector<MCRegister>{X86::RDX, X86::RCX, X86::R8, X86::R9},
          [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (CC != CallingConv::Win64 &&
      llvm::any_of(SmallVector<MCRegister>{X86::RDI, X86::RSI},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  if (ST.hasSSE1() &&
      llvm::any_of(SmallVector<MCRegister>{X86::XMM0, X86::XMM1, X86::XMM2,
                                           X86::XMM3, X86::XMM4, X86::XMM5,
                                           X86::XMM6, X86::XMM7},
                   [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))
    return true;

  return X86GenRegisterInfo::isArgumentRegister(MF, Reg);
}

bool X86RegisterInfo::isFixedRegister(const MachineFunction &MF,
                                      MCRegister PhysReg) const {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetRegisterInfo &TRI = *ST.getRegisterInfo();

  // Stack pointer.
  if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))
    return true;
  // Treat the frame pointer as fixed when a frame pointer is being used.
  const X86FrameLowering &TFI = *getFrameLowering(MF);
  if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))
    return true;

  return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);
}

bool X86RegisterInfo::isTileRegisterClass(const TargetRegisterClass *RC) const {
  return RC->getID() == X86::TILERegClassID;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS shows up as live-out after branch folding. We add an
  // assert to track this and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");
  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static bool CantUseSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  if (X86FI->hasPreallocatedCall())
    return true;

  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;
  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can use
  // neither the SP nor the FP, we need a separate base pointer register.
  bool CantUseFP = hasStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;
  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}
// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should really be trying first to eliminate this
// instruction entirely, since it is a plain copy.
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx'
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  Register BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV, which implicitly zeroes the upper
  // 32 bits of the super-register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  Register NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}

static bool isFuncletReturnInstr(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

bool
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false
                                               : isFuncletReturnInstr(*MBBI);
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Determine base register and offset.
  int FIOffset;
  Register BasePtr;
  if (MI.isReturn()) {
    assert((!hasStackRealignment(MF) ||
            MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
           "Return instruction can only reference SP relative frame objects");
    FIOffset =
        TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0).getFixed();
  } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {
    FIOffset = TFI->getWin64EHFrameIndexRef(MF, FrameIndex, BasePtr);
  } else {
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed();
  }
  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location. This
  // matches the behavior of llvm.frameaddress.
  unsigned Opc = MI.getOpcode();
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    FI.ChangeToImmediate(FIOffset);
    return false;
  }
  // For LEA64_32r, when BasePtr is 32 bits (X32), we can use the full-size
  // 64-bit register as the source operand; the semantics are the same and the
  // destination is 32 bits. This saves one byte per LEA since the 0x67 prefix
  // is avoided. Don't change BasePtr since it is used later for stack
  // adjustment.
  Register MachineBasePtr = BasePtr;
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register and add the frame offset to the
  // instruction's existing offset operand.
  MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return false;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
  return false;
}

unsigned X86RegisterInfo::findDeadCallerSavedReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI) const {
  const MachineFunction *MF = MBB.getParent();
  if (MF->callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *getGPRsForTailCall(*MF);

  if (MBBI == MBB.end())
    return 0;

  switch (MBBI->getOpcode()) {
  default:
    return 0;
  case TargetOpcode::PATCHABLE_RET:
  case X86::RET:
  case X86::RET32:
  case X86::RET64:
  case X86::RETI32:
  case X86::RETI64:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
      MachineOperand &MO = MBBI->getOperand(I);
      if (!MO.isReg() || MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, this, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP && CS != X86::ESP)
        return CS;
  }
  }

  return 0;
}

Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

unsigned
X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  Register StackReg = getStackRegister();
  if (Subtarget.isTarget64BitILP32())
    StackReg = getX86SubSuperRegister(StackReg, 32);
  return StackReg;
}
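// AMX tile registers carry a shape (rows x columns) that is tracked in the
// VirtRegMap. The helper below recovers that shape by looking through COPYs to
// the defining instruction; getRegAllocationHints() then uses it so that only
// physical tile registers whose currently assigned virtual register has a
// matching shape are suggested to the allocator.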
static ShapeT getTileShape(Register VirtReg, VirtRegMap *VRM,
                           const MachineRegisterInfo *MRI) {
  if (VRM->hasShape(VirtReg))
    return VRM->getShape(VirtReg);

  const MachineOperand &Def = *MRI->def_begin(VirtReg);
  MachineInstr *MI = const_cast<MachineInstr *>(Def.getParent());
  unsigned OpCode = MI->getOpcode();
  switch (OpCode) {
  default:
    llvm_unreachable("Unexpected machine instruction on tile register!");
    break;
  case X86::COPY: {
    Register SrcReg = MI->getOperand(1).getReg();
    ShapeT Shape = getTileShape(SrcReg, VRM, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
  // We only collect the tile shape that is defined.
  case X86::PTILELOADDV:
  case X86::PTILELOADDT1V:
  case X86::PTDPBSSDV:
  case X86::PTDPBSUDV:
  case X86::PTDPBUSDV:
  case X86::PTDPBUUDV:
  case X86::PTILEZEROV:
  case X86::PTDPBF16PSV:
  case X86::PTDPFP16PSV:
    MachineOperand &MO1 = MI->getOperand(1);
    MachineOperand &MO2 = MI->getOperand(2);
    ShapeT Shape(&MO1, &MO2, MRI);
    VRM->assignVirt2Shape(VirtReg, Shape);
    return Shape;
  }
}

bool X86RegisterInfo::getRegAllocationHints(Register VirtReg,
                                            ArrayRef<MCPhysReg> Order,
                                            SmallVectorImpl<MCPhysReg> &Hints,
                                            const MachineFunction &MF,
                                            const VirtRegMap *VRM,
                                            const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (RC.getID() != X86::TILERegClassID)
    return BaseImplRetVal;

  ShapeT VirtShape = getTileShape(VirtReg, const_cast<VirtRegMap *>(VRM), MRI);
  auto AddHint = [&](MCPhysReg PhysReg) {
    Register VReg = Matrix->getOneVReg(PhysReg);
    if (VReg == MCRegister::NoRegister) { // Not allocated yet
      Hints.push_back(PhysReg);
      return;
    }
    ShapeT PhysShape = getTileShape(VReg, const_cast<VirtRegMap *>(VRM), MRI);
    if (PhysShape == VirtShape)
      Hints.push_back(PhysReg);
  };

  SmallSet<MCPhysReg, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (auto Hint : CopyHints) {
    if (RC.contains(Hint) && !MRI->isReserved(Hint))
      AddHint(Hint);
  }
  for (MCPhysReg PhysReg : Order) {
    if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&
        !MRI->isReserved(PhysReg))
      AddHint(PhysReg);
  }

#define DEBUG_TYPE "tile-hint"
  LLVM_DEBUG({
    dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";
    for (auto Hint : Hints) {
      dbgs() << "tmm" << Hint << ",";
    }
    dbgs() << "\n";
  });
#undef DEBUG_TYPE

  return true;
}
  917. }