X86FloatingPoint.cpp 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789
  1. //===-- X86FloatingPoint.cpp - Floating point Reg -> Stack converter ------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file defines the pass which converts floating point instructions from
  10. // pseudo registers into register stack instructions. This pass uses live
  11. // variable information to indicate where the FPn registers are used and their
  12. // lifetimes.
  13. //
  14. // The x87 hardware tracks liveness of the stack registers, so it is necessary
  15. // to implement exact liveness tracking between basic blocks. The CFG edges are
  16. // partitioned into bundles where the same FP registers must be live in
  17. // identical stack positions. Instructions are inserted at the end of each basic
  18. // block to rearrange the live registers to match the outgoing bundle.
  19. //
  20. // This approach avoids splitting critical edges at the potential cost of more
  21. // live register shuffling instructions when critical edges are present.
  22. //
  23. //===----------------------------------------------------------------------===//
  24. #include "X86.h"
  25. #include "X86InstrInfo.h"
  26. #include "llvm/ADT/DepthFirstIterator.h"
  27. #include "llvm/ADT/STLExtras.h"
  28. #include "llvm/ADT/SmallPtrSet.h"
  29. #include "llvm/ADT/SmallSet.h"
  30. #include "llvm/ADT/SmallVector.h"
  31. #include "llvm/ADT/Statistic.h"
  32. #include "llvm/CodeGen/EdgeBundles.h"
  33. #include "llvm/CodeGen/LivePhysRegs.h"
  34. #include "llvm/CodeGen/MachineFunctionPass.h"
  35. #include "llvm/CodeGen/MachineInstrBuilder.h"
  36. #include "llvm/CodeGen/MachineRegisterInfo.h"
  37. #include "llvm/CodeGen/Passes.h"
  38. #include "llvm/CodeGen/TargetInstrInfo.h"
  39. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  40. #include "llvm/Config/llvm-config.h"
  41. #include "llvm/IR/InlineAsm.h"
  42. #include "llvm/InitializePasses.h"
  43. #include "llvm/Support/Debug.h"
  44. #include "llvm/Support/ErrorHandling.h"
  45. #include "llvm/Support/raw_ostream.h"
  46. #include "llvm/Target/TargetMachine.h"
  47. #include <algorithm>
  48. #include <bitset>
using namespace llvm;

#define DEBUG_TYPE "x86-codegen"

// Pass-wide statistics, reported under -stats.
STATISTIC(NumFXCH, "Number of fxch instructions inserted");
STATISTIC(NumFP  , "Number of floating point instructions");
  53. namespace {
// FP7 is reserved as a scratch register for this pass's own use (see the
// RegMap comment below about scratch registers).
const unsigned ScratchFPReg = 7;

struct FPS : public MachineFunctionPass {
  static char ID;

  FPS() : MachineFunctionPass(ID) {
    // This is really only to keep valgrind quiet.
    // The logic in isLive() is too much for it.
    memset(Stack, 0, sizeof(Stack));
    memset(RegMap, 0, sizeof(RegMap));
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<EdgeBundles>();
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  // Runs after register allocation; the function must contain no virtual
  // registers.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return "X86 FP Stackifier"; }

private:
  const TargetInstrInfo *TII = nullptr; // Machine instruction info.

  // Two CFG edges are related if they leave the same block, or enter the same
  // block. The transitive closure of an edge under this relation is a
  // LiveBundle. It represents a set of CFG edges where the live FP stack
  // registers must be allocated identically in the x87 stack.
  //
  // A LiveBundle is usually all the edges leaving a block, or all the edges
  // entering a block, but it can contain more edges if critical edges are
  // present.
  //
  // The set of live FP registers in a LiveBundle is calculated by bundleCFG,
  // but the exact mapping of FP registers to stack slots is fixed later.
  struct LiveBundle {
    // Bit mask of live FP registers. Bit 0 = FP0, bit 1 = FP1, &c.
    unsigned Mask = 0;

    // Number of pre-assigned live registers in FixStack. This is 0 when the
    // stack order has not yet been fixed.
    unsigned FixCount = 0;

    // Assigned stack order for live-in registers.
    // FixStack[i] == getStackEntry(i) for all i < FixCount.
    unsigned char FixStack[8];

    LiveBundle() = default;

    // Have the live registers been assigned a stack order yet?
    // A bundle with no live registers is trivially "fixed".
    bool isFixed() const { return !Mask || FixCount; }
  };

  // Numbered LiveBundle structs. LiveBundles[0] is used for all CFG edges
  // with no live FP registers.
  SmallVector<LiveBundle, 8> LiveBundles;

  // The edge bundle analysis provides indices into the LiveBundles vector.
  EdgeBundles *Bundles = nullptr;

  // Return a bitmask of FP registers in block's live-in list. When RemoveFPs
  // is true, also remove those FP registers from the live-in list.
  static unsigned calcLiveInMask(MachineBasicBlock *MBB, bool RemoveFPs) {
    unsigned Mask = 0;
    for (MachineBasicBlock::livein_iterator I = MBB->livein_begin();
         I != MBB->livein_end(); ) {
      MCPhysReg Reg = I->PhysReg;
      static_assert(X86::FP6 - X86::FP0 == 6, "sequential regnums");
      if (Reg >= X86::FP0 && Reg <= X86::FP6) {
        Mask |= 1 << (Reg - X86::FP0);
        if (RemoveFPs) {
          // removeLiveIn returns the next iterator, so skip the ++I below.
          I = MBB->removeLiveIn(I);
          continue;
        }
      }
      ++I;
    }
    return Mask;
  }

  // Partition all the CFG edges into LiveBundles.
  void bundleCFGRecomputeKillFlags(MachineFunction &MF);

  MachineBasicBlock *MBB = nullptr; // Current basic block

  // The hardware keeps track of how many FP registers are live, so we have
  // to model that exactly. Usually, each live register corresponds to an
  // FP<n> register, but when dealing with calls, returns, and inline
  // assembly, it is sometimes necessary to have live scratch registers.
  unsigned Stack[8];     // FP<n> Registers in each stack slot...
  unsigned StackTop = 0; // The current top of the FP stack.

  enum {
    NumFPRegs = 8 // Including scratch pseudo-registers.
  };

  // For each live FP<n> register, point to its Stack[] entry.
  // The first entries correspond to FP0-FP6, the rest are scratch registers
  // used when we need slightly different live registers than what the
  // register allocator thinks.
  unsigned RegMap[NumFPRegs];

  // Set up our stack model to match the incoming registers to MBB.
  void setupBlockStack();

  // Shuffle live registers to match the expectations of successor blocks.
  void finishBlockStack();

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Print the current stack model (bottom to top) and verify it is
  // consistent with RegMap.
  void dumpStack() const {
    dbgs() << "Stack contents:";
    for (unsigned i = 0; i != StackTop; ++i) {
      dbgs() << " FP" << Stack[i];
      assert(RegMap[Stack[i]] == i && "Stack[] doesn't match RegMap[]!");
    }
  }
#endif

  /// getSlot - Return the stack slot number a particular register number is
  /// in.
  unsigned getSlot(unsigned RegNo) const {
    assert(RegNo < NumFPRegs && "Regno out of range!");
    return RegMap[RegNo];
  }

  /// isLive - Is RegNo currently live in the stack?
  bool isLive(unsigned RegNo) const {
    unsigned Slot = getSlot(RegNo);
    return Slot < StackTop && Stack[Slot] == RegNo;
  }

  /// getStackEntry - Return the X86::FP<n> register in register ST(i).
  unsigned getStackEntry(unsigned STi) const {
    if (STi >= StackTop)
      report_fatal_error("Access past stack top!");
    return Stack[StackTop-1-STi];
  }

  /// getSTReg - Return the X86::ST(i) register which contains the specified
  /// FP<RegNo> register.
  unsigned getSTReg(unsigned RegNo) const {
    return StackTop - 1 - getSlot(RegNo) + X86::ST0;
  }

  // pushReg - Push the specified FP<n> register onto the stack.
  void pushReg(unsigned Reg) {
    assert(Reg < NumFPRegs && "Register number out of range!");
    if (StackTop >= 8)
      report_fatal_error("Stack overflow!");
    Stack[StackTop] = Reg;
    RegMap[Reg] = StackTop++;
  }

  // popReg - Pop a register from the stack.
  void popReg() {
    if (StackTop == 0)
      report_fatal_error("Cannot pop empty stack!");
    RegMap[Stack[--StackTop]] = ~0; // Update state
  }

  // isAtTop - Return true if RegNo currently occupies st(0).
  bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; }

  // moveToTop - Bring RegNo to st(0) by emitting an fxch before instruction
  // I, keeping the stack model in sync.
  void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) {
    DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
    if (isAtTop(RegNo)) return;

    unsigned STReg = getSTReg(RegNo);
    unsigned RegOnTop = getStackEntry(0);

    // Swap the slots the regs are in.
    std::swap(RegMap[RegNo], RegMap[RegOnTop]);

    // Swap stack slot contents.
    if (RegMap[RegOnTop] >= StackTop)
      report_fatal_error("Access past stack top!");
    std::swap(Stack[RegMap[RegOnTop]], Stack[StackTop-1]);

    // Emit an fxch to update the runtime processors version of the state.
    BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
    ++NumFXCH;
  }

  // duplicateToTop - Push a copy of RegNo as AsReg by emitting an fld of
  // RegNo's current ST register before instruction I.
  void duplicateToTop(unsigned RegNo, unsigned AsReg,
                      MachineBasicBlock::iterator I) {
    DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
    unsigned STReg = getSTReg(RegNo);
    pushReg(AsReg); // New register on top of stack

    BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg);
  }

  /// popStackAfter - Pop the current value off of the top of the FP stack
  /// after the specified instruction.
  void popStackAfter(MachineBasicBlock::iterator &I);

  /// freeStackSlotAfter - Free the specified register from the register
  /// stack, so that it is no longer in a register. If the register is
  /// currently at the top of the stack, we just pop the current instruction,
  /// otherwise we store the current top-of-stack into the specified slot,
  /// then pop the top of stack.
  void freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned Reg);

  /// freeStackSlotBefore - Just the pop, no folding. Return the inserted
  /// instruction.
  MachineBasicBlock::iterator
  freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo);

  /// Adjust the live registers to be the set in Mask.
  void adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I);

  /// Shuffle the top FixCount stack entries such that FP reg FixStack[0] is
  /// st(0), FP reg FixStack[1] is st(1) etc.
  void shuffleStackTop(const unsigned char *FixStack, unsigned FixCount,
                       MachineBasicBlock::iterator I);

  bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);

  // Per-instruction handlers, dispatched on the X86II::FPTypeMask bits in
  // processBasicBlock.
  void handleCall(MachineBasicBlock::iterator &I);
  void handleReturn(MachineBasicBlock::iterator &I);
  void handleZeroArgFP(MachineBasicBlock::iterator &I);
  void handleOneArgFP(MachineBasicBlock::iterator &I);
  void handleOneArgFPRW(MachineBasicBlock::iterator &I);
  void handleTwoArgFP(MachineBasicBlock::iterator &I);
  void handleCompareFP(MachineBasicBlock::iterator &I);
  void handleCondMovFP(MachineBasicBlock::iterator &I);
  void handleSpecialFP(MachineBasicBlock::iterator &I);

  // Check if a COPY instruction is using FP registers.
  // NOTE(review): reads operands 0 and 1 as registers unconditionally, which
  // holds for COPY instructions by construction.
  static bool isFPCopy(MachineInstr &MI) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();

    return X86::RFP80RegClass.contains(DstReg) ||
           X86::RFP80RegClass.contains(SrcReg);
  }

  void setKillFlags(MachineBasicBlock &MBB) const;
};
  252. }
char FPS::ID = 0;

// Register the pass and its dependency on the EdgeBundles analysis.
INITIALIZE_PASS_BEGIN(FPS, DEBUG_TYPE, "X86 FP Stackifier",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_END(FPS, DEBUG_TYPE, "X86 FP Stackifier",
                    false, false)

// Factory used by the X86 target to add this pass to the codegen pipeline.
FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); }
  260. /// getFPReg - Return the X86::FPx register number for the specified operand.
  261. /// For example, this returns 3 for X86::FP3.
  262. static unsigned getFPReg(const MachineOperand &MO) {
  263. assert(MO.isReg() && "Expected an FP register!");
  264. Register Reg = MO.getReg();
  265. assert(Reg >= X86::FP0 && Reg <= X86::FP6 && "Expected FP register!");
  266. return Reg - X86::FP0;
  267. }
/// runOnMachineFunction - Loop over all of the basic blocks, transforming FP
/// register references into FP stack references.
///
bool FPS::runOnMachineFunction(MachineFunction &MF) {
  // We only need to run this pass if there are any FP registers used in this
  // function. If it is all integer, there is nothing for us to do!
  bool FPIsUsed = false;

  static_assert(X86::FP6 == X86::FP0+6, "Register enums aren't sorted right!");
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned i = 0; i <= 6; ++i)
    if (!MRI.reg_nodbg_empty(X86::FP0 + i)) {
      FPIsUsed = true;
      break;
    }

  // Early exit.
  if (!FPIsUsed) return false;

  Bundles = &getAnalysis<EdgeBundles>();
  TII = MF.getSubtarget().getInstrInfo();

  // Prepare cross-MBB liveness.
  bundleCFGRecomputeKillFlags(MF);

  StackTop = 0;

  // Process the function in depth first order so that we process at least one
  // of the predecessors for every reachable block in the function.
  df_iterator_default_set<MachineBasicBlock*> Processed;
  MachineBasicBlock *Entry = &MF.front();

  LiveBundle &Bundle =
    LiveBundles[Bundles->getBundle(Entry->getNumber(), false)];

  // In regcall convention, some FP registers may not be passed through
  // the stack, so they will need to be assigned to the stack first
  if ((Entry->getParent()->getFunction().getCallingConv() ==
       CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) {
    // In the register calling convention, up to one FP argument could be
    // saved in the first FP register.
    // If bundle.mask is non-zero and Bundle.FixCount is zero, it means
    // that the FP registers contain arguments.
    // The actual value is passed in FP0.
    // Here we fix the stack and mark FP0 as pre-assigned register.
    assert((Bundle.Mask & 0xFE) == 0 &&
           "Only FP0 could be passed as an argument");
    Bundle.FixCount = 1;
    Bundle.FixStack[0] = 0;
  }

  bool Changed = false;
  for (MachineBasicBlock *BB : depth_first_ext(Entry, Processed))
    Changed |= processBasicBlock(MF, *BB);

  // Process any unreachable blocks in arbitrary order now.
  if (MF.size() != Processed.size())
    for (MachineBasicBlock &BB : MF)
      if (Processed.insert(&BB).second)
        Changed |= processBasicBlock(MF, BB);

  // Release per-function state; it is rebuilt on the next run.
  LiveBundles.clear();

  return Changed;
}
  321. /// bundleCFG - Scan all the basic blocks to determine consistent live-in and
  322. /// live-out sets for the FP registers. Consistent means that the set of
  323. /// registers live-out from a block is identical to the live-in set of all
  324. /// successors. This is not enforced by the normal live-in lists since
  325. /// registers may be implicitly defined, or not used by all successors.
  326. void FPS::bundleCFGRecomputeKillFlags(MachineFunction &MF) {
  327. assert(LiveBundles.empty() && "Stale data in LiveBundles");
  328. LiveBundles.resize(Bundles->getNumBundles());
  329. // Gather the actual live-in masks for all MBBs.
  330. for (MachineBasicBlock &MBB : MF) {
  331. setKillFlags(MBB);
  332. const unsigned Mask = calcLiveInMask(&MBB, false);
  333. if (!Mask)
  334. continue;
  335. // Update MBB ingoing bundle mask.
  336. LiveBundles[Bundles->getBundle(MBB.getNumber(), false)].Mask |= Mask;
  337. }
  338. }
/// processBasicBlock - Loop over all of the instructions in the basic block,
/// transforming FP instructions into their stack form.
///
bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
  bool Changed = false;
  MBB = &BB;

  // Load the incoming stack model from the block's live-in bundle.
  setupBlockStack();

  for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
    MachineInstr &MI = *I;
    uint64_t Flags = MI.getDesc().TSFlags;

    // Classify the instruction; several instruction kinds that the TSFlags
    // don't cover (inline asm, FP copies, implicit defs, calls) are routed
    // through the SpecialFP handler.
    unsigned FPInstClass = Flags & X86II::FPTypeMask;
    if (MI.isInlineAsm())
      FPInstClass = X86II::SpecialFP;

    if (MI.isCopy() && isFPCopy(MI))
      FPInstClass = X86II::SpecialFP;

    if (MI.isImplicitDef() &&
        X86::RFP80RegClass.contains(MI.getOperand(0).getReg()))
      FPInstClass = X86II::SpecialFP;

    if (MI.isCall())
      FPInstClass = X86II::SpecialFP;

    if (FPInstClass == X86II::NotFP)
      continue; // Efficiently ignore non-fp insts!

    // Remember the instruction before MI so that, after the handlers run, we
    // can find the range of instructions they inserted (debug printing only).
    MachineInstr *PrevMI = nullptr;
    if (I != BB.begin())
      PrevMI = &*std::prev(I);

    ++NumFP; // Keep track of # of pseudo instrs
    LLVM_DEBUG(dbgs() << "\nFPInst:\t" << MI);

    // Get dead variables list now because the MI pointer may be deleted as part
    // of processing!
    SmallVector<unsigned, 8> DeadRegs;
    for (const MachineOperand &MO : MI.operands())
      if (MO.isReg() && MO.isDead())
        DeadRegs.push_back(MO.getReg());

    switch (FPInstClass) {
    case X86II::ZeroArgFP: handleZeroArgFP(I); break;
    case X86II::OneArgFP: handleOneArgFP(I); break; // fstp ST(0)
    case X86II::OneArgFPRW: handleOneArgFPRW(I); break; // ST(0) = fsqrt(ST(0))
    case X86II::TwoArgFP: handleTwoArgFP(I); break;
    case X86II::CompareFP: handleCompareFP(I); break;
    case X86II::CondMovFP: handleCondMovFP(I); break;
    case X86II::SpecialFP: handleSpecialFP(I); break;
    default: llvm_unreachable("Unknown FP Type!");
    }

    // Check to see if any of the values defined by this instruction are dead
    // after definition. If so, pop them.
    for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i) {
      unsigned Reg = DeadRegs[i];
      // Check if Reg is live on the stack. An inline-asm register operand that
      // is in the clobber list and marked dead might not be live on the stack.
      static_assert(X86::FP7 - X86::FP0 == 7, "sequential FP regnumbers");
      if (Reg >= X86::FP0 && Reg <= X86::FP6 && isLive(Reg-X86::FP0)) {
        LLVM_DEBUG(dbgs() << "Register FP#" << Reg - X86::FP0 << " is dead!\n");
        freeStackSlotAfter(I, Reg-X86::FP0);
      }
    }

    // Print out all of the instructions expanded to if -debug
    LLVM_DEBUG({
      MachineBasicBlock::iterator PrevI = PrevMI;
      if (I == PrevI) {
        dbgs() << "Just deleted pseudo instruction\n";
      } else {
        MachineBasicBlock::iterator Start = I;
        // Rewind to first instruction newly inserted.
        while (Start != BB.begin() && std::prev(Start) != PrevI)
          --Start;
        dbgs() << "Inserted instructions:\n\t";
        Start->print(dbgs());
        // NOTE(review): this loop advances past the remaining inserted
        // instructions without printing them — only the first one is shown.
        // Confirm against upstream whether the bodies should print each one.
        while (++Start != std::next(I)) {
        }
      }
      dumpStack();
    });
    (void)PrevMI;

    Changed = true;
  }

  // Emit any shuffles needed so the outgoing stack matches successor bundles.
  finishBlockStack();

  return Changed;
}
  417. /// setupBlockStack - Use the live bundles to set up our model of the stack
  418. /// to match predecessors' live out stack.
  419. void FPS::setupBlockStack() {
  420. LLVM_DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB)
  421. << " derived from " << MBB->getName() << ".\n");
  422. StackTop = 0;
  423. // Get the live-in bundle for MBB.
  424. const LiveBundle &Bundle =
  425. LiveBundles[Bundles->getBundle(MBB->getNumber(), false)];
  426. if (!Bundle.Mask) {
  427. LLVM_DEBUG(dbgs() << "Block has no FP live-ins.\n");
  428. return;
  429. }
  430. // Depth-first iteration should ensure that we always have an assigned stack.
  431. assert(Bundle.isFixed() && "Reached block before any predecessors");
  432. // Push the fixed live-in registers.
  433. for (unsigned i = Bundle.FixCount; i > 0; --i) {
  434. LLVM_DEBUG(dbgs() << "Live-in st(" << (i - 1) << "): %fp"
  435. << unsigned(Bundle.FixStack[i - 1]) << '\n');
  436. pushReg(Bundle.FixStack[i-1]);
  437. }
  438. // Kill off unwanted live-ins. This can happen with a critical edge.
  439. // FIXME: We could keep these live registers around as zombies. They may need
  440. // to be revived at the end of a short block. It might save a few instrs.
  441. unsigned Mask = calcLiveInMask(MBB, /*RemoveFPs=*/true);
  442. adjustLiveRegs(Mask, MBB->begin());
  443. LLVM_DEBUG(MBB->dump());
  444. }
  445. /// finishBlockStack - Revive live-outs that are implicitly defined out of
  446. /// MBB. Shuffle live registers to match the expected fixed stack of any
  447. /// predecessors, and ensure that all predecessors are expecting the same
  448. /// stack.
  449. void FPS::finishBlockStack() {
  450. // The RET handling below takes care of return blocks for us.
  451. if (MBB->succ_empty())
  452. return;
  453. LLVM_DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB)
  454. << " derived from " << MBB->getName() << ".\n");
  455. // Get MBB's live-out bundle.
  456. unsigned BundleIdx = Bundles->getBundle(MBB->getNumber(), true);
  457. LiveBundle &Bundle = LiveBundles[BundleIdx];
  458. // We may need to kill and define some registers to match successors.
  459. // FIXME: This can probably be combined with the shuffle below.
  460. MachineBasicBlock::iterator Term = MBB->getFirstTerminator();
  461. adjustLiveRegs(Bundle.Mask, Term);
  462. if (!Bundle.Mask) {
  463. LLVM_DEBUG(dbgs() << "No live-outs.\n");
  464. return;
  465. }
  466. // Has the stack order been fixed yet?
  467. LLVM_DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
  468. if (Bundle.isFixed()) {
  469. LLVM_DEBUG(dbgs() << "Shuffling stack to match.\n");
  470. shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
  471. } else {
  472. // Not fixed yet, we get to choose.
  473. LLVM_DEBUG(dbgs() << "Fixing stack order now.\n");
  474. Bundle.FixCount = StackTop;
  475. for (unsigned i = 0; i < StackTop; ++i)
  476. Bundle.FixStack[i] = getStackEntry(i);
  477. }
  478. }
  479. //===----------------------------------------------------------------------===//
  480. // Efficient Lookup Table Support
  481. //===----------------------------------------------------------------------===//
namespace {
// TableEntry - Maps a register-file pseudo opcode ('from') to its concrete
// register-stack form ('to'). Tables of these are binary-searched by
// Lookup() below, so they must be sorted by 'from'.
struct TableEntry {
  uint16_t from;
  uint16_t to;
  // Entry-to-entry ordering used by the sortedness check.
  bool operator<(const TableEntry &TE) const { return from < TE.from; }
  // Heterogeneous comparisons against a bare opcode, required so that
  // lower_bound() can search a table with an unsigned key directly.
  friend bool operator<(const TableEntry &TE, unsigned V) {
    return TE.from < V;
  }
  friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned V,
                                              const TableEntry &TE) {
    return V < TE.from;
  }
};
} // end anonymous namespace
  496. static int Lookup(ArrayRef<TableEntry> Table, unsigned Opcode) {
  497. const TableEntry *I = llvm::lower_bound(Table, Opcode);
  498. if (I != Table.end() && I->from == Opcode)
  499. return I->to;
  500. return -1;
  501. }
// ASSERT_SORTED(TABLE) - In debug builds, verify once per table that TABLE is
// sorted by 'from', as Lookup()'s binary search requires. The relaxed atomic
// flag makes the check thread-safe and cheap after the first call. Expands to
// nothing in NDEBUG builds.
#ifdef NDEBUG
#define ASSERT_SORTED(TABLE)
#else
#define ASSERT_SORTED(TABLE)                                                   \
  {                                                                            \
    static std::atomic<bool> TABLE##Checked(false);                            \
    if (!TABLE##Checked.load(std::memory_order_relaxed)) {                     \
      assert(is_sorted(TABLE) &&                                               \
             "All lookup tables must be sorted for efficient access!");        \
      TABLE##Checked.store(true, std::memory_order_relaxed);                   \
    }                                                                          \
  }
#endif
  515. //===----------------------------------------------------------------------===//
  516. // Register File -> Register Stack Mapping Methods
  517. //===----------------------------------------------------------------------===//
  518. // OpcodeTable - Sorted map of register instructions to their stack version.
  519. // The first element is an register file pseudo instruction, the second is the
  520. // concrete X86 instruction which uses the register stack.
  521. //
static const TableEntry OpcodeTable[] = {
    // Keyed by the register-file pseudo opcode, sorted ascending (binary
    // search via Lookup; order is verified by ASSERT_SORTED at the use site).
    // The second field is the concrete x87 stack opcode. Note that the three
    // precision variants (Fp32/Fp64/Fp80) of an operation typically collapse
    // onto a single stack instruction.
    { X86::ABS_Fp32, X86::ABS_F },
    { X86::ABS_Fp64, X86::ABS_F },
    { X86::ABS_Fp80, X86::ABS_F },
    { X86::ADD_Fp32m, X86::ADD_F32m },
    { X86::ADD_Fp64m, X86::ADD_F64m },
    { X86::ADD_Fp64m32, X86::ADD_F32m },
    { X86::ADD_Fp80m32, X86::ADD_F32m },
    { X86::ADD_Fp80m64, X86::ADD_F64m },
    { X86::ADD_FpI16m32, X86::ADD_FI16m },
    { X86::ADD_FpI16m64, X86::ADD_FI16m },
    { X86::ADD_FpI16m80, X86::ADD_FI16m },
    { X86::ADD_FpI32m32, X86::ADD_FI32m },
    { X86::ADD_FpI32m64, X86::ADD_FI32m },
    { X86::ADD_FpI32m80, X86::ADD_FI32m },
    { X86::CHS_Fp32, X86::CHS_F },
    { X86::CHS_Fp64, X86::CHS_F },
    { X86::CHS_Fp80, X86::CHS_F },
    { X86::CMOVBE_Fp32, X86::CMOVBE_F },
    { X86::CMOVBE_Fp64, X86::CMOVBE_F },
    { X86::CMOVBE_Fp80, X86::CMOVBE_F },
    { X86::CMOVB_Fp32, X86::CMOVB_F },
    { X86::CMOVB_Fp64, X86::CMOVB_F },
    { X86::CMOVB_Fp80, X86::CMOVB_F },
    { X86::CMOVE_Fp32, X86::CMOVE_F },
    { X86::CMOVE_Fp64, X86::CMOVE_F },
    { X86::CMOVE_Fp80, X86::CMOVE_F },
    { X86::CMOVNBE_Fp32, X86::CMOVNBE_F },
    { X86::CMOVNBE_Fp64, X86::CMOVNBE_F },
    { X86::CMOVNBE_Fp80, X86::CMOVNBE_F },
    { X86::CMOVNB_Fp32, X86::CMOVNB_F },
    { X86::CMOVNB_Fp64, X86::CMOVNB_F },
    { X86::CMOVNB_Fp80, X86::CMOVNB_F },
    { X86::CMOVNE_Fp32, X86::CMOVNE_F },
    { X86::CMOVNE_Fp64, X86::CMOVNE_F },
    { X86::CMOVNE_Fp80, X86::CMOVNE_F },
    { X86::CMOVNP_Fp32, X86::CMOVNP_F },
    { X86::CMOVNP_Fp64, X86::CMOVNP_F },
    { X86::CMOVNP_Fp80, X86::CMOVNP_F },
    { X86::CMOVP_Fp32, X86::CMOVP_F },
    { X86::CMOVP_Fp64, X86::CMOVP_F },
    { X86::CMOVP_Fp80, X86::CMOVP_F },
    { X86::COM_FpIr32, X86::COM_FIr },
    { X86::COM_FpIr64, X86::COM_FIr },
    { X86::COM_FpIr80, X86::COM_FIr },
    { X86::COM_Fpr32, X86::COM_FST0r },
    { X86::COM_Fpr64, X86::COM_FST0r },
    { X86::COM_Fpr80, X86::COM_FST0r },
    { X86::DIVR_Fp32m, X86::DIVR_F32m },
    { X86::DIVR_Fp64m, X86::DIVR_F64m },
    { X86::DIVR_Fp64m32, X86::DIVR_F32m },
    { X86::DIVR_Fp80m32, X86::DIVR_F32m },
    { X86::DIVR_Fp80m64, X86::DIVR_F64m },
    { X86::DIVR_FpI16m32, X86::DIVR_FI16m },
    { X86::DIVR_FpI16m64, X86::DIVR_FI16m },
    { X86::DIVR_FpI16m80, X86::DIVR_FI16m },
    { X86::DIVR_FpI32m32, X86::DIVR_FI32m },
    { X86::DIVR_FpI32m64, X86::DIVR_FI32m },
    { X86::DIVR_FpI32m80, X86::DIVR_FI32m },
    { X86::DIV_Fp32m, X86::DIV_F32m },
    { X86::DIV_Fp64m, X86::DIV_F64m },
    { X86::DIV_Fp64m32, X86::DIV_F32m },
    { X86::DIV_Fp80m32, X86::DIV_F32m },
    { X86::DIV_Fp80m64, X86::DIV_F64m },
    { X86::DIV_FpI16m32, X86::DIV_FI16m },
    { X86::DIV_FpI16m64, X86::DIV_FI16m },
    { X86::DIV_FpI16m80, X86::DIV_FI16m },
    { X86::DIV_FpI32m32, X86::DIV_FI32m },
    { X86::DIV_FpI32m64, X86::DIV_FI32m },
    { X86::DIV_FpI32m80, X86::DIV_FI32m },
    { X86::ILD_Fp16m32, X86::ILD_F16m },
    { X86::ILD_Fp16m64, X86::ILD_F16m },
    { X86::ILD_Fp16m80, X86::ILD_F16m },
    { X86::ILD_Fp32m32, X86::ILD_F32m },
    { X86::ILD_Fp32m64, X86::ILD_F32m },
    { X86::ILD_Fp32m80, X86::ILD_F32m },
    { X86::ILD_Fp64m32, X86::ILD_F64m },
    { X86::ILD_Fp64m64, X86::ILD_F64m },
    { X86::ILD_Fp64m80, X86::ILD_F64m },
    { X86::ISTT_Fp16m32, X86::ISTT_FP16m },
    { X86::ISTT_Fp16m64, X86::ISTT_FP16m },
    { X86::ISTT_Fp16m80, X86::ISTT_FP16m },
    { X86::ISTT_Fp32m32, X86::ISTT_FP32m },
    { X86::ISTT_Fp32m64, X86::ISTT_FP32m },
    { X86::ISTT_Fp32m80, X86::ISTT_FP32m },
    { X86::ISTT_Fp64m32, X86::ISTT_FP64m },
    { X86::ISTT_Fp64m64, X86::ISTT_FP64m },
    { X86::ISTT_Fp64m80, X86::ISTT_FP64m },
    { X86::IST_Fp16m32, X86::IST_F16m },
    { X86::IST_Fp16m64, X86::IST_F16m },
    { X86::IST_Fp16m80, X86::IST_F16m },
    { X86::IST_Fp32m32, X86::IST_F32m },
    { X86::IST_Fp32m64, X86::IST_F32m },
    { X86::IST_Fp32m80, X86::IST_F32m },
    // Note: the 64-bit integer store maps to the *popping* form — a
    // non-popping fist for 64-bit does not exist; handleOneArgFP compensates.
    { X86::IST_Fp64m32, X86::IST_FP64m },
    { X86::IST_Fp64m64, X86::IST_FP64m },
    { X86::IST_Fp64m80, X86::IST_FP64m },
    { X86::LD_Fp032, X86::LD_F0 },
    { X86::LD_Fp064, X86::LD_F0 },
    { X86::LD_Fp080, X86::LD_F0 },
    { X86::LD_Fp132, X86::LD_F1 },
    { X86::LD_Fp164, X86::LD_F1 },
    { X86::LD_Fp180, X86::LD_F1 },
    { X86::LD_Fp32m, X86::LD_F32m },
    { X86::LD_Fp32m64, X86::LD_F32m },
    { X86::LD_Fp32m80, X86::LD_F32m },
    { X86::LD_Fp64m, X86::LD_F64m },
    { X86::LD_Fp64m80, X86::LD_F64m },
    { X86::LD_Fp80m, X86::LD_F80m },
    { X86::MUL_Fp32m, X86::MUL_F32m },
    { X86::MUL_Fp64m, X86::MUL_F64m },
    { X86::MUL_Fp64m32, X86::MUL_F32m },
    { X86::MUL_Fp80m32, X86::MUL_F32m },
    { X86::MUL_Fp80m64, X86::MUL_F64m },
    { X86::MUL_FpI16m32, X86::MUL_FI16m },
    { X86::MUL_FpI16m64, X86::MUL_FI16m },
    { X86::MUL_FpI16m80, X86::MUL_FI16m },
    { X86::MUL_FpI32m32, X86::MUL_FI32m },
    { X86::MUL_FpI32m64, X86::MUL_FI32m },
    { X86::MUL_FpI32m80, X86::MUL_FI32m },
    { X86::SQRT_Fp32, X86::SQRT_F },
    { X86::SQRT_Fp64, X86::SQRT_F },
    { X86::SQRT_Fp80, X86::SQRT_F },
    { X86::ST_Fp32m, X86::ST_F32m },
    { X86::ST_Fp64m, X86::ST_F64m },
    { X86::ST_Fp64m32, X86::ST_F32m },
    { X86::ST_Fp80m32, X86::ST_F32m },
    { X86::ST_Fp80m64, X86::ST_F64m },
    // 80-bit store only exists in popping form; see handleOneArgFP.
    { X86::ST_FpP80m, X86::ST_FP80m },
    { X86::SUBR_Fp32m, X86::SUBR_F32m },
    { X86::SUBR_Fp64m, X86::SUBR_F64m },
    { X86::SUBR_Fp64m32, X86::SUBR_F32m },
    { X86::SUBR_Fp80m32, X86::SUBR_F32m },
    { X86::SUBR_Fp80m64, X86::SUBR_F64m },
    { X86::SUBR_FpI16m32, X86::SUBR_FI16m },
    { X86::SUBR_FpI16m64, X86::SUBR_FI16m },
    { X86::SUBR_FpI16m80, X86::SUBR_FI16m },
    { X86::SUBR_FpI32m32, X86::SUBR_FI32m },
    { X86::SUBR_FpI32m64, X86::SUBR_FI32m },
    { X86::SUBR_FpI32m80, X86::SUBR_FI32m },
    { X86::SUB_Fp32m, X86::SUB_F32m },
    { X86::SUB_Fp64m, X86::SUB_F64m },
    { X86::SUB_Fp64m32, X86::SUB_F32m },
    { X86::SUB_Fp80m32, X86::SUB_F32m },
    { X86::SUB_Fp80m64, X86::SUB_F64m },
    { X86::SUB_FpI16m32, X86::SUB_FI16m },
    { X86::SUB_FpI16m64, X86::SUB_FI16m },
    { X86::SUB_FpI16m80, X86::SUB_FI16m },
    { X86::SUB_FpI32m32, X86::SUB_FI32m },
    { X86::SUB_FpI32m64, X86::SUB_FI32m },
    { X86::SUB_FpI32m80, X86::SUB_FI32m },
    { X86::TST_Fp32, X86::TST_F },
    { X86::TST_Fp64, X86::TST_F },
    { X86::TST_Fp80, X86::TST_F },
    { X86::UCOM_FpIr32, X86::UCOM_FIr },
    { X86::UCOM_FpIr64, X86::UCOM_FIr },
    { X86::UCOM_FpIr80, X86::UCOM_FIr },
    { X86::UCOM_Fpr32, X86::UCOM_Fr },
    { X86::UCOM_Fpr64, X86::UCOM_Fr },
    { X86::UCOM_Fpr80, X86::UCOM_Fr },
    { X86::XAM_Fp32, X86::XAM_F },
    { X86::XAM_Fp64, X86::XAM_F },
    { X86::XAM_Fp80, X86::XAM_F },
};
  686. static unsigned getConcreteOpcode(unsigned Opcode) {
  687. ASSERT_SORTED(OpcodeTable);
  688. int Opc = Lookup(OpcodeTable, Opcode);
  689. assert(Opc != -1 && "FP Stack instruction not in OpcodeTable!");
  690. return Opc;
  691. }
  692. //===----------------------------------------------------------------------===//
  693. // Helper Methods
  694. //===----------------------------------------------------------------------===//
  695. // PopTable - Sorted map of instructions to their popping version. The first
  696. // element is an instruction, the second is the version which pops.
  697. //
static const TableEntry PopTable[] = {
    // Sorted by the non-popping opcode (verified by ASSERT_SORTED in
    // popStackAfter); the second field is the form that also pops ST(0).
    { X86::ADD_FrST0, X86::ADD_FPrST0 },
    { X86::COMP_FST0r, X86::FCOMPP },
    { X86::COM_FIr, X86::COM_FIPr },
    { X86::COM_FST0r, X86::COMP_FST0r },
    { X86::DIVR_FrST0, X86::DIVR_FPrST0 },
    { X86::DIV_FrST0, X86::DIV_FPrST0 },
    { X86::IST_F16m, X86::IST_FP16m },
    { X86::IST_F32m, X86::IST_FP32m },
    { X86::MUL_FrST0, X86::MUL_FPrST0 },
    { X86::ST_F32m, X86::ST_FP32m },
    { X86::ST_F64m, X86::ST_FP64m },
    { X86::ST_Frr, X86::ST_FPrr },
    { X86::SUBR_FrST0, X86::SUBR_FPrST0 },
    { X86::SUB_FrST0, X86::SUB_FPrST0 },
    { X86::UCOM_FIr, X86::UCOM_FIPr },
    // UCOM chains: UCOM_Fr -> UCOM_FPr (pop once) -> UCOM_FPPr (pop twice).
    { X86::UCOM_FPr, X86::UCOM_FPPr },
    { X86::UCOM_Fr, X86::UCOM_FPr },
};
  717. static bool doesInstructionSetFPSW(MachineInstr &MI) {
  718. if (const MachineOperand *MO = MI.findRegisterDefOperand(X86::FPSW))
  719. if (!MO->isDead())
  720. return true;
  721. return false;
  722. }
  723. static MachineBasicBlock::iterator
  724. getNextFPInstruction(MachineBasicBlock::iterator I) {
  725. MachineBasicBlock &MBB = *I->getParent();
  726. while (++I != MBB.end()) {
  727. MachineInstr &MI = *I;
  728. if (X86::isX87Instruction(MI))
  729. return I;
  730. }
  731. return MBB.end();
  732. }
  733. /// popStackAfter - Pop the current value off of the top of the FP stack after
  734. /// the specified instruction. This attempts to be sneaky and combine the pop
  735. /// into the instruction itself if possible. The iterator is left pointing to
  736. /// the last instruction, be it a new pop instruction inserted, or the old
  737. /// instruction if it was modified in place.
  738. ///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
  MachineInstr &MI = *I;
  const DebugLoc &dl = MI.getDebugLoc();
  ASSERT_SORTED(PopTable);

  // Update our simulated stack first: the value leaves the stack regardless
  // of how the pop is materialized below.
  popReg();

  // Check to see if there is a popping version of this instruction...
  int Opcode = Lookup(PopTable, I->getOpcode());
  if (Opcode != -1) {
    I->setDesc(TII->get(Opcode));
    // FCOMPP and UCOM_FPPr take no explicit register operand, so drop the
    // one the non-popping form carried.
    if (Opcode == X86::FCOMPP || Opcode == X86::UCOM_FPPr)
      I->removeOperand(0);
    MI.dropDebugNumber();
  } else { // Insert an explicit pop
    // If this instruction sets FPSW, which is read in following instruction,
    // insert pop after that reader so it still sees the flags.
    if (doesInstructionSetFPSW(MI)) {
      MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::iterator Next = getNextFPInstruction(I);
      if (Next != MBB.end() && Next->readsRegister(X86::FPSW))
        I = Next;
    }
    // Emit ST_FPrr(ST0) — effectively a bare pop (the same opcode is used as
    // a pop-to-slot in freeStackSlotBefore). ++I places it *after* I, and
    // BuildMI's result leaves I pointing at the inserted instruction, per
    // this function's contract. Note: *MBB here dereferences the FPS member
    // pointer; the local `MBB` reference above is already out of scope.
    I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(X86::ST0);
  }
}
  763. /// freeStackSlotAfter - Free the specified register from the register stack, so
  764. /// that it is no longer in a register. If the register is currently at the top
  765. /// of the stack, we just pop the current instruction, otherwise we store the
  766. /// current top-of-stack into the specified slot, then pop the top of stack.
  767. void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
  768. if (getStackEntry(0) == FPRegNo) { // already at the top of stack? easy.
  769. popStackAfter(I);
  770. return;
  771. }
  772. // Otherwise, store the top of stack into the dead slot, killing the operand
  773. // without having to add in an explicit xchg then pop.
  774. //
  775. I = freeStackSlotBefore(++I, FPRegNo);
  776. }
  777. /// freeStackSlotBefore - Free the specified register without trying any
  778. /// folding.
  779. MachineBasicBlock::iterator
  780. FPS::freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo) {
  781. unsigned STReg = getSTReg(FPRegNo);
  782. unsigned OldSlot = getSlot(FPRegNo);
  783. unsigned TopReg = Stack[StackTop-1];
  784. Stack[OldSlot] = TopReg;
  785. RegMap[TopReg] = OldSlot;
  786. RegMap[FPRegNo] = ~0;
  787. Stack[--StackTop] = ~0;
  788. return BuildMI(*MBB, I, DebugLoc(), TII->get(X86::ST_FPrr))
  789. .addReg(STReg)
  790. .getInstr();
  791. }
  792. /// adjustLiveRegs - Kill and revive registers such that exactly the FP
  793. /// registers with a bit in Mask are live.
void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
  // Partition the world into two bitmasks: Defs = registers that should be
  // live but aren't yet on the stack; Kills = registers on the stack that
  // should be dead.
  unsigned Defs = Mask;
  unsigned Kills = 0;
  for (unsigned i = 0; i < StackTop; ++i) {
    unsigned RegNo = Stack[i];
    if (!(Defs & (1 << RegNo)))
      // This register is live, but we don't want it.
      Kills |= (1 << RegNo);
    else
      // We don't need to imp-def this live register.
      Defs &= ~(1 << RegNo);
  }
  assert((Kills & Defs) == 0 && "Register needs killing and def'ing?");

  // Produce implicit-defs for free by using killed registers: renaming a
  // to-be-killed stack slot as a to-be-defined register satisfies both at
  // zero instruction cost.
  while (Kills && Defs) {
    unsigned KReg = countTrailingZeros(Kills);
    unsigned DReg = countTrailingZeros(Defs);
    LLVM_DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg
                      << "\n");
    std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
    std::swap(RegMap[KReg], RegMap[DReg]);
    Kills &= ~(1 << KReg);
    Defs &= ~(1 << DReg);
  }

  // Kill registers by popping, as long as the doomed register sits at the
  // top of the stack. The pops are inserted after the instruction *before*
  // I, i.e. effectively before I.
  if (Kills && I != MBB->begin()) {
    MachineBasicBlock::iterator I2 = std::prev(I);
    while (StackTop) {
      unsigned KReg = getStackEntry(0);
      if (!(Kills & (1 << KReg)))
        break;
      LLVM_DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
      popStackAfter(I2);
      Kills &= ~(1 << KReg);
    }
  }

  // Manually kill the rest (registers buried deeper in the stack).
  while (Kills) {
    unsigned KReg = countTrailingZeros(Kills);
    LLVM_DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
    freeStackSlotBefore(I, KReg);
    Kills &= ~(1 << KReg);
  }

  // Load zeros for all the imp-defs: fld0 materializes each remaining
  // required register on the stack.
  while (Defs) {
    unsigned DReg = countTrailingZeros(Defs);
    LLVM_DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
    BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
    pushReg(DReg);
    Defs &= ~(1 << DReg);
  }

  // Now we should have the correct registers live.
  LLVM_DEBUG(dumpStack());
  assert(StackTop == (unsigned)llvm::popcount(Mask) && "Live count mismatch");
}
  849. /// shuffleStackTop - emit fxch instructions before I to shuffle the top
  850. /// FixCount entries into the order given by FixStack.
  851. /// FIXME: Is there a better algorithm than insertion sort?
  852. void FPS::shuffleStackTop(const unsigned char *FixStack,
  853. unsigned FixCount,
  854. MachineBasicBlock::iterator I) {
  855. // Move items into place, starting from the desired stack bottom.
  856. while (FixCount--) {
  857. // Old register at position FixCount.
  858. unsigned OldReg = getStackEntry(FixCount);
  859. // Desired register at position FixCount.
  860. unsigned Reg = FixStack[FixCount];
  861. if (Reg == OldReg)
  862. continue;
  863. // (Reg st0) (OldReg st0) = (Reg OldReg st0)
  864. moveToTop(Reg, I);
  865. if (FixCount > 0)
  866. moveToTop(OldReg, I);
  867. }
  868. LLVM_DEBUG(dumpStack());
  869. }
  870. //===----------------------------------------------------------------------===//
  871. // Instruction transformation implementation
  872. //===----------------------------------------------------------------------===//
/// handleCall - Strip the implicit FP register operands from a call, record
/// which FP registers carry its return values, and reset the simulated
/// stack across the call (re-pushing any returned values).
void FPS::handleCall(MachineBasicBlock::iterator &I) {
  MachineInstr &MI = *I;
  unsigned STReturns = 0; // Bitmask of FP regs defined (returned) by the call.
  bool ClobbersFPStack = false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI.getOperand(i);

    // Check if this call clobbers the FP stack. Testing FP0 in the regmask
    // is sufficient: the debug loop below asserts all eight FP registers are
    // clobbered (or preserved) consistently.
    if (Op.isRegMask()) {
      bool ClobbersFP0 = Op.clobbersPhysReg(X86::FP0);
#ifndef NDEBUG
      static_assert(X86::FP7 - X86::FP0 == 7, "sequential FP regnumbers");
      for (unsigned i = 1; i != 8; ++i)
        assert(Op.clobbersPhysReg(X86::FP0 + i) == ClobbersFP0 &&
               "Inconsistent FP register clobber");
#endif
      if (ClobbersFP0)
        ClobbersFPStack = true;
    }

    if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
      continue;

    assert(Op.isImplicit() && "Expected implicit def/use");

    if (Op.isDef())
      STReturns |= 1 << getFPReg(Op);

    // Remove the operand so that later passes don't see it.
    MI.removeOperand(i);
    --i; // Compensate: remaining operands shift down by one.
    --e;
  }

  // Most calls should have a regmask that clobbers the FP registers. If it
  // isn't present then the register allocator didn't spill the FP registers
  // so they are still on the stack.
  assert((ClobbersFPStack || STReturns == 0) &&
         "ST returns without FP stack clobber");
  if (!ClobbersFPStack)
    return;

  unsigned N = countTrailingOnes(STReturns);

  // FP registers used for function return must be consecutive starting at
  // FP0
  assert(STReturns == 0 || (isMask_32(STReturns) && N <= 2));

  // Reset the FP Stack - It is required because of possible leftovers from
  // passed arguments. The caller should assume that the FP stack is
  // returned empty (unless the callee returns values on FP stack).
  while (StackTop > 0)
    popReg();

  // Re-push the N returned values; the loop order makes FP0 end up at the
  // top of the simulated stack.
  for (unsigned I = 0; I < N; ++I)
    pushReg(N - I - 1);

  // If this call has been modified, drop all variable values defined by it.
  // We can't track them once they've been stackified.
  if (STReturns)
    I->dropDebugNumber();
}
  925. /// If RET has an FP register use operand, pass the first one in ST(0) and
  926. /// the second one in ST(1).
void FPS::handleReturn(MachineBasicBlock::iterator &I) {
  MachineInstr &MI = *I;

  // Find the register operands.
  unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
  unsigned LiveMask = 0; // Bitmask of FP regs carried by this return.

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI.getOperand(i);
    if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
      continue;
    // FP Register uses must be kills unless there are two uses of the same
    // register, in which case only one will be a kill.
    assert(Op.isUse() &&
           (Op.isKill() ||                    // Marked kill.
            getFPReg(Op) == FirstFPRegOp ||   // Second instance.
            MI.killsRegister(Op.getReg())) && // Later use is marked kill.
           "Ret only defs operands, and values aren't live beyond it");

    if (FirstFPRegOp == ~0U)
      FirstFPRegOp = getFPReg(Op);
    else {
      assert(SecondFPRegOp == ~0U && "More than two fp operands!");
      SecondFPRegOp = getFPReg(Op);
    }
    LiveMask |= (1 << getFPReg(Op));

    // Remove the operand so that later passes don't see it.
    MI.removeOperand(i);
    --i; // Compensate for the removal.
    --e;
  }

  // We may have been carrying spurious live-ins, so make sure only the
  // returned registers are left live.
  adjustLiveRegs(LiveMask, MI);
  if (!LiveMask) return;  // Quick check to see if any are possible.

  // There are only four possibilities here:
  // 1) we are returning a single FP value. In this case, it has to be in
  //    ST(0) already, so just declare success by removing the value from the
  //    FP Stack.
  if (SecondFPRegOp == ~0U) {
    // Assert that the top of stack contains the right FP register.
    assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
           "Top of stack not the right register for RET!");

    // Ok, everything is good, mark the value as not being on the stack
    // anymore so that our assertion about the stack being empty at end of
    // block doesn't fire.
    StackTop = 0;
    return;
  }

  // Otherwise, we are returning two values:
  // 2) If returning the same value for both, we only have one thing in the FP
  //    stack. Consider: RET FP1, FP1
  if (StackTop == 1) {
    assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0) &&
           "Stack misconfiguration for RET!");

    // Duplicate the TOS so that we return it twice. Just pick some other FPx
    // register to hold it.
    unsigned NewReg = ScratchFPReg;
    duplicateToTop(FirstFPRegOp, NewReg, MI);
    FirstFPRegOp = NewReg;
  }

  // Okay we know we have two different FPx operands now:
  assert(StackTop == 2 && "Must have two values live!");

  // 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
  //    in ST(1). In this case, emit an fxch.
  if (getStackEntry(0) == SecondFPRegOp) {
    assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
    moveToTop(FirstFPRegOp, MI);
  }

  // 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
  //    ST(1). Just remove both from our understanding of the stack and return.
  assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
  assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
  StackTop = 0;
}
  999. /// handleZeroArgFP - ST(0) = fld0 ST(0) = flds <mem>
  1000. ///
  1001. void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
  1002. MachineInstr &MI = *I;
  1003. unsigned DestReg = getFPReg(MI.getOperand(0));
  1004. // Change from the pseudo instruction to the concrete instruction.
  1005. MI.removeOperand(0); // Remove the explicit ST(0) operand
  1006. MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
  1007. MI.addOperand(
  1008. MachineOperand::CreateReg(X86::ST0, /*isDef*/ true, /*isImp*/ true));
  1009. // Result gets pushed on the stack.
  1010. pushReg(DestReg);
  1011. MI.dropDebugNumber();
  1012. }
  1013. /// handleOneArgFP - fst <mem>, ST(0)
  1014. ///
  1015. void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
  1016. MachineInstr &MI = *I;
  1017. unsigned NumOps = MI.getDesc().getNumOperands();
  1018. assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
  1019. "Can only handle fst* & ftst instructions!");
  1020. // Is this the last use of the source register?
  1021. unsigned Reg = getFPReg(MI.getOperand(NumOps - 1));
  1022. bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
  1023. // FISTP64m is strange because there isn't a non-popping versions.
  1024. // If we have one _and_ we don't want to pop the operand, duplicate the value
  1025. // on the stack instead of moving it. This ensure that popping the value is
  1026. // always ok.
  1027. // Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
  1028. //
  1029. if (!KillsSrc && (MI.getOpcode() == X86::IST_Fp64m32 ||
  1030. MI.getOpcode() == X86::ISTT_Fp16m32 ||
  1031. MI.getOpcode() == X86::ISTT_Fp32m32 ||
  1032. MI.getOpcode() == X86::ISTT_Fp64m32 ||
  1033. MI.getOpcode() == X86::IST_Fp64m64 ||
  1034. MI.getOpcode() == X86::ISTT_Fp16m64 ||
  1035. MI.getOpcode() == X86::ISTT_Fp32m64 ||
  1036. MI.getOpcode() == X86::ISTT_Fp64m64 ||
  1037. MI.getOpcode() == X86::IST_Fp64m80 ||
  1038. MI.getOpcode() == X86::ISTT_Fp16m80 ||
  1039. MI.getOpcode() == X86::ISTT_Fp32m80 ||
  1040. MI.getOpcode() == X86::ISTT_Fp64m80 ||
  1041. MI.getOpcode() == X86::ST_FpP80m)) {
  1042. duplicateToTop(Reg, ScratchFPReg, I);
  1043. } else {
  1044. moveToTop(Reg, I); // Move to the top of the stack...
  1045. }
  1046. // Convert from the pseudo instruction to the concrete instruction.
  1047. MI.removeOperand(NumOps - 1); // Remove explicit ST(0) operand
  1048. MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
  1049. MI.addOperand(
  1050. MachineOperand::CreateReg(X86::ST0, /*isDef*/ false, /*isImp*/ true));
  1051. if (MI.getOpcode() == X86::IST_FP64m || MI.getOpcode() == X86::ISTT_FP16m ||
  1052. MI.getOpcode() == X86::ISTT_FP32m || MI.getOpcode() == X86::ISTT_FP64m ||
  1053. MI.getOpcode() == X86::ST_FP80m) {
  1054. if (StackTop == 0)
  1055. report_fatal_error("Stack empty??");
  1056. --StackTop;
  1057. } else if (KillsSrc) { // Last use of operand?
  1058. popStackAfter(I);
  1059. }
  1060. MI.dropDebugNumber();
  1061. }
  1062. /// handleOneArgFPRW: Handle instructions that read from the top of stack and
  1063. /// replace the value with a newly computed value. These instructions may have
  1064. /// non-fp operands after their FP operands.
  1065. ///
  1066. /// Examples:
  1067. /// R1 = fchs R2
  1068. /// R1 = fadd R2, [mem]
  1069. ///
  1070. void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
  1071. MachineInstr &MI = *I;
  1072. #ifndef NDEBUG
  1073. unsigned NumOps = MI.getDesc().getNumOperands();
  1074. assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
  1075. #endif
  1076. // Is this the last use of the source register?
  1077. unsigned Reg = getFPReg(MI.getOperand(1));
  1078. bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
  1079. if (KillsSrc) {
  1080. // If this is the last use of the source register, just make sure it's on
  1081. // the top of the stack.
  1082. moveToTop(Reg, I);
  1083. if (StackTop == 0)
  1084. report_fatal_error("Stack cannot be empty!");
  1085. --StackTop;
  1086. pushReg(getFPReg(MI.getOperand(0)));
  1087. } else {
  1088. // If this is not the last use of the source register, _copy_ it to the top
  1089. // of the stack.
  1090. duplicateToTop(Reg, getFPReg(MI.getOperand(0)), I);
  1091. }
  1092. // Change from the pseudo instruction to the concrete instruction.
  1093. MI.removeOperand(1); // Drop the source operand.
  1094. MI.removeOperand(0); // Drop the destination operand.
  1095. MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
  1096. MI.dropDebugNumber();
  1097. }
  1098. //===----------------------------------------------------------------------===//
  1099. // Define tables of various ways to map pseudo instructions
  1100. //
  1101. // ForwardST0Table - Map: A = B op C into: ST(0) = ST(0) op ST(i)
static const TableEntry ForwardST0Table[] = {
    // Sorted by pseudo opcode; used when the destination is ST(0) and the
    // operands appear in source order: ST(0) = ST(0) op ST(i).
    { X86::ADD_Fp32, X86::ADD_FST0r },
    { X86::ADD_Fp64, X86::ADD_FST0r },
    { X86::ADD_Fp80, X86::ADD_FST0r },
    { X86::DIV_Fp32, X86::DIV_FST0r },
    { X86::DIV_Fp64, X86::DIV_FST0r },
    { X86::DIV_Fp80, X86::DIV_FST0r },
    { X86::MUL_Fp32, X86::MUL_FST0r },
    { X86::MUL_Fp64, X86::MUL_FST0r },
    { X86::MUL_Fp80, X86::MUL_FST0r },
    { X86::SUB_Fp32, X86::SUB_FST0r },
    { X86::SUB_Fp64, X86::SUB_FST0r },
    { X86::SUB_Fp80, X86::SUB_FST0r },
};
  1116. // ReverseST0Table - Map: A = B op C into: ST(0) = ST(i) op ST(0)
static const TableEntry ReverseST0Table[] = {
    // ST(0) = ST(i) op ST(0). Commutative ops reuse the forward opcode;
    // div/sub need their reversed (R) forms.
    { X86::ADD_Fp32, X86::ADD_FST0r },  // commutative
    { X86::ADD_Fp64, X86::ADD_FST0r },  // commutative
    { X86::ADD_Fp80, X86::ADD_FST0r },  // commutative
    { X86::DIV_Fp32, X86::DIVR_FST0r },
    { X86::DIV_Fp64, X86::DIVR_FST0r },
    { X86::DIV_Fp80, X86::DIVR_FST0r },
    { X86::MUL_Fp32, X86::MUL_FST0r },  // commutative
    { X86::MUL_Fp64, X86::MUL_FST0r },  // commutative
    { X86::MUL_Fp80, X86::MUL_FST0r },  // commutative
    { X86::SUB_Fp32, X86::SUBR_FST0r },
    { X86::SUB_Fp64, X86::SUBR_FST0r },
    { X86::SUB_Fp80, X86::SUBR_FST0r },
};
  1131. // ForwardSTiTable - Map: A = B op C into: ST(i) = ST(0) op ST(i)
static const TableEntry ForwardSTiTable[] = {
    // ST(i) = ST(0) op ST(i). Because the x87 r-forms compute
    // ST(i) = ST(i) op ST(0), forward order needs the reversed opcodes for
    // the non-commutative div/sub.
    { X86::ADD_Fp32, X86::ADD_FrST0 },  // commutative
    { X86::ADD_Fp64, X86::ADD_FrST0 },  // commutative
    { X86::ADD_Fp80, X86::ADD_FrST0 },  // commutative
    { X86::DIV_Fp32, X86::DIVR_FrST0 },
    { X86::DIV_Fp64, X86::DIVR_FrST0 },
    { X86::DIV_Fp80, X86::DIVR_FrST0 },
    { X86::MUL_Fp32, X86::MUL_FrST0 },  // commutative
    { X86::MUL_Fp64, X86::MUL_FrST0 },  // commutative
    { X86::MUL_Fp80, X86::MUL_FrST0 },  // commutative
    { X86::SUB_Fp32, X86::SUBR_FrST0 },
    { X86::SUB_Fp64, X86::SUBR_FrST0 },
    { X86::SUB_Fp80, X86::SUBR_FrST0 },
};
  1146. // ReverseSTiTable - Map: A = B op C into: ST(i) = ST(i) op ST(0)
static const TableEntry ReverseSTiTable[] = {
    // ST(i) = ST(i) op ST(0) — matches the x87 r-forms directly.
    { X86::ADD_Fp32, X86::ADD_FrST0 },
    { X86::ADD_Fp64, X86::ADD_FrST0 },
    { X86::ADD_Fp80, X86::ADD_FrST0 },
    { X86::DIV_Fp32, X86::DIV_FrST0 },
    { X86::DIV_Fp64, X86::DIV_FrST0 },
    { X86::DIV_Fp80, X86::DIV_FrST0 },
    { X86::MUL_Fp32, X86::MUL_FrST0 },
    { X86::MUL_Fp64, X86::MUL_FrST0 },
    { X86::MUL_Fp80, X86::MUL_FrST0 },
    { X86::SUB_Fp32, X86::SUB_FrST0 },
    { X86::SUB_Fp64, X86::SUB_FrST0 },
    { X86::SUB_Fp80, X86::SUB_FrST0 },
};
  1161. /// handleTwoArgFP - Handle instructions like FADD and friends which are virtual
  1162. /// instructions which need to be simplified and possibly transformed.
  1163. ///
  1164. /// Result: ST(0) = fsub ST(0), ST(i)
  1165. /// ST(i) = fsub ST(0), ST(i)
  1166. /// ST(0) = fsubr ST(0), ST(i)
  1167. /// ST(i) = fsubr ST(0), ST(i)
  1168. ///
void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
  ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
  ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
  MachineInstr &MI = *I;

  unsigned NumOperands = MI.getDesc().getNumOperands();
  assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
  unsigned Dest = getFPReg(MI.getOperand(0));
  unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
  unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
  bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
  bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
  const DebugLoc &dl = MI.getDebugLoc();

  unsigned TOS = getStackEntry(0);

  // One of our operands must be on the top of the stack. If neither is yet, we
  // need to move one.
  if (Op0 != TOS && Op1 != TOS) { // No operand at TOS?
    // We can choose to move either operand to the top of the stack. If one of
    // the operands is killed by this instruction, we want that one so that we
    // can update right on top of the old version.
    if (KillsOp0) {
      moveToTop(Op0, I); // Move dead operand to TOS.
      TOS = Op0;
    } else if (KillsOp1) {
      moveToTop(Op1, I);
      TOS = Op1;
    } else {
      // All of the operands are live after this instruction executes, so we
      // cannot update on top of any operand. Because of this, we must
      // duplicate one of the stack elements to the top. It doesn't matter
      // which one we pick.
      //
      duplicateToTop(Op0, Dest, I);
      Op0 = TOS = Dest;
      KillsOp0 = true; // The duplicate (named Dest) is consumable.
    }
  } else if (!KillsOp0 && !KillsOp1) {
    // If we DO have one of our operands at the top of the stack, but we don't
    // have a dead operand, we must duplicate one of the operands to a new slot
    // on the stack.
    duplicateToTop(Op0, Dest, I);
    Op0 = TOS = Dest;
    KillsOp0 = true;
  }

  // Now we know that one of our operands is on the top of the stack, and at
  // least one of our operands is killed by this instruction.
  assert((TOS == Op0 || TOS == Op1) && (KillsOp0 || KillsOp1) &&
         "Stack conditions not set up right!");

  // We decide which form to use based on what is on the top of the stack, and
  // which operand is killed by this instruction:
  //   Forward/Reverse = whether Op0 (LHS) is the one at TOS;
  //   ST0/STi         = whether the result overwrites ST(0) or ST(i),
  //                     chosen so that the surviving operand is preserved.
  ArrayRef<TableEntry> InstTable;
  bool isForward = TOS == Op0;
  bool updateST0 = (TOS == Op0 && !KillsOp1) || (TOS == Op1 && !KillsOp0);
  if (updateST0) {
    if (isForward)
      InstTable = ForwardST0Table;
    else
      InstTable = ReverseST0Table;
  } else {
    if (isForward)
      InstTable = ForwardSTiTable;
    else
      InstTable = ReverseSTiTable;
  }

  int Opcode = Lookup(InstTable, MI.getOpcode());
  assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");

  // NotTOS - The register which is not on the top of stack...
  unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;

  // Replace the old instruction with a new instruction. MI is unlinked (not
  // deleted yet) and I advances past it; the new instruction is inserted
  // where MI used to be.
  MBB->remove(&*I++);
  I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));

  // Preserve the "cannot raise FP exception" fact on the replacement.
  if (!MI.mayRaiseFPException())
    I->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // If both operands are killed, pop one off of the stack in addition to
  // overwriting the other one.
  if (KillsOp0 && KillsOp1 && Op0 != Op1) {
    assert(!updateST0 && "Should have updated other operand!");
    popStackAfter(I); // Pop the top of stack
  }

  // Update stack information so that we know the destination register is now on
  // the stack.
  unsigned UpdatedSlot = getSlot(updateST0 ? TOS : NotTOS);
  assert(UpdatedSlot < StackTop && Dest < 7);
  Stack[UpdatedSlot] = Dest;
  RegMap[Dest] = UpdatedSlot;
  MBB->getParent()->deleteMachineInstr(&MI); // Remove the old instruction
}
  1255. /// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
  1256. /// register arguments and no explicit destinations.
  1257. ///
  1258. void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
  1259. MachineInstr &MI = *I;
  1260. unsigned NumOperands = MI.getDesc().getNumOperands();
  1261. assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
  1262. unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
  1263. unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
  1264. bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
  1265. bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
  1266. // Make sure the first operand is on the top of stack, the other one can be
  1267. // anywhere.
  1268. moveToTop(Op0, I);
  1269. // Change from the pseudo instruction to the concrete instruction.
  1270. MI.getOperand(0).setReg(getSTReg(Op1));
  1271. MI.removeOperand(1);
  1272. MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
  1273. MI.dropDebugNumber();
  1274. // If any of the operands are killed by this instruction, free them.
  1275. if (KillsOp0) freeStackSlotAfter(I, Op0);
  1276. if (KillsOp1 && Op0 != Op1) freeStackSlotAfter(I, Op1);
  1277. }
  1278. /// handleCondMovFP - Handle two address conditional move instructions. These
  1279. /// instructions move a st(i) register to st(0) iff a condition is true. These
  1280. /// instructions require that the first operand is at the top of the stack, but
  1281. /// otherwise don't modify the stack at all.
  1282. void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
  1283. MachineInstr &MI = *I;
  1284. unsigned Op0 = getFPReg(MI.getOperand(0));
  1285. unsigned Op1 = getFPReg(MI.getOperand(2));
  1286. bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
  1287. // The first operand *must* be on the top of the stack.
  1288. moveToTop(Op0, I);
  1289. // Change the second operand to the stack register that the operand is in.
  1290. // Change from the pseudo instruction to the concrete instruction.
  1291. MI.removeOperand(0);
  1292. MI.removeOperand(1);
  1293. MI.getOperand(0).setReg(getSTReg(Op1));
  1294. MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
  1295. MI.dropDebugNumber();
  1296. // If we kill the second operand, make sure to pop it from the stack.
  1297. if (Op0 != Op1 && KillsOp1) {
  1298. // Get this value off of the register stack.
  1299. freeStackSlotAfter(I, Op1);
  1300. }
  1301. }
  1302. /// handleSpecialFP - Handle special instructions which behave unlike other
  1303. /// floating point instructions. This is primarily intended for use by pseudo
  1304. /// instructions.
  1305. ///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
  MachineInstr &MI = *Inst;

  // Calls and returns cross an ABI boundary and have dedicated handlers that
  // fix up the FP stack around them.
  if (MI.isCall()) {
    handleCall(Inst);
    return;
  }

  if (MI.isReturn()) {
    handleReturn(Inst);
    return;
  }

  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SpecialFP instruction!");
  case TargetOpcode::COPY: {
    // We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
    const MachineOperand &MO1 = MI.getOperand(1);
    const MachineOperand &MO0 = MI.getOperand(0);
    bool KillsSrc = MI.killsRegister(MO1.getReg());

    // FP <- FP copy.
    unsigned DstFP = getFPReg(MO0);
    unsigned SrcFP = getFPReg(MO1);
    assert(isLive(SrcFP) && "Cannot copy dead register");
    if (KillsSrc) {
      // If the input operand is killed, we can just change the owner of the
      // incoming stack slot into the result.
      unsigned Slot = getSlot(SrcFP);
      Stack[Slot] = DstFP;
      RegMap[DstFP] = Slot;
    } else {
      // For COPY we just duplicate the specified value to a new stack slot.
      // This could be made better, but would require substantial changes.
      duplicateToTop(SrcFP, DstFP, Inst);
    }
    break;
  }

  case TargetOpcode::IMPLICIT_DEF: {
    // All FP registers must be explicitly defined, so load a 0 instead.
    unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
    LLVM_DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
    BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
    pushReg(Reg);
    break;
  }

  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    // The inline asm MachineInstr currently only *uses* FP registers for the
    // 'f' constraint. These should be turned into the current ST(x) register
    // in the machine instr.
    //
    // There are special rules for x87 inline assembly. The compiler must know
    // exactly how many registers are popped and pushed implicitly by the asm.
    // Otherwise it is not possible to restore the stack state after the inline
    // asm.
    //
    // There are 3 kinds of input operands:
    //
    // 1. Popped inputs. These must appear at the stack top in ST0-STn. A
    //    popped input operand must be in a fixed stack slot, and it is either
    //    tied to an output operand, or in the clobber list. The MI has ST use
    //    and def operands for these inputs.
    //
    // 2. Fixed inputs. These inputs appear in fixed stack slots, but are
    //    preserved by the inline asm. The fixed stack slots must be STn-STm
    //    following the popped inputs. A fixed input operand cannot be tied to
    //    an output or appear in the clobber list. The MI has ST use operands
    //    and no defs for these inputs.
    //
    // 3. Preserved inputs. These inputs use the "f" constraint which is
    //    represented as an FP register. The inline asm won't change these
    //    stack slots.
    //
    // Outputs must be in ST registers, FP outputs are not allowed. Clobbered
    // registers do not count as output operands. The inline asm changes the
    // stack as if it popped all the popped inputs and then pushed all the
    // output operands.

    // Scan the assembly for ST registers used, defined and clobbered. We can
    // only tell clobbers from defs by looking at the asm descriptor.
    unsigned STUses = 0, STDefs = 0, STClobbers = 0;
    unsigned NumOps = 0;
    SmallSet<unsigned, 1> FRegIdx;
    unsigned RCID;

    // Walk the operand-group flag words; each flag word at index i describes
    // the NumOps register operands that follow it.
    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI.getNumOperands();
         i != e && MI.getOperand(i).isImm(); i += 1 + NumOps) {
      unsigned Flags = MI.getOperand(i).getImm();

      NumOps = InlineAsm::getNumOperandRegisters(Flags);
      if (NumOps != 1)
        continue;
      const MachineOperand &MO = MI.getOperand(i + 1);
      if (!MO.isReg())
        continue;
      // Only FP0..FP7 are of interest; the unsigned subtraction pushes every
      // other register past 8.
      unsigned STReg = MO.getReg() - X86::FP0;
      if (STReg >= 8)
        continue;

      // If the flag has a register class constraint, this must be an operand
      // with constraint "f". Record its index and continue.
      if (InlineAsm::hasRegClassConstraint(Flags, RCID)) {
        FRegIdx.insert(i + 1);
        continue;
      }

      switch (InlineAsm::getKind(Flags)) {
      case InlineAsm::Kind_RegUse:
        STUses |= (1u << STReg);
        break;
      case InlineAsm::Kind_RegDef:
      case InlineAsm::Kind_RegDefEarlyClobber:
        STDefs |= (1u << STReg);
        break;
      case InlineAsm::Kind_Clobber:
        STClobbers |= (1u << STReg);
        break;
      default:
        break;
      }
    }

    // Fixed inputs must occupy a contiguous run of slots starting at ST0,
    // i.e. the use mask must be a low-bit mask.
    if (STUses && !isMask_32(STUses))
      MI.emitError("fixed input regs must be last on the x87 stack");
    unsigned NumSTUses = countTrailingOnes(STUses);

    // Defs must be contiguous from the stack top. ST0-STn.
    if (STDefs && !isMask_32(STDefs)) {
      MI.emitError("output regs must be last on the x87 stack");
      // Round the mask up so the bookkeeping below stays consistent even
      // after the error was reported.
      STDefs = NextPowerOf2(STDefs) - 1;
    }
    unsigned NumSTDefs = countTrailingOnes(STDefs);

    // So must the clobbered stack slots. ST0-STm, m >= n.
    if (STClobbers && !isMask_32(STDefs | STClobbers))
      MI.emitError("clobbers must be last on the x87 stack");

    // Popped inputs are the ones that are also clobbered or defined.
    unsigned STPopped = STUses & (STDefs | STClobbers);
    if (STPopped && !isMask_32(STPopped))
      MI.emitError("implicitly popped regs must be last on the x87 stack");
    unsigned NumSTPopped = countTrailingOnes(STPopped);

    LLVM_DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
                      << NumSTPopped << ", and defines " << NumSTDefs
                      << " regs.\n");

#ifndef NDEBUG
    // If any input operand uses constraint "f", all output register
    // constraints must be early-clobber defs.
    for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I)
      if (FRegIdx.count(I)) {
        assert((1 << getFPReg(MI.getOperand(I)) & STDefs) == 0 &&
               "Operands with constraint \"f\" cannot overlap with defs");
      }
#endif

    // Collect all FP registers (register operands with constraints "t", "u",
    // and "f") to kill after the instruction.
    // The seed expression sets bits 8..NumFPRegs-1 only: non-architectural
    // scratch FP regs (if NumFPRegs > 8) start out marked for freeing, while
    // the 8 hardware slots are added below only when actually killed.
    unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
    for (const MachineOperand &Op : MI.operands()) {
      if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
        continue;
      unsigned FPReg = getFPReg(Op);

      // If we kill this operand, make sure to pop it from the stack after the
      // asm. We just remember it for now, and pop them all off at the end in
      // a batch.
      if (Op.isUse() && Op.isKill())
        FPKills |= 1U << FPReg;
    }

    // Do not include registers that are implicitly popped by defs/clobbers.
    FPKills &= ~(STDefs | STClobbers);

    // Now we can rearrange the live registers to match what was requested:
    // the fixed inputs must end up in slots 0..NumSTUses-1, in order.
    unsigned char STUsesArray[8];

    for (unsigned I = 0; I < NumSTUses; ++I)
      STUsesArray[I] = I;

    shuffleStackTop(STUsesArray, NumSTUses, Inst);
    LLVM_DEBUG({
      dbgs() << "Before asm: ";
      dumpStack();
    });

    // With the stack layout fixed, rewrite the FP registers.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &Op = MI.getOperand(i);
      if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
        continue;

      unsigned FPReg = getFPReg(Op);

      if (FRegIdx.count(i))
        // Operand with constraint "f".
        Op.setReg(getSTReg(FPReg));
      else
        // Operand with a single register class constraint ("t" or "u").
        Op.setReg(X86::ST0 + FPReg);
    }

    // Simulate the inline asm popping its inputs and pushing its outputs.
    StackTop -= NumSTPopped;
    for (unsigned i = 0; i < NumSTDefs; ++i)
      pushReg(NumSTDefs - i - 1);

    // If this asm kills any FP registers (is the last use of them) we must
    // explicitly emit pop instructions for them. Do this now after the asm has
    // executed so that the ST(x) numbers are not off (which would happen if we
    // did this inline with operand rewriting).
    //
    // Note: this might be a non-optimal pop sequence. We might be able to do
    // better by trying to pop in stack order or something.
    while (FPKills) {
      unsigned FPReg = countTrailingZeros(FPKills);
      if (isLive(FPReg))
        freeStackSlotAfter(Inst, FPReg);
      FPKills &= ~(1U << FPReg);
    }

    // Don't delete the inline asm!
    return;
  }
  }

  Inst = MBB->erase(Inst); // Remove the pseudo instruction

  // We want to leave I pointing to the previous instruction, but what if we
  // just erased the first instruction?
  if (Inst == MBB->begin()) {
    // There is no previous instruction to park the iterator on, so insert a
    // harmless KILL to act as a placeholder the caller can advance past.
    LLVM_DEBUG(dbgs() << "Inserting dummy KILL\n");
    Inst = BuildMI(*MBB, Inst, DebugLoc(), TII->get(TargetOpcode::KILL));
  } else
    --Inst;
}
  1515. void FPS::setKillFlags(MachineBasicBlock &MBB) const {
  1516. const TargetRegisterInfo &TRI =
  1517. *MBB.getParent()->getSubtarget().getRegisterInfo();
  1518. LivePhysRegs LPR(TRI);
  1519. LPR.addLiveOuts(MBB);
  1520. for (MachineInstr &MI : llvm::reverse(MBB)) {
  1521. if (MI.isDebugInstr())
  1522. continue;
  1523. std::bitset<8> Defs;
  1524. SmallVector<MachineOperand *, 2> Uses;
  1525. for (auto &MO : MI.operands()) {
  1526. if (!MO.isReg())
  1527. continue;
  1528. unsigned Reg = MO.getReg() - X86::FP0;
  1529. if (Reg >= 8)
  1530. continue;
  1531. if (MO.isDef()) {
  1532. Defs.set(Reg);
  1533. if (!LPR.contains(MO.getReg()))
  1534. MO.setIsDead();
  1535. } else
  1536. Uses.push_back(&MO);
  1537. }
  1538. for (auto *MO : Uses)
  1539. if (Defs.test(getFPReg(*MO)) || !LPR.contains(MO->getReg()))
  1540. MO->setIsKill();
  1541. LPR.stepBackward(MI);
  1542. }
  1543. }