#pragma once

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for this
  // processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;

  SmallVector<unsigned, 16> ResourceFactors;

  // Multiply to normalize microops to resource units.
  unsigned MicroOpFactor = 0;

  // Resource units per cycle. Latency normalization factor.
  unsigned ResourceLCM = 0;

  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}

  /// Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
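  ///
  /// Illustrative usage sketch (assumes a MachineFunction MF is in scope):
  /// \code
  ///   TargetSchedModel SchedModel;
  ///   SchedModel.init(&MF.getSubtarget());
  /// \endcode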
  void init(const TargetSubtargetInfo *TSInfo);

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// TargetSubtargetInfo getter.
  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
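  ///
  /// Illustrative sketch of how clients typically branch on the available
  /// model kind (assumes an initialized TargetSchedModel named SchedModel):
  /// \code
  ///   if (SchedModel.hasInstrSchedModel()) {
  ///     // Per-operand latencies and processor-resource usage are available.
  ///   } else if (SchedModel.hasInstrItineraries()) {
  ///     // Only pipeline-stage itinerary data is available.
  ///   }
  /// \endcode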
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }

  /// Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// Return true if a new group must begin.
  bool mustBeginGroup(const MachineInstr *MI,
                      const MCSchedClassDesc *SC = nullptr) const;

  /// Return true if the current group must end.
  bool mustEndGroup(const MachineInstr *MI,
                    const MCSchedClassDesc *SC = nullptr) const;

  /// Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;

  /// Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  using ProcResIter = const MCWriteProcResEntry *;

  // Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }
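
  // Illustrative sketch of walking the resources consumed by an instruction's
  // scheduling class (assumes an initialized model and a MachineInstr *MI;
  // ProcResourceIdx is the resource-index field of MCWriteProcResEntry in
  // llvm/MC/MCSchedule.h):
  //
  //   const MCSchedClassDesc *SC = SchedModel.resolveSchedClass(MI);
  //   if (SC && SC->isValid()) {
  //     for (TargetSchedModel::ProcResIter
  //            PI = SchedModel.getWriteProcResBegin(SC),
  //            PE = SchedModel.getWriteProcResEnd(SC);
  //          PI != PE; ++PI) {
  //       unsigned ResIdx = PI->ProcResourceIdx;
  //       (void)ResIdx; // e.g. feed getProcResource() or getResourceFactor()
  //     }
  //   }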

  /// Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }
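
  // Worked example of the normalization factors above, with illustrative
  // numbers rather than any real scheduling model: suppose IssueWidth is 4
  // and the model defines one resource kind with 2 units and another with 3.
  // Taking the least common multiple as the per-cycle normalization gives:
  //
  //   ResourceLCM   = lcm(4, 2, 3) = 12   // getLatencyFactor()
  //   MicroOpFactor = 12 / 4       = 3    // getMicroOpFactor()
  //   Factor(A)     = 12 / 2       = 6    // getResourceFactor(A)
  //   Factor(B)     = 12 / 3       = 4    // getResourceFactor(B)
  //
  // With these factors, "units consumed * factor" becomes comparable across
  // micro-ops and the different resource kinds.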

  /// Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data-dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
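  ///
  /// Illustrative sketch (the operand indices are placeholders and must name
  /// an actual def operand of DefMI and use operand of UseMI):
  /// \code
  ///   unsigned Latency =
  ///       SchedModel.computeOperandLatency(DefMI, /*DefOperIdx=*/0,
  ///                                        UseMI, /*UseOperIdx=*/1);
  /// \endcode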
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;

  /// Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent
  /// of a particular use. computeOperandLatency is the preferred API, but this
  /// is occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present, this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this is so we preserve the previous behavior of
  /// the if converter after moving it to TargetSchedModel).
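  ///
  /// Illustrative sketch (assumes a MachineInstr *MI):
  /// \code
  ///   unsigned Cycles = SchedModel.computeInstrLatency(MI);
  /// \endcode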
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(const MCInst &Inst) const;
  unsigned computeInstrLatency(unsigned Opcode) const;

  /// Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                const MachineInstr *DepMI) const;

  /// Compute the reciprocal throughput of the given instruction.
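  ///
  /// Illustrative reading of the result: the average number of cycles between
  /// issues when many independent copies of the instruction execute, e.g.
  /// \code
  ///   double RThroughput = SchedModel.computeReciprocalThroughput(MI);
  /// \endcode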
  double computeReciprocalThroughput(const MachineInstr *MI) const;
  double computeReciprocalThroughput(const MCInst &MI) const;
  double computeReciprocalThroughput(unsigned Opcode) const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSCHEDULE_H

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif