  1. //===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. ///
  9. /// \file
  10. /// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
  11. ///
  12. //===----------------------------------------------------------------------===//
  13. #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
  14. #include "llvm/CodeGen/Analysis.h"
  15. #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  16. #include "llvm/CodeGen/GlobalISel/Utils.h"
  17. #include "llvm/CodeGen/MachineOperand.h"
  18. #include "llvm/CodeGen/MachineRegisterInfo.h"
  19. #include "llvm/CodeGen/TargetLowering.h"
  20. #include "llvm/IR/DataLayout.h"
  21. #include "llvm/IR/Instructions.h"
  22. #include "llvm/IR/LLVMContext.h"
  23. #include "llvm/IR/Module.h"
  24. #define DEBUG_TYPE "inline-asm-lowering"
  25. using namespace llvm;
// Out-of-line virtual method anchor: pins InlineAsmLowering's vtable to this
// translation unit.
void InlineAsmLowering::anchor() {}
namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

/// Accumulates the "extra info" immediate operand for an INLINEASM
/// instruction: side effects, alignstack, convergent, asm dialect, and the
/// MayLoad/MayStore bits implied by the resolved constraints.
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    // The dialect value scales Extra_AsmDialect, so multiplying folds the
    // dialect directly into the flag word.
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  /// Fold in the MayLoad/MayStore bits implied by one resolved operand.
  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  /// Returns the accumulated flag word.
  unsigned get() const { return Flags; }
};

} // namespace
/// Assign virtual/physical registers for the specified register operand.
///
/// Queries the target (via RefOpInfo's constraint, which may belong to the
/// matched output operand) for a fixed physical register or register class,
/// then fills OpInfo.Regs with either consecutive physical registers starting
/// at the assigned one, or freshly created virtual registers of that class.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    // The loop body is only the assert, so in release builds this is a plain
    // scan; the assert guards against walking past the end of the class.
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}
  118. /// Return an integer indicating how general CT is.
  119. static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  120. switch (CT) {
  121. case TargetLowering::C_Immediate:
  122. case TargetLowering::C_Other:
  123. case TargetLowering::C_Unknown:
  124. return 0;
  125. case TargetLowering::C_Register:
  126. return 1;
  127. case TargetLowering::C_RegisterClass:
  128. return 2;
  129. case TargetLowering::C_Memory:
  130. return 3;
  131. }
  132. llvm_unreachable("Invalid constraint type");
  133. }
  134. static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
  135. const TargetLowering *TLI) {
  136. assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  137. unsigned BestIdx = 0;
  138. TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  139. int BestGenerality = -1;
  140. // Loop over the options, keeping track of the most general one.
  141. for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
  142. TargetLowering::ConstraintType CType =
  143. TLI->getConstraintType(OpInfo.Codes[i]);
  144. // Indirect 'other' or 'immediate' constraints are not allowed.
  145. if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
  146. CType == TargetLowering::C_Register ||
  147. CType == TargetLowering::C_RegisterClass))
  148. continue;
  149. // If this is an 'other' or 'immediate' constraint, see if the operand is
  150. // valid for it. For example, on X86 we might have an 'rI' constraint. If
  151. // the operand is an integer in the range [0..31] we want to use I (saving a
  152. // load of a register), otherwise we must use 'r'.
  153. if (CType == TargetLowering::C_Other ||
  154. CType == TargetLowering::C_Immediate) {
  155. assert(OpInfo.Codes[i].size() == 1 &&
  156. "Unhandled multi-letter 'other' constraint");
  157. // FIXME: prefer immediate constraints if the target allows it
  158. }
  159. // Things with matching constraints can only be registers, per gcc
  160. // documentation. This mainly affects "g" constraints.
  161. if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
  162. continue;
  163. // This constraint letter is more general than the previous one, use it.
  164. int Generality = getConstraintGenerality(CType);
  165. if (Generality > BestGenerality) {
  166. BestType = CType;
  167. BestIdx = i;
  168. BestGenerality = Generality;
  169. }
  170. }
  171. OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  172. OpInfo.ConstraintType = BestType;
  173. }
  174. static void computeConstraintToUse(const TargetLowering *TLI,
  175. TargetLowering::AsmOperandInfo &OpInfo) {
  176. assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
  177. // Single-letter constraints ('r') are very common.
  178. if (OpInfo.Codes.size() == 1) {
  179. OpInfo.ConstraintCode = OpInfo.Codes[0];
  180. OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  181. } else {
  182. chooseConstraint(OpInfo, TLI);
  183. }
  184. // 'X' matches anything.
  185. if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
  186. // Labels and constants are handled elsewhere ('X' is the only thing
  187. // that matches labels). For Functions, the type here is the type of
  188. // the result, which is not what we want to look at; leave them alone.
  189. Value *Val = OpInfo.CallOperandVal;
  190. if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
  191. return;
  192. // Otherwise, try to resolve it to something we know about by looking at
  193. // the actual operand type.
  194. if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
  195. OpInfo.ConstraintCode = Repl;
  196. OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  197. }
  198. }
  199. }
  200. static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  201. unsigned Flag = I.getOperand(OpIdx).getImm();
  202. return InlineAsm::getNumOperandRegisters(Flag);
  203. }
  204. static bool buildAnyextOrCopy(Register Dst, Register Src,
  205. MachineIRBuilder &MIRBuilder) {
  206. const TargetRegisterInfo *TRI =
  207. MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  208. MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  209. auto SrcTy = MRI->getType(Src);
  210. if (!SrcTy.isValid()) {
  211. LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
  212. return false;
  213. }
  214. unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  215. unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);
  216. if (DstSize < SrcSize) {
  217. LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
  218. return false;
  219. }
  220. // Attempt to anyext small scalar sources.
  221. if (DstSize > SrcSize) {
  222. if (!SrcTy.isScalar()) {
  223. LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
  224. "destination register class\n");
  225. return false;
  226. }
  227. Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  228. }
  229. MIRBuilder.buildCopy(Dst, Src);
  230. return true;
  231. }
/// Lower an IR inline-asm call site to a MIR INLINEASM instruction.
///
/// Parses and resolves all constraints, builds the INLINEASM instruction with
/// the appropriate flag-word/register operand pairs (outputs first, then
/// inputs and clobbers), inserts it after any input-preparing copies, and
/// finally copies the defined output registers into the call's result vregs.
/// Returns false when any operand form is not supported yet.
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getAttributes().getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      // Direct output: its type comes from the call's result (or the matching
      // struct element for multi-result asm).
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    // Bail out on i64x8 operand types; they are not supported by this
    // lowering.
    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    // A matching input constraint borrows the register info of the output it
    // matches; all other operands reference themselves.
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass. Find a register that we can use.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass);

        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        // Walk flag-word by flag-word: each step skips one flag plus the
        // registers it covers.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  // Propagate the source-location metadata so diagnostics can point back at
  // the original asm statement.
  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }

  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }
      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}
  561. bool InlineAsmLowering::lowerAsmOperandForConstraint(
  562. Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
  563. MachineIRBuilder &MIRBuilder) const {
  564. if (Constraint.size() > 1)
  565. return false;
  566. char ConstraintLetter = Constraint[0];
  567. switch (ConstraintLetter) {
  568. default:
  569. return false;
  570. case 'i': // Simple Integer or Relocatable Constant
  571. case 'n': // immediate integer with a known value.
  572. if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
  573. assert(CI->getBitWidth() <= 64 &&
  574. "expected immediate to fit into 64-bits");
  575. // Boolean constants should be zero-extended, others are sign-extended
  576. bool IsBool = CI->getBitWidth() == 1;
  577. int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
  578. Ops.push_back(MachineOperand::CreateImm(ExtVal));
  579. return true;
  580. }
  581. return false;
  582. }
  583. }