
//===-- BPFInstrInfo.td - Target Description for BPF Target ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the BPF instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

include "BPFInstrFormats.td"

// Instruction Operands and Patterns

// These are target-independent nodes, but have target-specific formats.
def SDT_BPFCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>,
                                          SDTCisVT<1, iPTR>]>;
def SDT_BPFCallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_BPFCall         : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_BPFSetFlag      : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>]>;
def SDT_BPFSelectCC     : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 4>,
                                               SDTCisSameAs<4, 5>]>;
def SDT_BPFBrCC         : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisVT<3, OtherVT>]>;
def SDT_BPFWrapper      : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                               SDTCisPtrTy<0>]>;
def SDT_BPFMEMCPY       : SDTypeProfile<0, 4, [SDTCisVT<0, i64>,
                                               SDTCisVT<1, i64>,
                                               SDTCisVT<2, i64>,
                                               SDTCisVT<3, i64>]>;

def BPFcall          : SDNode<"BPFISD::CALL", SDT_BPFCall,
                              [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                               SDNPVariadic]>;
def BPFretflag       : SDNode<"BPFISD::RET_FLAG", SDTNone,
                              [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def BPFcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_BPFCallSeqStart,
                              [SDNPHasChain, SDNPOutGlue]>;
def BPFcallseq_end   : SDNode<"ISD::CALLSEQ_END", SDT_BPFCallSeqEnd,
                              [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def BPFbrcc          : SDNode<"BPFISD::BR_CC", SDT_BPFBrCC,
                              [SDNPHasChain, SDNPOutGlue, SDNPInGlue]>;
def BPFselectcc      : SDNode<"BPFISD::SELECT_CC", SDT_BPFSelectCC, [SDNPInGlue]>;
def BPFWrapper       : SDNode<"BPFISD::Wrapper", SDT_BPFWrapper>;
def BPFmemcpy        : SDNode<"BPFISD::MEMCPY", SDT_BPFMEMCPY,
                              [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                               SDNPMayStore, SDNPMayLoad]>;

def BPFIsLittleEndian : Predicate<"CurDAG->getDataLayout().isLittleEndian()">;
def BPFIsBigEndian    : Predicate<"!CurDAG->getDataLayout().isLittleEndian()">;
def BPFHasALU32       : Predicate<"Subtarget->getHasAlu32()">;
def BPFNoALU32        : Predicate<"!Subtarget->getHasAlu32()">;

def brtarget : Operand<OtherVT> {
  let PrintMethod = "printBrTargetOperand";
}

def calltarget : Operand<i64>;

def u64imm : Operand<i64> {
  let PrintMethod = "printImm64Operand";
}

def i64immSExt32 : PatLeaf<(i64 imm),
                           [{return isInt<32>(N->getSExtValue()); }]>;
def i32immSExt32 : PatLeaf<(i32 imm),
                           [{return isInt<32>(N->getSExtValue()); }]>;

// Addressing modes.
def ADDRri : ComplexPattern<i64, 2, "SelectAddr", [], []>;
def FIri : ComplexPattern<i64, 2, "SelectFIAddr", [add, or], []>;

// Address operands
def MEMri : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "getMemoryOpValue";
  let DecoderMethod = "decodeMemoryOpValue";
  let MIOperandInfo = (ops GPR, i16imm);
}

// Conditional code predicates - used for pattern matching for jump instructions
def BPF_CC_EQ  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETULE);}]>;
def BPF_CC_EQ_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETULE);}]>;

// For arithmetic and jump instructions the 8-bit 'code'
// field is divided into three parts:
//
//   +----------------+--------+--------------------+
//   |   4 bits       |  1 bit |   3 bits           |
//   | operation code | source | instruction class  |
//   +----------------+--------+--------------------+
//   (MSB)                                     (LSB)
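//
// For example, the 64-bit register add "dst += src" combines BPF_ADD,
// BPF_X and BPF_ALU64 in this scheme, giving the opcode byte 0x0f
// (field values as defined in BPFInstrFormats.td and the kernel's BPF
// instruction-set documentation).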
class TYPE_ALU_JMP<bits<4> op, bits<1> srctype,
                   dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {
  let Inst{63-60} = op;
  let Inst{59} = srctype;
}
// For load and store instructions the 8-bit 'code' field is divided as:
//
//   +--------+--------+-------------------+
//   | 3 bits | 2 bits |     3 bits        |
//   |  mode  |  size  | instruction class |
//   +--------+--------+-------------------+
//   (MSB)                            (LSB)
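//
// For example, the 64-bit load "dst = *(u64 *)(src + off)" combines
// BPF_MEM, BPF_DW and BPF_LDX, giving the opcode byte 0x79 (field values
// as defined in BPFInstrFormats.td and the kernel's BPF instruction-set
// documentation).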
class TYPE_LD_ST<bits<3> mode, bits<2> size,
                 dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {
  let Inst{63-61} = mode;
  let Inst{60-59} = size;
}
// jump instructions
class JMP_RR<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR:$dst, GPR:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i64:$dst, i64:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class JMP_RI<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR:$dst, i64imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i64:$dst, i64immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP;
}

class JMP_RR_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR32:$dst, GPR32:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i32:$dst, i32:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP32;
}

class JMP_RI_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR32:$dst, i32imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i32:$dst, i32immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP32;
}

multiclass J<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond, PatLeaf Cond32> {
  def _rr : JMP_RR<Opc, OpcodeStr, Cond>;
  def _ri : JMP_RI<Opc, OpcodeStr, Cond>;
  def _rr_32 : JMP_RR_32<Opc, OpcodeStr, Cond32>;
  def _ri_32 : JMP_RI_32<Opc, OpcodeStr, Cond32>;
}

let isBranch = 1, isTerminator = 1, hasDelaySlot=0 in {
// cmp+goto instructions
defm JEQ  : J<BPF_JEQ,  "==",  BPF_CC_EQ,  BPF_CC_EQ_32>;
defm JUGT : J<BPF_JGT,  ">",   BPF_CC_GTU, BPF_CC_GTU_32>;
defm JUGE : J<BPF_JGE,  ">=",  BPF_CC_GEU, BPF_CC_GEU_32>;
defm JNE  : J<BPF_JNE,  "!=",  BPF_CC_NE,  BPF_CC_NE_32>;
defm JSGT : J<BPF_JSGT, "s>",  BPF_CC_GT,  BPF_CC_GT_32>;
defm JSGE : J<BPF_JSGE, "s>=", BPF_CC_GE,  BPF_CC_GE_32>;
defm JULT : J<BPF_JLT,  "<",   BPF_CC_LTU, BPF_CC_LTU_32>;
defm JULE : J<BPF_JLE,  "<=",  BPF_CC_LEU, BPF_CC_LEU_32>;
defm JSLT : J<BPF_JSLT, "s<",  BPF_CC_LT,  BPF_CC_LT_32>;
defm JSLE : J<BPF_JSLE, "s<=", BPF_CC_LE,  BPF_CC_LE_32>;
}
// ALU instructions
class ALU_RI<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{31-0} = imm;
  let BPFClass = Class;
}

class ALU_RR<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<4> src;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let BPFClass = Class;
}

multiclass ALU<BPFArithOp Opc, string OpcodeStr, SDNode OpNode> {
  def _rr : ALU_RR<BPF_ALU64, Opc,
                   (outs GPR:$dst),
                   (ins GPR:$src2, GPR:$src),
                   "$dst "#OpcodeStr#" $src",
                   [(set GPR:$dst, (OpNode i64:$src2, i64:$src))]>;
  def _ri : ALU_RI<BPF_ALU64, Opc,
                   (outs GPR:$dst),
                   (ins GPR:$src2, i64imm:$imm),
                   "$dst "#OpcodeStr#" $imm",
                   [(set GPR:$dst, (OpNode GPR:$src2, i64immSExt32:$imm))]>;
  def _rr_32 : ALU_RR<BPF_ALU, Opc,
                      (outs GPR32:$dst),
                      (ins GPR32:$src2, GPR32:$src),
                      "$dst "#OpcodeStr#" $src",
                      [(set GPR32:$dst, (OpNode i32:$src2, i32:$src))]>;
  def _ri_32 : ALU_RI<BPF_ALU, Opc,
                      (outs GPR32:$dst),
                      (ins GPR32:$src2, i32imm:$imm),
                      "$dst "#OpcodeStr#" $imm",
                      [(set GPR32:$dst, (OpNode GPR32:$src2, i32immSExt32:$imm))]>;
}

let Constraints = "$dst = $src2" in {
  let isAsCheapAsAMove = 1 in {
    defm ADD : ALU<BPF_ADD, "+=", add>;
    defm SUB : ALU<BPF_SUB, "-=", sub>;
    defm OR  : ALU<BPF_OR, "|=", or>;
    defm AND : ALU<BPF_AND, "&=", and>;
    defm SLL : ALU<BPF_LSH, "<<=", shl>;
    defm SRL : ALU<BPF_RSH, ">>=", srl>;
    defm XOR : ALU<BPF_XOR, "^=", xor>;
    defm SRA : ALU<BPF_ARSH, "s>>=", sra>;
  }
  defm MUL : ALU<BPF_MUL, "*=", mul>;
  defm DIV : ALU<BPF_DIV, "/=", udiv>;
}
class NEG_RR<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, 0, outs, ins, asmstr, pattern> {
  bits<4> dst;

  let Inst{51-48} = dst;
  let BPFClass = Class;
}

let Constraints = "$dst = $src", isAsCheapAsAMove = 1 in {
  def NEG_64: NEG_RR<BPF_ALU64, BPF_NEG, (outs GPR:$dst), (ins GPR:$src),
                     "$dst = -$src",
                     [(set GPR:$dst, (ineg i64:$src))]>;
  def NEG_32: NEG_RR<BPF_ALU, BPF_NEG, (outs GPR32:$dst), (ins GPR32:$src),
                     "$dst = -$src",
                     [(set GPR32:$dst, (ineg i32:$src))]>;
}

class LD_IMM64<bits<4> Pseudo, string OpcodeStr>
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins u64imm:$imm),
                 "$dst "#OpcodeStr#" ${imm} ll",
                 [(set GPR:$dst, (i64 imm:$imm))]> {
  bits<4> dst;
  bits<64> imm;

  let Inst{51-48} = dst;
  let Inst{55-52} = Pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
  def LD_imm64 : LD_IMM64<0, "=">;
  def MOV_rr : ALU_RR<BPF_ALU64, BPF_MOV,
                      (outs GPR:$dst),
                      (ins GPR:$src),
                      "$dst = $src",
                      []>;
  def MOV_ri : ALU_RI<BPF_ALU64, BPF_MOV,
                      (outs GPR:$dst),
                      (ins i64imm:$imm),
                      "$dst = $imm",
                      [(set GPR:$dst, (i64 i64immSExt32:$imm))]>;
  def MOV_rr_32 : ALU_RR<BPF_ALU, BPF_MOV,
                         (outs GPR32:$dst),
                         (ins GPR32:$src),
                         "$dst = $src",
                         []>;
  def MOV_ri_32 : ALU_RI<BPF_ALU, BPF_MOV,
                         (outs GPR32:$dst),
                         (ins i32imm:$imm),
                         "$dst = $imm",
                         [(set GPR32:$dst, (i32 i32immSExt32:$imm))]>;
}
def FI_ri
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr),
                 "lea\t$dst, $addr",
                 [(set i64:$dst, FIri:$addr)]> {
  // This is a tentative instruction; it is replaced with MOV_rr and
  // ADD_ri during the PEI (prologue/epilogue insertion) phase.
  let Inst{51-48} = 0;
  let Inst{55-52} = 2;
  let Inst{47-32} = 0;
  let Inst{31-0} = 0;
  let BPFClass = BPF_LD;
}

def LD_pseudo
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins i64imm:$pseudo, u64imm:$imm),
                 "ld_pseudo\t$dst, $pseudo, $imm",
                 [(set GPR:$dst, (int_bpf_pseudo imm:$pseudo, imm:$imm))]> {
  bits<4> dst;
  bits<64> imm;
  bits<4> pseudo;

  let Inst{51-48} = dst;
  let Inst{55-52} = pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}
// STORE instructions
class STORE<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs),
                 (ins GPR:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi64<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE<Opc, OpcodeStr, [(OpNode i64:$src, ADDRri:$addr)]>;

let Predicates = [BPFNoALU32] in {
  def STW : STOREi64<BPF_W, "u32", truncstorei32>;
  def STH : STOREi64<BPF_H, "u16", truncstorei16>;
  def STB : STOREi64<BPF_B, "u8", truncstorei8>;
}
def STD : STOREi64<BPF_DW, "u64", store>;

// LOAD instructions
class LOAD<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi64<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : LOAD<SizeOp, OpcodeStr, [(set i64:$dst, (OpNode ADDRri:$addr))]>;

let isCodeGenOnly = 1 in {
  def CORE_MEM : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                            (outs GPR:$dst),
                            (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                            "$dst = core_mem($opcode, $src, $offset)",
                            []>;
  def CORE_ALU32_MEM : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                                  (outs GPR32:$dst),
                                  (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                                  "$dst = core_alu32_mem($opcode, $src, $offset)",
                                  []>;
  let Constraints = "$dst = $src" in {
    def CORE_SHIFT : ALU_RR<BPF_ALU64, BPF_LSH,
                            (outs GPR:$dst),
                            (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                            "$dst = core_shift($opcode, $src, $offset)",
                            []>;
  }
}

let Predicates = [BPFNoALU32] in {
  def LDW : LOADi64<BPF_W, "u32", zextloadi32>;
  def LDH : LOADi64<BPF_H, "u16", zextloadi16>;
  def LDB : LOADi64<BPF_B, "u8", zextloadi8>;
}
def LDD : LOADi64<BPF_DW, "u64", load>;
class BRANCH<BPFJumpOp Opc, string OpcodeStr, list<dag> Pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins brtarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   Pattern> {
  bits<16> BrDst;

  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class CALL<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_K.Value,
                   (outs),
                   (ins calltarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP;
}

class CALLX<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_X.Value,
                   (outs),
                   (ins GPR:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP;
}

// Jump always
let isBranch = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1 in {
  def JMP : BRANCH<BPF_JA, "goto", [(br bb:$BrDst)]>;
}

// Jump and link
let isCall=1, hasDelaySlot=0, Uses = [R11],
    // Potentially clobbered registers
    Defs = [R0, R1, R2, R3, R4, R5] in {
  def JAL  : CALL<"call">;
  def JALX : CALLX<"callx">;
}

class NOP_I<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_MOV.Value, BPF_X.Value,
                   (outs),
                   (ins i32imm:$imm),
                   !strconcat(OpcodeStr, "\t$imm"),
                   []> {
  // mov r0, r0 == nop
  let Inst{55-52} = 0;
  let Inst{51-48} = 0;
  let BPFClass = BPF_ALU64;
}

let hasSideEffects = 0, isCodeGenOnly = 1 in
  def NOP : NOP_I<"nop">;

class RET<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_EXIT.Value, BPF_K.Value,
                   (outs),
                   (ins),
                   !strconcat(OpcodeStr, ""),
                   [(BPFretflag)]> {
  let Inst{31-0} = 0;
  let BPFClass = BPF_JMP;
}

let isReturn = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1,
    isNotDuplicable = 1 in {
  def RET : RET<"exit">;
}

// ADJCALLSTACKDOWN/UP pseudo insns
let Defs = [R11], Uses = [R11], isCodeGenOnly = 1 in {
  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                                "#ADJCALLSTACKDOWN $amt1 $amt2",
                                [(BPFcallseq_start timm:$amt1, timm:$amt2)]>;
  def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                              "#ADJCALLSTACKUP $amt1 $amt2",
                              [(BPFcallseq_end timm:$amt1, timm:$amt2)]>;
}
let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def Select : Pseudo<(outs GPR:$dst),
                      (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                      "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                      [(set i64:$dst,
                        (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_Ri : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i64:$dst,
                           (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_64_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                              (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_Ri_64_32 : Pseudo<(outs GPR32:$dst),
                               (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i32:$dst,
                                 (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_32 : Pseudo<(outs GPR32:$dst),
                         (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i32:$dst,
                           (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_Ri_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                              (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_32_64 : Pseudo<(outs GPR:$dst),
                            (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i64:$dst,
                              (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_Ri_32_64 : Pseudo<(outs GPR:$dst),
                               (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i64:$dst,
                                 (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm), i64:$src, i64:$src2))]>;
}

// load 64-bit global addr into register
def : Pat<(BPFWrapper tglobaladdr:$in), (LD_imm64 tglobaladdr:$in)>;

// 0xffffFFFF doesn't fit into simm32, optimize common case
def : Pat<(i64 (and (i64 GPR:$src), 0xffffFFFF)),
          (SRL_ri (SLL_ri (i64 GPR:$src), 32), 32)>;
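// The replacement sequence is "$dst <<= 32; $dst >>= 32" (SLL_ri/SRL_ri),
// which zero-extends the low 32 bits without loading a 64-bit immediate.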
// Calls
def : Pat<(BPFcall tglobaladdr:$dst), (JAL tglobaladdr:$dst)>;
def : Pat<(BPFcall texternalsym:$dst), (JAL texternalsym:$dst)>;
def : Pat<(BPFcall imm:$dst), (JAL imm:$dst)>;
def : Pat<(BPFcall GPR:$dst), (JALX GPR:$dst)>;

// Loads
let Predicates = [BPFNoALU32] in {
  def : Pat<(i64 (extloadi8 ADDRri:$src)), (i64 (LDB ADDRri:$src))>;
  def : Pat<(i64 (extloadi16 ADDRri:$src)), (i64 (LDH ADDRri:$src))>;
  def : Pat<(i64 (extloadi32 ADDRri:$src)), (i64 (LDW ADDRri:$src))>;
}
// Atomic XADD for BPFNoALU32
class XADD<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr, GPR:$val),
                 "lock *("#OpcodeStr#" *)($addr) += $val",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_ADD.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFNoALU32] in {
    def XADDW : XADD<BPF_W, "u32", atomic_load_add_32>;
  }
}

// Atomic add, and, or, xor
class ATOMIC_NOFETCH<BPFArithOp Opc, string Opstr>
    : TYPE_LD_ST<BPF_ATOMIC.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr, GPR:$val),
                 "lock *(u64 *)($addr) " #Opstr# "= $val",
                 []> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let BPFClass = BPF_STX;
}

class ATOMIC32_NOFETCH<BPFArithOp Opc, string Opstr>
    : TYPE_LD_ST<BPF_ATOMIC.Value, BPF_W.Value,
                 (outs GPR32:$dst),
                 (ins MEMri:$addr, GPR32:$val),
                 "lock *(u32 *)($addr) " #Opstr# "= $val",
                 []> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XADDW32 : ATOMIC32_NOFETCH<BPF_ADD, "+">;
    def XANDW32 : ATOMIC32_NOFETCH<BPF_AND, "&">;
    def XORW32  : ATOMIC32_NOFETCH<BPF_OR, "|">;
    def XXORW32 : ATOMIC32_NOFETCH<BPF_XOR, "^">;
  }
  def XADDD : ATOMIC_NOFETCH<BPF_ADD, "+">;
  def XANDD : ATOMIC_NOFETCH<BPF_AND, "&">;
  def XORD  : ATOMIC_NOFETCH<BPF_OR, "|">;
  def XXORD : ATOMIC_NOFETCH<BPF_XOR, "^">;
}
// Atomic Fetch-and-<add, and, or, xor> operations
class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
              string OpcStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr, GPR:$val),
                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
              string OpcStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR32:$dst),
                 (ins MEMri:$addr, GPR32:$val),
                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
                 [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_32>;
    def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_32>;
    def XFORW32  : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_32>;
    def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_32>;
  }
  def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_64>;
  def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_64>;
  def XFORD  : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_64>;
  def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_64>;
}

// atomic_load_sub can be represented as a neg followed
// by an atomic_load_add.
def : Pat<(atomic_load_sub_32 ADDRri:$addr, GPR32:$val),
          (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
def : Pat<(atomic_load_sub_64 ADDRri:$addr, GPR:$val),
          (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
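// That is, "atomic fetch_sub(addr, val)" is selected as
// "fetch_add(addr, -val)", with NEG_32/NEG_64 negating the value operand
// before the atomic add.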
// Atomic Exchange
class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr, GPR:$val),
                 "$dst = xchg_"#OpcodeStr#"($addr, $val)",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_XCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class XCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR32:$dst),
                 (ins MEMri:$addr, GPR32:$val),
                 "$dst = xchg32_"#OpcodeStr#"($addr, $val)",
                 [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_XCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XCHGW32 : XCHG32<BPF_W, "32", atomic_swap_32>;
  }
  def XCHGD : XCHG<BPF_DW, "64", atomic_swap_64>;
}

// Compare-And-Exchange
class CMPXCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs),
                 (ins MEMri:$addr, GPR:$new),
                 "r0 = cmpxchg_"#OpcodeStr#"($addr, r0, $new)",
                 [(set R0, (OpNode ADDRri:$addr, R0, GPR:$new))]> {
  bits<4> new;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = new;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_CMPXCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class CMPXCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs),
                 (ins MEMri:$addr, GPR32:$new),
                 "w0 = cmpxchg32_"#OpcodeStr#"($addr, w0, $new)",
                 [(set W0, (OpNode ADDRri:$addr, W0, GPR32:$new))]> {
  bits<4> new;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = new;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_CMPXCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Predicates = [BPFHasALU32], Defs = [W0], Uses = [W0],
    DecoderNamespace = "BPFALU32" in {
  def CMPXCHGW32 : CMPXCHG32<BPF_W, "32", atomic_cmp_swap_32>;
}

let Defs = [R0], Uses = [R0] in {
  def CMPXCHGD : CMPXCHG<BPF_DW, "64", atomic_cmp_swap_64>;
}
// bswap16, bswap32, bswap64
class BSWAP<bits<32> SizeOp, string OpcodeStr, BPFSrcType SrcType, list<dag> Pattern>
    : TYPE_ALU_JMP<BPF_END.Value, SrcType.Value,
                   (outs GPR:$dst),
                   (ins GPR:$src),
                   "$dst = "#OpcodeStr#" $src",
                   Pattern> {
  bits<4> dst;

  let Inst{51-48} = dst;
  let Inst{31-0} = SizeOp;
  let BPFClass = BPF_ALU;
}
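// Note: bswap in the patterns below operates on the full 64-bit register,
// so the 16- and 32-bit forms shift the swapped value right by 48 or 32
// bits to keep only the swapped low half-word/word, which matches the
// be16/be32 (resp. le16/le32) semantics.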
let Constraints = "$dst = $src" in {
  let Predicates = [BPFIsLittleEndian] in {
    def BE16 : BSWAP<16, "be16", BPF_TO_BE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>;
    def BE32 : BSWAP<32, "be32", BPF_TO_BE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>;
    def BE64 : BSWAP<64, "be64", BPF_TO_BE, [(set GPR:$dst, (bswap GPR:$src))]>;
  }
  let Predicates = [BPFIsBigEndian] in {
    def LE16 : BSWAP<16, "le16", BPF_TO_LE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>;
    def LE32 : BSWAP<32, "le32", BPF_TO_LE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>;
    def LE64 : BSWAP<64, "le64", BPF_TO_LE, [(set GPR:$dst, (bswap GPR:$src))]>;
  }
}
let Defs = [R0, R1, R2, R3, R4, R5], Uses = [R6], hasSideEffects = 1,
    hasExtraDefRegAllocReq = 1, hasExtraSrcRegAllocReq = 1, mayLoad = 1 in {
class LOAD_ABS<BPFWidthModifer SizeOp, string OpcodeStr, Intrinsic OpNode>
    : TYPE_LD_ST<BPF_ABS.Value, SizeOp.Value,
                 (outs),
                 (ins GPR:$skb, i64imm:$imm),
                 "r0 = *("#OpcodeStr#" *)skb[$imm]",
                 [(set R0, (OpNode GPR:$skb, i64immSExt32:$imm))]> {
  bits<32> imm;

  let Inst{31-0} = imm;
  let BPFClass = BPF_LD;
}

class LOAD_IND<BPFWidthModifer SizeOp, string OpcodeStr, Intrinsic OpNode>
    : TYPE_LD_ST<BPF_IND.Value, SizeOp.Value,
                 (outs),
                 (ins GPR:$skb, GPR:$val),
                 "r0 = *("#OpcodeStr#" *)skb[$val]",
                 [(set R0, (OpNode GPR:$skb, GPR:$val))]> {
  bits<4> val;

  let Inst{55-52} = val;
  let BPFClass = BPF_LD;
}
}

def LD_ABS_B : LOAD_ABS<BPF_B, "u8", int_bpf_load_byte>;
def LD_ABS_H : LOAD_ABS<BPF_H, "u16", int_bpf_load_half>;
def LD_ABS_W : LOAD_ABS<BPF_W, "u32", int_bpf_load_word>;

def LD_IND_B : LOAD_IND<BPF_B, "u8", int_bpf_load_byte>;
def LD_IND_H : LOAD_IND<BPF_H, "u16", int_bpf_load_half>;
def LD_IND_W : LOAD_IND<BPF_W, "u32", int_bpf_load_word>;

let isCodeGenOnly = 1 in {
  def MOV_32_64 : ALU_RR<BPF_ALU, BPF_MOV,
                         (outs GPR:$dst), (ins GPR32:$src),
                         "$dst = $src", []>;
}

def : Pat<(i64 (sext GPR32:$src)),
          (SRA_ri (SLL_ri (MOV_32_64 GPR32:$src), 32), 32)>;

def : Pat<(i64 (zext GPR32:$src)), (MOV_32_64 GPR32:$src)>;

// For i64 -> i32 truncation, use the 32-bit subregister directly.
def : Pat<(i32 (trunc GPR:$src)),
          (i32 (EXTRACT_SUBREG GPR:$src, sub_32))>;

// For i32 -> i64 anyext, we don't care about the high bits.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
class STORE32<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs),
                 (ins GPR32:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi32<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE32<Opc, OpcodeStr, [(OpNode i32:$src, ADDRri:$addr)]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def STW32 : STOREi32<BPF_W, "u32", store>;
  def STH32 : STOREi32<BPF_H, "u16", truncstorei16>;
  def STB32 : STOREi32<BPF_B, "u8", truncstorei8>;
}

class LOAD32<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs GPR32:$dst),
                 (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : LOAD32<SizeOp, OpcodeStr, [(set i32:$dst, (OpNode ADDRri:$addr))]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def LDW32 : LOADi32<BPF_W, "u32", load>;
  def LDH32 : LOADi32<BPF_H, "u16", zextloadi16>;
  def LDB32 : LOADi32<BPF_B, "u8", zextloadi8>;
}

let Predicates = [BPFHasALU32] in {
  def : Pat<(truncstorei8 GPR:$src, ADDRri:$dst),
            (STB32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei16 GPR:$src, ADDRri:$dst),
            (STH32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei32 GPR:$src, ADDRri:$dst),
            (STW32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(i32 (extloadi8 ADDRri:$src)), (i32 (LDB32 ADDRri:$src))>;
  def : Pat<(i32 (extloadi16 ADDRri:$src)), (i32 (LDH32 ADDRri:$src))>;
  def : Pat<(i64 (zextloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
}

let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def MEMCPY : Pseudo<
                 (outs),
                 (ins GPR:$dst, GPR:$src, i64imm:$len, i64imm:$align, variable_ops),
                 "#memcpy dst: $dst, src: $src, len: $len, align: $align",
                 [(BPFmemcpy GPR:$dst, GPR:$src, imm:$len, imm:$align)]>;
}