AArch64AddressingModes.h
//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H

#include "AArch64ExpandImm.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

namespace llvm {

/// AArch64_AM - AArch64 Addressing Mode Stuff
namespace AArch64_AM {

//===----------------------------------------------------------------------===//
// Shifts
//

enum ShiftExtendType {
  InvalidShiftExtend = -1,
  LSL = 0,
  LSR,
  ASR,
  ROR,
  MSL,

  UXTB,
  UXTH,
  UXTW,
  UXTX,
  SXTB,
  SXTH,
  SXTW,
  SXTX,
};
/// getShiftExtendName - Get the string encoding for the shift type.
static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
  switch (ST) {
  default: llvm_unreachable("unhandled shift type!");
  case AArch64_AM::LSL: return "lsl";
  case AArch64_AM::LSR: return "lsr";
  case AArch64_AM::ASR: return "asr";
  case AArch64_AM::ROR: return "ror";
  case AArch64_AM::MSL: return "msl";
  case AArch64_AM::UXTB: return "uxtb";
  case AArch64_AM::UXTH: return "uxth";
  case AArch64_AM::UXTW: return "uxtw";
  case AArch64_AM::UXTX: return "uxtx";
  case AArch64_AM::SXTB: return "sxtb";
  case AArch64_AM::SXTH: return "sxth";
  case AArch64_AM::SXTW: return "sxtw";
  case AArch64_AM::SXTX: return "sxtx";
  }
  return nullptr;
}
/// getShiftType - Extract the shift type.
static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
  switch ((Imm >> 6) & 0x7) {
  default: return AArch64_AM::InvalidShiftExtend;
  case 0: return AArch64_AM::LSL;
  case 1: return AArch64_AM::LSR;
  case 2: return AArch64_AM::ASR;
  case 3: return AArch64_AM::ROR;
  case 4: return AArch64_AM::MSL;
  }
}

/// getShiftValue - Extract the shift value.
static inline unsigned getShiftValue(unsigned Imm) {
  return Imm & 0x3f;
}
/// getShifterImm - Encode the shift type and amount:
///   imm:     6-bit shift amount
///   shifter: 000 ==> lsl
///            001 ==> lsr
///            010 ==> asr
///            011 ==> ror
///            100 ==> msl
///   {8-6}  = shifter
///   {5-0}  = imm
static inline unsigned getShifterImm(AArch64_AM::ShiftExtendType ST,
                                     unsigned Imm) {
  assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
  unsigned STEnc = 0;
  switch (ST) {
  default: llvm_unreachable("Invalid shift requested");
  case AArch64_AM::LSL: STEnc = 0; break;
  case AArch64_AM::LSR: STEnc = 1; break;
  case AArch64_AM::ASR: STEnc = 2; break;
  case AArch64_AM::ROR: STEnc = 3; break;
  case AArch64_AM::MSL: STEnc = 4; break;
  }
  return (STEnc << 6) | (Imm & 0x3f);
}
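
// Worked example (illustrative, not part of the original header): a shifted
// operand written "lsl #12" round-trips through these helpers as
//   unsigned Enc = getShifterImm(AArch64_AM::LSL, 12); // (0 << 6) | 12 == 12
//   getShiftType(Enc)  == AArch64_AM::LSL
//   getShiftValue(Enc) == 12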
//===----------------------------------------------------------------------===//
// Extends
//

/// getArithShiftValue - get the arithmetic shift value.
static inline unsigned getArithShiftValue(unsigned Imm) {
  return Imm & 0x7;
}

/// getExtendType - Extract the extend type for operands of arithmetic ops.
static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
  assert((Imm & 0x7) == Imm && "invalid immediate!");
  switch (Imm) {
  default: llvm_unreachable("Compiler bug!");
  case 0: return AArch64_AM::UXTB;
  case 1: return AArch64_AM::UXTH;
  case 2: return AArch64_AM::UXTW;
  case 3: return AArch64_AM::UXTX;
  case 4: return AArch64_AM::SXTB;
  case 5: return AArch64_AM::SXTH;
  case 6: return AArch64_AM::SXTW;
  case 7: return AArch64_AM::SXTX;
  }
}

static inline AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm) {
  return getExtendType((Imm >> 3) & 0x7);
}

/// Mapping from extend bits to required operation:
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
inline unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET) {
  switch (ET) {
  default: llvm_unreachable("Invalid extend type requested");
  case AArch64_AM::UXTB: return 0;
  case AArch64_AM::UXTH: return 1;
  case AArch64_AM::UXTW: return 2;
  case AArch64_AM::UXTX: return 3;
  case AArch64_AM::SXTB: return 4;
  case AArch64_AM::SXTH: return 5;
  case AArch64_AM::SXTW: return 6;
  case AArch64_AM::SXTX: return 7;
  }
}

/// getArithExtendImm - Encode the extend type and shift amount for an
/// arithmetic instruction:
///   imm:     3-bit extend amount
///   {5-3}  = shifter
///   {2-0}  = imm3
static inline unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET,
                                         unsigned Imm) {
  assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
  return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
}
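
// Worked example (illustrative): the operand "uxtw #2" on an arithmetic
// instruction encodes as getArithExtendImm(AArch64_AM::UXTW, 2)
// == (2 << 3) | 2 == 18; conversely getArithExtendType(18) == UXTW and
// getArithShiftValue(18) == 2.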
/// getMemDoShift - Extract the "do shift" flag value for load/store
/// instructions.
static inline bool getMemDoShift(unsigned Imm) {
  return (Imm & 0x1) != 0;
}

/// getMemExtendType - Extract the extend type for the offset operand of
/// loads/stores.
static inline AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm) {
  return getExtendType((Imm >> 1) & 0x7);
}

/// getMemExtendImm - Encode the extend type and amount for a load/store inst:
///   doshift: should the offset be scaled by the access size
///   shifter: 000 ==> uxtb
///            001 ==> uxth
///            010 ==> uxtw
///            011 ==> uxtx
///            100 ==> sxtb
///            101 ==> sxth
///            110 ==> sxtw
///            111 ==> sxtx
///   {3-1}  = shifter
///   {0}    = doshift
static inline unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET,
                                       bool DoShift) {
  return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
}
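
// Worked example (illustrative): a scaled "sxtw" register offset, e.g. the
// operand in "ldr x0, [x1, w2, sxtw #3]", encodes as
// getMemExtendImm(AArch64_AM::SXTW, true) == (6 << 1) | 1 == 13;
// getMemExtendType(13) == SXTW and getMemDoShift(13) == true.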
static inline uint64_t ror(uint64_t elt, unsigned size) {
  return ((elt & 1) << (size-1)) | (elt >> 1);
}

/// processLogicalImmediate - Determine if an immediate value can be encoded
/// as the immediate operand of a logical instruction for the given register
/// size. If so, return true with "encoding" set to the encoded value in
/// the form N:immr:imms.
static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
                                           uint64_t &Encoding) {
  if (Imm == 0ULL || Imm == ~0ULL ||
      (RegSize != 64 &&
       (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize)))))
    return false;

  // First, determine the element size.
  unsigned Size = RegSize;

  do {
    Size /= 2;
    uint64_t Mask = (1ULL << Size) - 1;

    if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
      Size *= 2;
      break;
    }
  } while (Size > 2);

  // Second, determine the rotation to make the element be: 0^m 1^n.
  uint32_t CTO, I;
  uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size);
  Imm &= Mask;

  if (isShiftedMask_64(Imm)) {
    I = countTrailingZeros(Imm);
    assert(I < 64 && "undefined behavior");
    CTO = countTrailingOnes(Imm >> I);
  } else {
    Imm |= ~Mask;
    if (!isShiftedMask_64(~Imm))
      return false;

    unsigned CLO = countLeadingOnes(Imm);
    I = 64 - CLO;
    CTO = CLO + countTrailingOnes(Imm) - (64 - Size);
  }

  // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
  // to our target value, where I is the number of RORs to go the opposite
  // direction.
  assert(Size > I && "I should be smaller than element size");
  unsigned Immr = (Size - I) & (Size - 1);

  // If size has a 1 in the n'th bit, create a value that has zeroes in
  // bits [0, n] and ones above that.
  uint64_t NImms = ~(Size-1) << 1;

  // Or the CTO value into the low bits, which must be below the Nth bit
  // mentioned above.
  NImms |= (CTO-1);

  // Extract the seventh bit and toggle it to create the N field.
  unsigned N = ((NImms >> 6) & 1) ^ 1;

  Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);
  return true;
}
/// isLogicalImmediate - Return true if the immediate is valid for a logical
/// immediate instruction of the given register size. Return false otherwise.
static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding;
  return processLogicalImmediate(imm, regSize, encoding);
}

/// encodeLogicalImmediate - Return the encoded immediate value for a logical
/// immediate instruction of the given register size.
static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
  uint64_t encoding = 0;
  bool res = processLogicalImmediate(imm, regSize, encoding);
  assert(res && "invalid logical immediate");
  (void)res;
  return encoding;
}

/// decodeLogicalImmediate - Decode a logical immediate value in the form
/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
/// integer value it represents with regSize bits.
static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
  // Extract the N, imms, and immr fields.
  unsigned N = (val >> 12) & 1;
  unsigned immr = (val >> 6) & 0x3f;
  unsigned imms = val & 0x3f;

  assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  assert(len >= 0 && "undefined logical immediate encoding");
  unsigned size = (1 << len);
  unsigned R = immr & (size - 1);
  unsigned S = imms & (size - 1);
  assert(S != size - 1 && "undefined logical immediate encoding");
  uint64_t pattern = (1ULL << (S + 1)) - 1;
  for (unsigned i = 0; i < R; ++i)
    pattern = ror(pattern, size);

  // Replicate the pattern to fill the regSize.
  while (size != regSize) {
    pattern |= (pattern << size);
    size *= 2;
  }

  return pattern;
}
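
// Worked example (illustrative): the 64-bit logical immediate 0xff is a
// 64-bit element with eight trailing ones and no rotation, so
// encodeLogicalImmediate(0xff, 64) == 0x1007 (N=1, immr=0b000000,
// imms=0b000111), and decodeLogicalImmediate(0x1007, 64) == 0xff.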
/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
/// is a valid encoding for an integer value with regSize bits.
static inline bool isValidDecodeLogicalImmediate(uint64_t val,
                                                 unsigned regSize) {
  // Extract the N and imms fields needed for checking.
  unsigned N = (val >> 12) & 1;
  unsigned imms = val & 0x3f;

  if (regSize == 32 && N != 0) // undefined logical immediate encoding
    return false;
  int len = 31 - countLeadingZeros((N << 6) | (~imms & 0x3f));
  if (len < 0) // undefined logical immediate encoding
    return false;
  unsigned size = (1 << len);
  unsigned S = imms & (size - 1);
  if (S == size - 1) // undefined logical immediate encoding
    return false;

  return true;
}
//===----------------------------------------------------------------------===//
// Floating-point Immediates
//
static inline float getFPImmFloat(unsigned Imm) {
  // We expect an 8-bit binary encoding of a floating-point number here.

  uint8_t Sign = (Imm >> 7) & 0x1;
  uint8_t Exp = (Imm >> 4) & 0x7;
  uint8_t Mantissa = Imm & 0xf;

  //   8-bit FP    IEEE Float Encoding
  //   abcd efgh   aBbbbbbc defgh000 00000000 00000000
  //
  // where B = NOT(b);
  uint32_t I = 0;
  I |= Sign << 31;
  I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
  I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
  I |= (Exp & 0x3) << 23;
  I |= Mantissa << 19;
  return bit_cast<float>(I);
}
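
// Worked example (illustrative): the 8-bit encoding 0x70 (sign 0, exponent
// field 0b111, mantissa 0) expands to the IEEE bit pattern 0x3f800000, so
// getFPImmFloat(0x70) == 1.0f.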
/// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP16Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15;  // -14 to 15
  int32_t Mantissa = Imm.getZExtValue() & 0x3ff;  // 10 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x3f)
    return -1;
  Mantissa >>= 6;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP16Imm(const APFloat &FPImm) {
  return getFP16Imm(FPImm.bitcastToAPInt());
}

/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP32Imm(const APInt &Imm) {
  uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
  int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127;  // -126 to 127
  int64_t Mantissa = Imm.getZExtValue() & 0x7fffff;  // 23 bits

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0x7ffff)
    return -1;
  Mantissa >>= 19;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP32Imm(const APFloat &FPImm) {
  return getFP32Imm(FPImm.bitcastToAPInt());
}

/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
static inline int getFP64Imm(const APInt &Imm) {
  uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
  int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023;  // -1022 to 1023
  uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;

  // We can handle 4 bits of mantissa.
  // mantissa = (16+UInt(e:f:g:h))/16.
  if (Mantissa & 0xffffffffffffULL)
    return -1;
  Mantissa >>= 48;
  if ((Mantissa & 0xf) != Mantissa)
    return -1;

  // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
  if (Exp < -3 || Exp > 4)
    return -1;
  Exp = ((Exp+3) & 0x7) ^ 4;

  return ((int)Sign << 7) | (Exp << 4) | Mantissa;
}

static inline int getFP64Imm(const APFloat &FPImm) {
  return getFP64Imm(FPImm.bitcastToAPInt());
}
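
// Worked example (illustrative): the double -0.5 has sign 1, unbiased
// exponent -1, and a zero fraction, all within range, so
// getFP64Imm(APFloat(-0.5)) == 0xE0; a value such as 0.1, whose fraction
// needs more than 4 bits, returns -1.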
//===--------------------------------------------------------------------===//
// AdvSIMD Modified Immediates
//===--------------------------------------------------------------------===//

// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffffff00ffffff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType1(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType1(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 32) | EncVal;
}
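
// Worked example (illustrative): 0x000000ab000000ab places the byte abcdefgh
// in the lowest byte of each 32-bit lane, so isAdvSIMDModImmType1() holds,
// encodeAdvSIMDModImmType1() yields 0xab, and decodeAdvSIMDModImmType1(0xab)
// reproduces the original value.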
// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType2(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType2(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8);
}

// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType3(uint64_t Imm) {
  return (Imm & 0xff0000ULL) >> 16;
}

static inline uint64_t decodeAdvSIMDModImmType3(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16);
}

// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0x00ffffff00ffffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType4(uint64_t Imm) {
  return (Imm & 0xff000000ULL) >> 24;
}

static inline uint64_t decodeAdvSIMDModImmType4(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 24);
}

// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0x00ff0000ULL) >> 16) == (Imm & 0x000000ffULL)) &&
         ((Imm & 0xff00ff00ff00ff00ULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType5(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType5(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
}

// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (((Imm & 0xff000000ULL) >> 16) == (Imm & 0x0000ff00ULL)) &&
         ((Imm & 0x00ff00ff00ff00ffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType6(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType6(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
}

// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xffff00ffffff00ffULL) == 0x000000ff000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType7(uint64_t Imm) {
  return (Imm & 0xff00ULL) >> 8;
}

static inline uint64_t decodeAdvSIMDModImmType7(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
}

// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm & 0xff00ffffff00ffffULL) == 0x0000ffff0000ffffULL);
}

static inline uint64_t decodeAdvSIMDModImmType8(uint8_t Imm) {
  uint64_t EncVal = Imm;
  return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
}

static inline uint8_t encodeAdvSIMDModImmType8(uint64_t Imm) {
  return (Imm & 0x00ff0000ULL) >> 16;
}

// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         ((Imm >> 48) == (Imm & 0x0000ffffULL)) &&
         ((Imm >> 56) == (Imm & 0x000000ffULL));
}

static inline uint8_t encodeAdvSIMDModImmType9(uint64_t Imm) {
  return (Imm & 0xffULL);
}

static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
  uint64_t EncVal = Imm;
  EncVal |= (EncVal << 8);
  EncVal |= (EncVal << 16);
  EncVal |= (EncVal << 32);
  return EncVal;
}
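
// Worked example (illustrative): 0x4141414141414141 repeats the byte 0x41 in
// every lane, so isAdvSIMDModImmType9() holds, encodeAdvSIMDModImmType9()
// yields 0x41, and decodeAdvSIMDModImmType9(0x41) rebuilds the value by
// doubling the replication width (8, then 16, then 32 bits).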
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
  uint64_t ByteA = Imm & 0xff00000000000000ULL;
  uint64_t ByteB = Imm & 0x00ff000000000000ULL;
  uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
  uint64_t ByteD = Imm & 0x000000ff00000000ULL;
  uint64_t ByteE = Imm & 0x00000000ff000000ULL;
  uint64_t ByteF = Imm & 0x0000000000ff0000ULL;
  uint64_t ByteG = Imm & 0x000000000000ff00ULL;
  uint64_t ByteH = Imm & 0x00000000000000ffULL;

  return (ByteA == 0ULL || ByteA == 0xff00000000000000ULL) &&
         (ByteB == 0ULL || ByteB == 0x00ff000000000000ULL) &&
         (ByteC == 0ULL || ByteC == 0x0000ff0000000000ULL) &&
         (ByteD == 0ULL || ByteD == 0x000000ff00000000ULL) &&
         (ByteE == 0ULL || ByteE == 0x00000000ff000000ULL) &&
         (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
         (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
         (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
}

static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
  uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
  uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
  uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
  uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType10(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
  if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
  if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
  if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
  if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
  if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
  return EncVal;
}
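
// Worked example (illustrative): in type 10 every byte of the 64-bit value is
// either 0x00 or 0xff and contributes one bit of the imm8. For
// 0x00ff00ff00ff00ff, encodeAdvSIMDModImmType10() yields 0b01010101 == 0x55,
// and decodeAdvSIMDModImmType10(0x55) expands it back byte by byte.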
// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7E000000ULL) >> 25;
  return ((Imm >> 32) == (Imm & 0xffffffffULL)) &&
         (BString == 0x1f || BString == 0x20) &&
         ((Imm & 0x0007ffff0007ffffULL) == 0);
}

static inline uint8_t encodeAdvSIMDModImmType11(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x80000000ULL) != 0;
  uint8_t BitB = (Imm & 0x20000000ULL) != 0;
  uint8_t BitC = (Imm & 0x01000000ULL) != 0;
  uint8_t BitD = (Imm & 0x00800000ULL) != 0;
  uint8_t BitE = (Imm & 0x00400000ULL) != 0;
  uint8_t BitF = (Imm & 0x00200000ULL) != 0;
  uint8_t BitG = (Imm & 0x00100000ULL) != 0;
  uint8_t BitH = (Imm & 0x00080000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType11(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x80000000ULL;
  if (Imm & 0x40) EncVal |= 0x3e000000ULL;
  else            EncVal |= 0x40000000ULL;
  if (Imm & 0x20) EncVal |= 0x01000000ULL;
  if (Imm & 0x10) EncVal |= 0x00800000ULL;
  if (Imm & 0x08) EncVal |= 0x00400000ULL;
  if (Imm & 0x04) EncVal |= 0x00200000ULL;
  if (Imm & 0x02) EncVal |= 0x00100000ULL;
  if (Imm & 0x01) EncVal |= 0x00080000ULL;
  return (EncVal << 32) | EncVal;
}
// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
  uint64_t BString = (Imm & 0x7fc0000000000000ULL) >> 54;
  return ((BString == 0xff || BString == 0x100) &&
          ((Imm & 0x0000ffffffffffffULL) == 0));
}

static inline uint8_t encodeAdvSIMDModImmType12(uint64_t Imm) {
  uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
  uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
  uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
  uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
  uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
  uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
  uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
  uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;

  uint8_t EncVal = BitA;
  EncVal <<= 1;
  EncVal |= BitB;
  EncVal <<= 1;
  EncVal |= BitC;
  EncVal <<= 1;
  EncVal |= BitD;
  EncVal <<= 1;
  EncVal |= BitE;
  EncVal <<= 1;
  EncVal |= BitF;
  EncVal <<= 1;
  EncVal |= BitG;
  EncVal <<= 1;
  EncVal |= BitH;
  return EncVal;
}

static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
  uint64_t EncVal = 0;
  if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
  if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
  else            EncVal |= 0x4000000000000000ULL;
  if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
  if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
  if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
  if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
  if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
  if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
  return (EncVal << 32) | EncVal;
}
/// Returns true if Imm is the concatenation of a repeating pattern of type T.
template <typename T>
static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
  auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm);
  return llvm::all_equal(Parts);
}

/// Returns true if Imm is valid for CPY/DUP.
template <typename T>
static inline bool isSVECpyImm(int64_t Imm) {
  // Imm is interpreted as a signed value, which means top bits must be all ones
  // (sign bits if the immediate value is negative and passed in a larger
  // container), or all zeroes.
  int64_t Mask = ~int64_t(std::numeric_limits<std::make_unsigned_t<T>>::max());
  if ((Imm & Mask) != 0 && (Imm & Mask) != Mask)
    return false;

  // Imm is a signed 8-bit value.
  // Top bits must be zeroes or sign bits.
  if (Imm & 0xff)
    return int8_t(Imm) == T(Imm);

  // Imm is a signed 16-bit value and multiple of 256.
  // Top bits must be zeroes or sign bits.
  if (Imm & 0xff00)
    return int16_t(Imm) == T(Imm);

  return Imm == 0;
}
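
// Worked example (illustrative): isSVECpyImm<int16_t>(256) is true, since 256
// fits the 16-bit "multiple of 256" form, while isSVECpyImm<int8_t>(256) is
// false because the value does not fit in a signed byte.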
/// Returns true if Imm is valid for ADD/SUB.
template <typename T>
static inline bool isSVEAddSubImm(int64_t Imm) {
  bool IsInt8t = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                 std::is_same<int8_t, T>::value;
  return uint8_t(Imm) == Imm || (!IsInt8t && uint16_t(Imm & ~0xff) == Imm);
}
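
// Worked example (illustrative): the immediate is an unsigned 8-bit value,
// optionally shifted left by 8 for element types wider than a byte, so
// isSVEAddSubImm<int16_t>(0x100) is true but isSVEAddSubImm<int8_t>(0x100)
// is false.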
/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) {
  if (isSVECpyImm<int64_t>(Imm))
    return false;

  auto S = bit_cast<std::array<int32_t, 2>>(Imm);
  auto H = bit_cast<std::array<int16_t, 4>>(Imm);
  auto B = bit_cast<std::array<int8_t, 8>>(Imm);

  if (isSVEMaskOfIdenticalElements<int32_t>(Imm) && isSVECpyImm<int32_t>(S[0]))
    return false;
  if (isSVEMaskOfIdenticalElements<int16_t>(Imm) && isSVECpyImm<int16_t>(H[0]))
    return false;
  if (isSVEMaskOfIdenticalElements<int8_t>(Imm) && isSVECpyImm<int8_t>(B[0]))
    return false;
  return isLogicalImmediate(Imm, 64);
}
inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
    if ((Value & ~(0xffffULL << Shift)) == 0)
      return true;

  return false;
}

inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0 && Shift != 0)
    return false;

  return (Value & ~(0xffffULL << Shift)) == 0;
}

inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
  // MOVZ takes precedence over MOVN.
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return false;

  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isMOVZMovAlias(Value, Shift, RegWidth);
}
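
// Worked example (illustrative): 0x20000 is a single 16-bit chunk shifted by
// 16, so isMOVZMovAlias(0x20000, 16, 32) is true and "mov w0, #0x20000" can
// be printed as a MOVZ. 0xffffffffffff1234 is not reachable by MOVZ, but its
// complement 0xedcb is, so isMOVNMovAlias(0xffffffffffff1234, 0, 64) is true.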
inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
  if (isAnyMOVZMovAlias(Value, RegWidth))
    return true;

  // It's not a MOVZ, but it might be a MOVN.
  Value = ~Value;
  if (RegWidth == 32)
    Value &= 0xffffffffULL;

  return isAnyMOVZMovAlias(Value, RegWidth);
}

} // end namespace AArch64_AM

} // end namespace llvm

#endif