//===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the shift and rotate instructions.
//
//===----------------------------------------------------------------------===//
// FIXME: Someone needs to smear multipattern goodness all over this file.

let Defs = [EFLAGS] in {

let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHL8rCL  : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (shl GR8:$src1, CL))]>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize16;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (shl GR32:$src1, CL))]>, OpSize32;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (shl GR64:$src1, CL))]>;
} // Uses = [CL], SchedRW

let isConvertibleToThreeAddress = 1 in {   // Can transform into LEA.
def SHL8ri  : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "shl{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "shl{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "shl{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "shl{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
} // isConvertibleToThreeAddress = 1

// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
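// (Added commentary, illustrative only.) A shift-left by one produces the same
// value as adding a register to itself, e.g. "shll $1, %eax" == "addl %eax, %eax",
// which is why selection goes through the ADD form and the SHL*r1 defs below
// carry empty pattern lists, presumably kept for the assembler/disassembler.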
let hasSideEffects = 0 in {
def SHL8r1  : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
                "shl{b}\t$dst", []>;
def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
                "shl{w}\t$dst", []>, OpSize16;
def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
                "shl{l}\t$dst", []>, OpSize32;
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
                 "shl{q}\t$dst", []>;
} // hasSideEffects = 0
} // Constraints = "$src = $dst", SchedRW

// FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
// using CL?
let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHL8mCL  : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
                 "shl{b}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
                 "shl{w}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
                 "shl{l}\t{%cl, $dst|$dst, cl}",
                 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
                  "shl{q}\t{%cl, $dst|$dst, cl}",
                  [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHL8mi  : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "shl{b}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "shl{w}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "shl{l}\t{$src, $dst|$dst, $src}",
                  [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "shl{q}\t{$src, $dst|$dst, $src}",
                   [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

// Shift by 1
def SHL8m1  : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
                "shl{b}\t$dst",
                [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
                "shl{w}\t$dst",
                [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
                "shl{l}\t$dst",
                [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
                 "shl{q}\t$dst",
                 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SHR8rCL  : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (srl GR8:$src1, CL))]>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize16;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (srl GR32:$src1, CL))]>, OpSize32;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (srl GR64:$src1, CL))]>;
}

def SHR8ri  : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$src2),
                  "shr{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "shr{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "shr{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$src2),
                   "shr{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;

// Shift right by 1
def SHR8r1  : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
                "shr{b}\t$dst",
                [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
                "shr{w}\t$dst",
                [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize16;
def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
                "shr{l}\t$dst",
                [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>, OpSize32;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
                 "shr{q}\t$dst",
                 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SHR8mCL  : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
                 "shr{b}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
                 "shr{w}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
                 "shr{l}\t{%cl, $dst|$dst, cl}",
                 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
                  "shr{q}\t{%cl, $dst|$dst, cl}",
                  [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SHR8mi  : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "shr{b}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "shr{w}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "shr{l}\t{$src, $dst|$dst, $src}",
                  [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "shr{q}\t{$src, $dst|$dst, $src}",
                   [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

// Shift by 1
def SHR8m1  : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
                "shr{b}\t$dst",
                [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
                "shr{w}\t$dst",
                [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
                "shr{l}\t$dst",
                [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
                 "shr{q}\t$dst",
                 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
let Uses = [CL], SchedRW = [WriteShiftCL] in {
def SAR8rCL  : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (sra GR8:$src1, CL))]>;
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (sra GR16:$src1, CL))]>,
                 OpSize16;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (sra GR32:$src1, CL))]>,
                 OpSize32;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (sra GR64:$src1, CL))]>;
}

def SAR8ri  : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "sar{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "sar{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "sar{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "sar{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;

// Shift by 1
def SAR8r1  : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
                "sar{b}\t$dst",
                [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
                "sar{w}\t$dst",
                [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize16;
def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
                "sar{l}\t$dst",
                [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>, OpSize32;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
                 "sar{q}\t$dst",
                 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
} // Constraints = "$src = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteShiftCLLd, WriteRMW] in {
def SAR8mCL  : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
                 "sar{b}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
                 "sar{w}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>,
                 OpSize16;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
                 "sar{l}\t{%cl, $dst|$dst, cl}",
                 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>,
                 OpSize32;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
                  "sar{q}\t{%cl, $dst|$dst, cl}",
                  [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteShiftLd, WriteRMW] in {
def SAR8mi  : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "sar{b}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "sar{w}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "sar{l}\t{$src, $dst|$dst, $src}",
                  [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "sar{q}\t{$src, $dst|$dst, $src}",
                   [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

// Shift by 1
def SAR8m1  : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
                "sar{b}\t$dst",
                [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
                "sar{w}\t$dst",
                [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
                "sar{l}\t$dst",
                [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
                 "sar{q}\t$dst",
                 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// Rotate instructions
//===----------------------------------------------------------------------===//
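// (Added commentary.) The RCL/RCR definitions below rotate through the EFLAGS
// carry bit, which is why they list EFLAGS in Uses and have empty ISel pattern
// lists; only the plain ROL/ROR forms further down carry DAG patterns.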
let hasSideEffects = 0 in {

let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCL8rCL  : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
                 "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCL8r1  : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
                "rcl{b}\t$dst", []>;
def RCL8ri  : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                  "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcl{q}\t$dst", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCL] in {
def RCR8rCL  : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
                 "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>;
} // Uses = [CL, EFLAGS]

let Uses = [EFLAGS] in {
def RCR8r1  : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
                "rcr{b}\t$dst", []>;
def RCR8ri  : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
                  "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
                 "rcr{q}\t$dst", []>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
} // Uses = [EFLAGS]

} // Constraints = "$src = $dst"

let SchedRW = [WriteRotateLd, WriteRMW], mayStore = 1 in {
let Uses = [EFLAGS] in {
def RCL8m1  : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
                "rcl{b}\t$dst", []>;
def RCL8mi  : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                  "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
                "rcl{w}\t$dst", []>, OpSize16;
def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
                "rcl{l}\t$dst", []>, OpSize32;
def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
                 "rcl{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;

def RCR8m1  : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
                "rcr{b}\t$dst", []>;
def RCR8mi  : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, u8imm:$cnt),
                  "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
                "rcr{w}\t$dst", []>, OpSize16;
def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, u8imm:$cnt),
                  "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize16;
def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
                "rcr{l}\t$dst", []>, OpSize32;
def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, u8imm:$cnt),
                  "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize32;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
                 "rcr{q}\t$dst", []>, Requires<[In64BitMode]>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, u8imm:$cnt),
                   "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>,
                   Requires<[In64BitMode]>;
} // Uses = [EFLAGS]

let Uses = [CL, EFLAGS], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def RCL8mCL  : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
                 "rcl{b}\t{%cl, $dst|$dst, cl}", []>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
                 "rcl{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
                 "rcl{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
                  "rcl{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;

def RCR8mCL  : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
                 "rcr{b}\t{%cl, $dst|$dst, cl}", []>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
                 "rcr{w}\t{%cl, $dst|$dst, cl}", []>, OpSize16;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
                 "rcr{l}\t{%cl, $dst|$dst, cl}", []>, OpSize32;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
                  "rcr{q}\t{%cl, $dst|$dst, cl}", []>,
                  Requires<[In64BitMode]>;
} // Uses = [CL, EFLAGS]
} // SchedRW
} // hasSideEffects = 0
let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROL8rCL  : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize16;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotl GR32:$src1, CL))]>, OpSize32;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
}

def ROL8ri  : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "rol{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "rol{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "rol{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "rol{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;

// Rotate by 1
def ROL8r1  : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
                "rol{b}\t$dst",
                [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
                "rol{w}\t$dst",
                [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize16;
def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
                "rol{l}\t$dst",
                [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>, OpSize32;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
                 "rol{q}\t$dst",
                 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
} // Constraints = "$src = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROL8mCL  : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
                 "rol{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
                 "rol{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
                 "rol{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
                  "rol{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROL8mi  : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, u8imm:$src1),
                  "rol{b}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, u8imm:$src1),
                  "rol{w}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                  OpSize16;
def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, u8imm:$src1),
                  "rol{l}\t{$src1, $dst|$dst, $src1}",
                  [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                  OpSize32;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, u8imm:$src1),
                   "rol{q}\t{$src1, $dst|$dst, $src1}",
                   [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

// Rotate by 1
def ROL8m1  : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
                "rol{b}\t$dst",
                [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
                "rol{w}\t$dst",
                [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
                "rol{l}\t$dst",
                [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
                 "rol{q}\t$dst",
                 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
let Constraints = "$src1 = $dst", SchedRW = [WriteRotate] in {
let Uses = [CL], SchedRW = [WriteRotateCL] in {
def ROR8rCL  : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize16;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(set GR32:$dst, (rotr GR32:$src1, CL))]>, OpSize32;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
}

def ROR8ri  : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
                  "ror{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
                  "ror{w}\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>,
                  OpSize16;
def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
                  "ror{l}\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>,
                  OpSize32;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
                   (ins GR64:$src1, u8imm:$src2),
                   "ror{q}\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;

// Rotate by 1
def ROR8r1  : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
                "ror{b}\t$dst",
                [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
                "ror{w}\t$dst",
                [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize16;
def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
                "ror{l}\t$dst",
                [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>, OpSize32;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
                 "ror{q}\t$dst",
                 [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
} // Constraints = "$src = $dst", SchedRW

let Uses = [CL], SchedRW = [WriteRotateCLLd, WriteRMW] in {
def ROR8mCL  : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
                 "ror{b}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
                 "ror{w}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize16;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
                 "ror{l}\t{%cl, $dst|$dst, cl}",
                 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>, OpSize32;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
                  "ror{q}\t{%cl, $dst|$dst, cl}",
                  [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>,
                  Requires<[In64BitMode]>;
}

let SchedRW = [WriteRotateLd, WriteRMW] in {
def ROR8mi  : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, u8imm:$src),
                  "ror{b}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, u8imm:$src),
                  "ror{w}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize16;
def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, u8imm:$src),
                  "ror{l}\t{$src, $dst|$dst, $src}",
                  [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                  OpSize32;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
                   "ror{q}\t{$src, $dst|$dst, $src}",
                   [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
                   Requires<[In64BitMode]>;

// Rotate by 1
def ROR8m1  : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
                "ror{b}\t$dst",
                [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
                "ror{w}\t$dst",
                [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize16;
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
                "ror{l}\t$dst",
                [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>,
                OpSize32;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
                 "ror{q}\t$dst",
                 [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
// Double shift instructions (generalizations of rotate)
//===----------------------------------------------------------------------===//
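// (Added commentary.) "shld $cnt, %src2, %dst" shifts $dst left by $cnt bits,
// filling the vacated low bits from the high bits of $src2, i.e. it computes
// fshl(dst, src2, cnt); SHRD is the mirrored fshr form. That is how the
// X86fshl/fshl and X86fshr/fshr patterns below are written.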
let Constraints = "$src1 = $dst" in {

let Uses = [CL], SchedRW = [WriteSHDrrcl] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86fshl GR16:$src1, GR16:$src2, CL))]>,
                   TB, OpSize16;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
                   (ins GR16:$src1, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR16:$dst, (X86fshr GR16:$src2, GR16:$src1, CL))]>,
                   TB, OpSize16;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (fshl GR32:$src1, GR32:$src2, CL))]>,
                   TB, OpSize32;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
                   (ins GR32:$src1, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(set GR32:$dst, (fshr GR32:$src2, GR32:$src1, CL))]>,
                   TB, OpSize32;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (fshl GR64:$src1, GR64:$src2, CL))]>,
                    TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
                    (ins GR64:$src1, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(set GR64:$dst, (fshr GR64:$src2, GR64:$src1, CL))]>,
                    TB;
} // SchedRW

// These instructions commute to each other.
let isCommutable = 1, SchedRW = [WriteSHDrri] in {
def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86fshl GR16:$src1, GR16:$src2,
                                               (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR16:$dst),
                     (ins GR16:$src1, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR16:$dst, (X86fshr GR16:$src2, GR16:$src1,
                                               (i8 imm:$src3)))]>,
                     TB, OpSize16;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (fshl GR32:$src1, GR32:$src2,
                                            (i8 imm:$src3)))]>,
                     TB, OpSize32;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
                     (outs GR32:$dst),
                     (ins GR32:$src1, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(set GR32:$dst, (fshr GR32:$src2, GR32:$src1,
                                            (i8 imm:$src3)))]>,
                     TB, OpSize32;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (fshl GR64:$src1, GR64:$src2,
                                             (i8 imm:$src3)))]>,
                      TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
                      (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set GR64:$dst, (fshr GR64:$src2, GR64:$src1,
                                             (i8 imm:$src3)))]>,
                      TB;
} // SchedRW
} // Constraints = "$src = $dst"

let Uses = [CL], SchedRW = [WriteSHDmrcl] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86fshl (loadi16 addr:$dst), GR16:$src2, CL),
                           addr:$dst)]>, TB, OpSize16;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (X86fshr GR16:$src2, (loadi16 addr:$dst), CL),
                           addr:$dst)]>, TB, OpSize16;
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (fshl (loadi32 addr:$dst), GR32:$src2, CL),
                           addr:$dst)]>, TB, OpSize32;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                   [(store (fshr GR32:$src2, (loadi32 addr:$dst), CL),
                           addr:$dst)]>, TB, OpSize32;
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (fshl (loadi64 addr:$dst), GR64:$src2, CL),
                            addr:$dst)]>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
                    [(store (fshr GR64:$src2, (loadi64 addr:$dst), CL),
                            addr:$dst)]>, TB;
} // SchedRW

let SchedRW = [WriteSHDmri] in {
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86fshl (loadi16 addr:$dst), GR16:$src2,
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
                     "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (X86fshr GR16:$src2, (loadi16 addr:$dst),
                                      (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize16;
def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (fshl (loadi32 addr:$dst), GR32:$src2,
                                   (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
                     (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
                     "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                     [(store (fshr GR32:$src2, (loadi32 addr:$dst),
                                   (i8 imm:$src3)), addr:$dst)]>,
                     TB, OpSize32;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (fshl (loadi64 addr:$dst), GR64:$src2,
                                    (i8 imm:$src3)), addr:$dst)]>,
                      TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
                      (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
                      "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(store (fshr GR64:$src2, (loadi64 addr:$dst),
                                    (i8 imm:$src3)), addr:$dst)]>,
                      TB;
} // SchedRW

} // Defs = [EFLAGS]
// Use the opposite rotate if it allows us to use the rotate by 1 instruction.
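// For example, an i8 rotate-left by 7 yields the same value as a rotate-right
// by 1, so (rotl GR8:$x, 7) can be selected as the shorter ROR8r1 form below.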
def : Pat<(rotl GR8:$src1,  (i8 7)),  (ROR8r1  GR8:$src1)>;
def : Pat<(rotl GR16:$src1, (i8 15)), (ROR16r1 GR16:$src1)>;
def : Pat<(rotl GR32:$src1, (i8 31)), (ROR32r1 GR32:$src1)>;
def : Pat<(rotl GR64:$src1, (i8 63)), (ROR64r1 GR64:$src1)>;
def : Pat<(rotr GR8:$src1,  (i8 7)),  (ROL8r1  GR8:$src1)>;
def : Pat<(rotr GR16:$src1, (i8 15)), (ROL16r1 GR16:$src1)>;
def : Pat<(rotr GR32:$src1, (i8 31)), (ROL32r1 GR32:$src1)>;
def : Pat<(rotr GR64:$src1, (i8 63)), (ROL64r1 GR64:$src1)>;

def : Pat<(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROR8m1 addr:$dst)>;
def : Pat<(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROR16m1 addr:$dst)>;
def : Pat<(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROR32m1 addr:$dst)>;
def : Pat<(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROR64m1 addr:$dst)>, Requires<[In64BitMode]>;

def : Pat<(store (rotr (loadi8 addr:$dst), (i8 7)), addr:$dst),
          (ROL8m1 addr:$dst)>;
def : Pat<(store (rotr (loadi16 addr:$dst), (i8 15)), addr:$dst),
          (ROL16m1 addr:$dst)>;
def : Pat<(store (rotr (loadi32 addr:$dst), (i8 31)), addr:$dst),
          (ROL32m1 addr:$dst)>;
def : Pat<(store (rotr (loadi64 addr:$dst), (i8 63)), addr:$dst),
          (ROL64m1 addr:$dst)>, Requires<[In64BitMode]>;
// Sandy Bridge and newer Intel processors support faster rotates using
// SHLD to avoid a partial flag update on the normal rotate instructions.
// Use a pseudo so that TwoAddressInstructionPass and register allocation will
// see this as a unary instruction.
let Predicates = [HasFastSHLDRotate], AddedComplexity = 5,
    Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteSHDrri],
    Constraints = "$src1 = $dst" in {
def SHLDROT32ri : I<0, Pseudo, (outs GR32:$dst),
                    (ins GR32:$src1, u8imm:$shamt), "",
                    [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$shamt)))]>;
def SHLDROT64ri : I<0, Pseudo, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$shamt), "",
                    [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$shamt)))]>;

def SHRDROT32ri : I<0, Pseudo, (outs GR32:$dst),
                    (ins GR32:$src1, u8imm:$shamt), "",
                    [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$shamt)))]>;
def SHRDROT64ri : I<0, Pseudo, (outs GR64:$dst),
                    (ins GR64:$src1, u8imm:$shamt), "",
                    [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$shamt)))]>;
}
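// (Added commentary.) A funnel shift of a value with itself is a rotate,
// fshl(x, x, n) == rotl(x, n), so each pseudo above can later be expanded into
// an SHLD/SHRD whose two register operands are both tied to $dst.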
def ROT32L2R_imm8 : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
  return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
}]>;

def ROT64L2R_imm8 : SDNodeXForm<imm, [{
  // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
  return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
}]>;
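// (Added commentary.) Worked example: on an i32, rotl(x, 3) == rotr(x, 32 - 3)
// == rotr(x, 29), so the RORX patterns below can rewrite a rotate-left
// immediate into the equivalent rotate-right immediate via these transforms.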
// NOTE: We use WriteShift for these rotates as they avoid the stalls
// of many of the older x86 rotate instructions.
multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
               (ins x86memop:$src1, u8imm:$src2),
               !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               []>, TAXD, VEX, Sched<[WriteShiftLd]>;
}
}

multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
  def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift]>;
  let mayLoad = 1 in
  def rm : I<0xF7, MRMSrcMem4VOp3,
             (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
             VEX, Sched<[WriteShift.Folded,
                         // x86memop:$src1
                         ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                         ReadDefault,
                         // RC:$src2
                         WriteShift.ReadAfterFold]>;
}
}

let Predicates = [HasBMI2] in {
  defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
  defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, VEX_W;
  defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
  defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
  defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
  defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
  defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
  defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, VEX_W;

  // Prefer RORX which is non-destructive and doesn't update EFLAGS.
  let AddedComplexity = 10 in {
    def : Pat<(rotr GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, imm:$shamt)>;
    def : Pat<(rotr GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, imm:$shamt)>;

    def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
              (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
    def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
              (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
  }

  def : Pat<(rotr (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, imm:$shamt)>;
  def : Pat<(rotr (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, imm:$shamt)>;

  def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
            (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
  def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
            (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;

  // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with variable shift BUT not
  // immediate shift, i.e. the following code is considered better
  //
  //  mov %edi, %esi
  //  shl $imm, %esi
  //  ... %edi, ...
  //
  // than
  //
  //  movb $imm, %sil
  //  shlx %sil, %edi, %esi
  //  ... %edi, ...
  //
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, GR8:$src2),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, GR8:$src2),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, GR8:$src2),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, GR8:$src2),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, GR8:$src2),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, GR8:$src2),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }
  // We prefer to use
  //  mov (%ecx), %esi
  //  shl $imm, %esi
  //
  // over
  //
  //  movb $imm, %al
  //  shlx %al, (%ecx), %esi
  //
  // This priority is enforced by IsProfitableToFoldLoad.
  def : Pat<(sra (loadi32 addr:$src1), GR8:$src2),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), GR8:$src2),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), GR8:$src2),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), GR8:$src2),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), GR8:$src2),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), GR8:$src2),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
} // Predicates = [HasBMI2]

def : Pat<(rotl GR8:$src1, (i8 relocImm:$src2)),
          (ROL8ri GR8:$src1, relocImm:$src2)>;
def : Pat<(rotl GR16:$src1, (i8 relocImm:$src2)),
          (ROL16ri GR16:$src1, relocImm:$src2)>;
def : Pat<(rotl GR32:$src1, (i8 relocImm:$src2)),
          (ROL32ri GR32:$src1, relocImm:$src2)>;
def : Pat<(rotl GR64:$src1, (i8 relocImm:$src2)),
          (ROL64ri GR64:$src1, relocImm:$src2)>;
def : Pat<(rotr GR8:$src1, (i8 relocImm:$src2)),
          (ROR8ri GR8:$src1, relocImm:$src2)>;
def : Pat<(rotr GR16:$src1, (i8 relocImm:$src2)),
          (ROR16ri GR16:$src1, relocImm:$src2)>;
def : Pat<(rotr GR32:$src1, (i8 relocImm:$src2)),
          (ROR32ri GR32:$src1, relocImm:$src2)>;
def : Pat<(rotr GR64:$src1, (i8 relocImm:$src2)),
          (ROR64ri GR64:$src1, relocImm:$src2)>;