/* sljitNativeARM_64.c */
  1. /*
  2. * Stack-less Just-In-Time compiler
  3. *
  4. * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification, are
  7. * permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this list of
  10. * conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form must reproduce the above copyright notice, this list
  13. * of conditions and the following disclaimer in the documentation and/or other materials
  14. * provided with the distribution.
  15. *
  16. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
  17. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  18. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
  19. * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  20. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
  21. * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  22. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  23. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  24. * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
/* Returns a human-readable name for the target of this backend.
   SLJIT_CPUINFO is a string literal, concatenated at compile time. */
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
	return "ARM-64" SLJIT_CPUINFO;
}
/* Length of an instruction word */
typedef sljit_u32 sljit_ins;
/* Virtual registers used internally by the code generator; they are
   placed after the public general purpose register indices. */
#define TMP_ZERO (0)
#define TMP_REG1 (SLJIT_NUMBER_OF_REGISTERS + 2)
#define TMP_REG2 (SLJIT_NUMBER_OF_REGISTERS + 3)
#define TMP_LR (SLJIT_NUMBER_OF_REGISTERS + 4)
#define TMP_FP (SLJIT_NUMBER_OF_REGISTERS + 5)
#define TMP_FREG1 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1)
#define TMP_FREG2 (SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2)
/* r18 - platform register, currently not used */
/* Maps sljit virtual register indices to AArch64 register numbers
   (31 is xzr/sp depending on context). */
static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 8] = {
	31, 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 8, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 31, 9, 10, 30, 29
};
/* Maps sljit float register indices to AArch64 vector register numbers. */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
	0, 0, 1, 2, 3, 4, 5, 6, 7
};
/* Bit 31 is the sf (width) bit of most data processing instructions;
   the opcode templates below encode the 64 bit form and are XOR-ed
   with W_OP to obtain the 32 bit form (see the INT_OP handling in the
   emitters). */
#define W_OP (1u << 31)
/* Register field placement: Rd/Rt at bits 0-4, Rn at 5-9, Rt2 at
   10-14, Rm at 16-20. The V* variants use the float register map. */
#define RD(rd) (reg_map[rd])
#define RT(rt) (reg_map[rt])
#define RN(rn) (reg_map[rn] << 5)
#define RT2(rt2) (reg_map[rt2] << 10)
#define RM(rm) (reg_map[rm] << 16)
#define VD(vd) (freg_map[vd])
#define VT(vt) (freg_map[vt])
#define VN(vn) (freg_map[vn] << 5)
#define VM(vm) (freg_map[vm] << 16)
  56. /* --------------------------------------------------------------------- */
/* Instruction forms */
  58. /* --------------------------------------------------------------------- */
/* AArch64 opcode templates (base encodings; register and immediate
   fields are OR-ed in by the emitters, width selected via W_OP). */
#define ADC 0x9a000000
#define ADD 0x8b000000
#define ADDE 0x8b200000
#define ADDI 0x91000000
#define AND 0x8a000000
#define ANDI 0x92000000
#define ASRV 0x9ac02800
#define B 0x14000000
#define B_CC 0x54000000
#define BL 0x94000000
#define BLR 0xd63f0000
#define BR 0xd61f0000
#define BRK 0xd4200000
#define CBZ 0xb4000000
#define CLZ 0xdac01000
#define CSEL 0x9a800000
#define CSINC 0x9a800400
#define EOR 0xca000000
#define EORI 0xd2000000
#define FABS 0x1e60c000
#define FADD 0x1e602800
#define FCMP 0x1e602000
#define FCVT 0x1e224000
#define FCVTZS 0x9e780000
#define FDIV 0x1e601800
#define FMOV 0x1e604000
#define FMUL 0x1e600800
#define FNEG 0x1e614000
#define FSUB 0x1e603800
#define LDRI 0xf9400000
#define LDP 0xa9400000
#define LDP_PRE 0xa9c00000
#define LDR_PRE 0xf8400c00
#define LSLV 0x9ac02000
#define LSRV 0x9ac02400
#define MADD 0x9b000000
#define MOVK 0xf2800000
#define MOVN 0x92800000
#define MOVZ 0xd2800000
#define NOP 0xd503201f
#define ORN 0xaa200000
#define ORR 0xaa000000
#define ORRI 0xb2000000
#define RET 0xd65f0000
#define SBC 0xda000000
#define SBFM 0x93000000
#define SCVTF 0x9e620000
#define SDIV 0x9ac00c00
#define SMADDL 0x9b200000
#define SMULH 0x9b403c00
#define STP 0xa9000000
#define STP_PRE 0xa9800000
#define STRB 0x38206800
#define STRBI 0x39000000
#define STRI 0xf9000000
#define STR_FI 0x3d000000
#define STR_FR 0x3c206800
#define STUR_FI 0x3c000000
#define STURBI 0x38000000
#define SUB 0xcb000000
#define SUBI 0xd1000000
#define SUBS 0xeb000000
#define UBFM 0xd3000000
#define UDIV 0x9ac00800
#define UMULH 0x9bc03c00
/* Appends one 32 bit instruction word to the compiler's code buffer and
   bumps the instruction count. Returns SLJIT_SUCCESS, or an error code
   if the buffer could not grow. (The "delay slot" comment carried over
   from delay-slot architectures does not apply on AArch64.) */
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
{
	sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
	FAIL_IF(!ptr);
	*ptr = ins;
	compiler->size++;
	return SLJIT_SUCCESS;
}
  134. static SLJIT_INLINE sljit_s32 emit_imm64_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_uw imm)
  135. {
  136. FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
  137. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 16) & 0xffff) << 5) | (1 << 21)));
  138. FAIL_IF(push_inst(compiler, MOVK | RD(dst) | (((imm >> 32) & 0xffff) << 5) | (2 << 21)));
  139. return push_inst(compiler, MOVK | RD(dst) | ((imm >> 48) << 5) | (3 << 21));
  140. }
  141. static SLJIT_INLINE void modify_imm64_const(sljit_ins* inst, sljit_uw new_imm)
  142. {
  143. sljit_s32 dst = inst[0] & 0x1f;
  144. SLJIT_ASSERT((inst[0] & 0xffe00000) == MOVZ && (inst[1] & 0xffe00000) == (MOVK | (1 << 21)));
  145. inst[0] = MOVZ | dst | ((new_imm & 0xffff) << 5);
  146. inst[1] = MOVK | dst | (((new_imm >> 16) & 0xffff) << 5) | (1 << 21);
  147. inst[2] = MOVK | dst | (((new_imm >> 32) & 0xffff) << 5) | (2 << 21);
  148. inst[3] = MOVK | dst | ((new_imm >> 48) << 5) | (3 << 21);
  149. }
/* Tries to shrink the worst-case jump sequence (MOVZ + 3*MOVK + BR/BLR,
   optionally preceded by a conditional branch that skips it) into a
   shorter PC-relative or narrower absolute form. Records the chosen
   patch strategy in jump->flags and returns the number of instruction
   words saved (0..5); the caller rewinds code_ptr by that amount. */
static SLJIT_INLINE sljit_sw detect_jump_type(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code, sljit_sw executable_offset)
{
	sljit_sw diff;
	sljit_uw target_addr;
	if (jump->flags & SLJIT_REWRITABLE_JUMP) {
		/* Rewritable jumps must keep the full 64 bit immediate sequence. */
		jump->flags |= PATCH_ABS64;
		return 0;
	}
	if (jump->flags & JUMP_ADDR)
		target_addr = jump->u.target;
	else {
		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
		/* Label addresses are not final yet; label->size holds the word
		   index in the output buffer at this point. */
		target_addr = (sljit_uw)(code + jump->u.label->size) + (sljit_uw)executable_offset;
	}
	/* code_ptr + 4 is the end of the full sequence. */
	diff = (sljit_sw)target_addr - (sljit_sw)(code_ptr + 4) - executable_offset;
	if (jump->flags & IS_COND) {
		diff += sizeof(sljit_ins);
		if (diff <= 0xfffff && diff >= -0x100000) {
			/* Target within +-1 MB: fold the whole sequence into the
			   preceding conditional branch by inverting its condition
			   (bit 24 for CBZ/CBNZ, bit 0 of the cond field for B.cc). */
			code_ptr[-5] ^= (jump->flags & IS_CBZ) ? (0x1 << 24) : 0x1;
			jump->addr -= sizeof(sljit_ins);
			jump->flags |= PATCH_COND;
			return 5;
		}
		diff -= sizeof(sljit_ins);
	}
	if (diff <= 0x7ffffff && diff >= -0x8000000) {
		/* Target within +-128 MB: a single B/BL suffices. */
		jump->flags |= PATCH_B;
		return 4;
	}
	if (target_addr < 0x100000000l) {
		/* 32 bit absolute target: MOVZ + one MOVK. The conditional
		   branch (if any) skips two fewer words. */
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (2 << 5);
		code_ptr[-2] = code_ptr[0];
		return 2;
	}
	if (target_addr < 0x1000000000000l) {
		/* 48 bit absolute target: three MOVx instructions. */
		if (jump->flags & IS_COND)
			code_ptr[-5] -= (1 << 5);
		jump->flags |= PATCH_ABS48;
		code_ptr[-1] = code_ptr[0];
		return 1;
	}
	/* Full 64 bit sequence required. */
	jump->flags |= PATCH_ABS64;
	return 0;
}
  195. static SLJIT_INLINE sljit_sw put_label_get_length(struct sljit_put_label *put_label, sljit_uw max_label)
  196. {
  197. if (max_label < 0x100000000l) {
  198. put_label->flags = 0;
  199. return 2;
  200. }
  201. if (max_label < 0x1000000000000l) {
  202. put_label->flags = 1;
  203. return 1;
  204. }
  205. put_label->flags = 2;
  206. return 0;
  207. }
/* Final code generation pass: copies the instruction words collected in
   the compiler's memory fragments into a freshly allocated executable
   buffer, shrinking jump/put_label sequences where possible, then
   patches all jumps, constants and put_labels. Returns the executable
   code pointer, or NULL on failure. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
{
	struct sljit_memory_fragment *buf;
	sljit_ins *code;
	sljit_ins *code_ptr;
	sljit_ins *buf_ptr;
	sljit_ins *buf_end;
	sljit_uw word_count;
	sljit_uw next_addr;
	sljit_sw executable_offset;
	sljit_uw addr;
	sljit_s32 dst;
	struct sljit_label *label;
	struct sljit_jump *jump;
	struct sljit_const *const_;
	struct sljit_put_label *put_label;
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_generate_code(compiler));
	reverse_buf(compiler);
	/* Worst case: one output word per recorded word; shrinking can only
	   make the code smaller. */
	code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins));
	PTR_FAIL_WITH_EXEC_IF(code);
	buf = compiler->buf;
	code_ptr = code;
	word_count = 0;
	next_addr = 0;
	executable_offset = SLJIT_EXEC_OFFSET(code);
	label = compiler->labels;
	jump = compiler->jumps;
	const_ = compiler->consts;
	put_label = compiler->put_labels;
	do {
		buf_ptr = (sljit_ins*)buf->memory;
		buf_end = buf_ptr + (buf->used_size >> 2);
		do {
			*code_ptr = *buf_ptr++;
			/* next_addr is the smallest pending address among the four
			   lists, so most words skip the block below entirely. */
			if (next_addr == word_count) {
				SLJIT_ASSERT(!label || label->size >= word_count);
				SLJIT_ASSERT(!jump || jump->addr >= word_count);
				SLJIT_ASSERT(!const_ || const_->addr >= word_count);
				SLJIT_ASSERT(!put_label || put_label->addr >= word_count);
				/* These structures are ordered by their address. */
				if (label && label->size == word_count) {
					label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
					/* Reused as the word index in the output buffer. */
					label->size = code_ptr - code;
					label = label->next;
				}
				if (jump && jump->addr == word_count) {
					/* Point at the start of the 5-word jump sequence,
					   then reclaim whatever detect_jump_type saved. */
					jump->addr = (sljit_uw)(code_ptr - 4);
					code_ptr -= detect_jump_type(jump, code_ptr, code, executable_offset);
					jump = jump->next;
				}
				if (const_ && const_->addr == word_count) {
					const_->addr = (sljit_uw)code_ptr;
					const_ = const_->next;
				}
				if (put_label && put_label->addr == word_count) {
					SLJIT_ASSERT(put_label->label);
					put_label->addr = (sljit_uw)(code_ptr - 3);
					code_ptr -= put_label_get_length(put_label, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size));
					put_label = put_label->next;
				}
				next_addr = compute_next_addr(label, jump, const_, put_label);
			}
			code_ptr ++;
			word_count ++;
		} while (buf_ptr < buf_end);
		buf = buf->next;
	} while (buf);
	/* A label may sit exactly at the end of the code. */
	if (label && label->size == word_count) {
		label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
		label->size = code_ptr - code;
		label = label->next;
	}
	SLJIT_ASSERT(!label);
	SLJIT_ASSERT(!jump);
	SLJIT_ASSERT(!const_);
	SLJIT_ASSERT(!put_label);
	SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size);
	/* Second pass over the jumps: label addresses are final now, so the
	   placeholder words can be patched. */
	jump = compiler->jumps;
	while (jump) {
		do {
			addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target;
			buf_ptr = (sljit_ins *)jump->addr;
			if (jump->flags & PATCH_B) {
				/* Unconditional B/BL with a 26 bit word offset. */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x1ffffff && (sljit_sw)addr >= -0x2000000);
				buf_ptr[0] = ((jump->flags & IS_BL) ? BL : B) | (addr & 0x3ffffff);
				if (jump->flags & IS_COND)
					buf_ptr[-1] -= (4 << 5);
				break;
			}
			if (jump->flags & PATCH_COND) {
				/* Conditional branch with a 19 bit word offset at bit 5. */
				addr = (sljit_sw)(addr - (sljit_uw)SLJIT_ADD_EXEC_OFFSET(buf_ptr, executable_offset)) >> 2;
				SLJIT_ASSERT((sljit_sw)addr <= 0x3ffff && (sljit_sw)addr >= -0x40000);
				buf_ptr[0] = (buf_ptr[0] & ~0xffffe0) | ((addr & 0x7ffff) << 5);
				break;
			}
			/* Absolute address: rebuild the MOVZ/MOVK sequence with as
			   many halfwords as the patch width requires. */
			SLJIT_ASSERT((jump->flags & (PATCH_ABS48 | PATCH_ABS64)) || addr <= 0xffffffffl);
			SLJIT_ASSERT((jump->flags & PATCH_ABS64) || addr <= 0xffffffffffffl);
			dst = buf_ptr[0] & 0x1f;
			buf_ptr[0] = MOVZ | dst | ((addr & 0xffff) << 5);
			buf_ptr[1] = MOVK | dst | (((addr >> 16) & 0xffff) << 5) | (1 << 21);
			if (jump->flags & (PATCH_ABS48 | PATCH_ABS64))
				buf_ptr[2] = MOVK | dst | (((addr >> 32) & 0xffff) << 5) | (2 << 21);
			if (jump->flags & PATCH_ABS64)
				buf_ptr[3] = MOVK | dst | (((addr >> 48) & 0xffff) << 5) | (3 << 21);
		} while (0);
		jump = jump->next;
	}
	/* Patch put_labels: the immediate fields were left zero, so OR-ing
	   the halfwords in is sufficient. */
	put_label = compiler->put_labels;
	while (put_label) {
		addr = put_label->label->addr;
		buf_ptr = (sljit_ins *)put_label->addr;
		buf_ptr[0] |= (addr & 0xffff) << 5;
		buf_ptr[1] |= ((addr >> 16) & 0xffff) << 5;
		if (put_label->flags >= 1)
			buf_ptr[2] |= ((addr >> 32) & 0xffff) << 5;
		if (put_label->flags >= 2)
			buf_ptr[3] |= ((addr >> 48) & 0xffff) << 5;
		put_label = put_label->next;
	}
	compiler->error = SLJIT_ERR_COMPILED;
	compiler->executable_offset = executable_offset;
	compiler->executable_size = (code_ptr - code) * sizeof(sljit_ins);
	code = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
	code_ptr = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
	SLJIT_CACHE_FLUSH(code, code_ptr);
	return code;
}
  337. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
  338. {
  339. switch (feature_type) {
  340. case SLJIT_HAS_FPU:
  341. #ifdef SLJIT_IS_FPU_AVAILABLE
  342. return SLJIT_IS_FPU_AVAILABLE;
  343. #else
  344. /* Available by default. */
  345. return 1;
  346. #endif
  347. case SLJIT_HAS_CLZ:
  348. case SLJIT_HAS_CMOV:
  349. return 1;
  350. default:
  351. return 0;
  352. }
  353. }
/* --------------------------------------------------------------------- */
/* Core code generator functions. */
/* --------------------------------------------------------------------- */
/* Counts the trailing zero bits of a 64 bit value by binary search.
   Both arguments must be plain lvalues (each is evaluated several
   times), and value is shifted right by the computed count as a side
   effect. result is 64 if value is 0. */
#define COUNT_TRAILING_ZERO(value, result) \
	result = 0; \
	if (!(value & 0xffffffff)) { \
		result += 32; \
		value >>= 32; \
	} \
	if (!(value & 0xffff)) { \
		result += 16; \
		value >>= 16; \
	} \
	if (!(value & 0xff)) { \
		result += 8; \
		value >>= 8; \
	} \
	if (!(value & 0xf)) { \
		result += 4; \
		value >>= 4; \
	} \
	if (!(value & 0x3)) { \
		result += 2; \
		value >>= 2; \
	} \
	if (!(value & 0x1)) { \
		result += 1; \
		value >>= 1; \
	}
/* Flag for logical_imm(): reject the all-zero / all-one special cases
   instead of asserting on them. */
#define LOGICAL_IMM_CHECK 0x100
/* Attempts to encode imm as an AArch64 "bitmask immediate" (a rotated,
   replicated run of contiguous one bits) for the logical-immediate
   instruction class. len is 32 for 64 bit operations and 16 for 32 bit
   ones (halved element size; doubled below). Returns the N:immr:imms
   field bits already shifted into instruction position, or 0 when imm
   is not encodable (only possible with LOGICAL_IMM_CHECK set). */
static sljit_ins logical_imm(sljit_sw imm, sljit_s32 len)
{
	sljit_s32 negated, ones, right;
	sljit_uw mask, uimm;
	sljit_ins ins;
	if (len & LOGICAL_IMM_CHECK) {
		len &= ~LOGICAL_IMM_CHECK;
		/* 0 and -1 have no bitmask encoding. */
		if (len == 32 && (imm == 0 || imm == -1))
			return 0;
		if (len == 16 && ((sljit_s32)imm == 0 || (sljit_s32)imm == -1))
			return 0;
	}
	SLJIT_ASSERT((len == 32 && imm != 0 && imm != -1)
		|| (len == 16 && (sljit_s32)imm != 0 && (sljit_s32)imm != -1));
	uimm = (sljit_uw)imm;
	/* Find the smallest element size whose pattern replicates across
	   the whole value: halve len while both halves are equal. */
	while (1) {
		if (len <= 0) {
			SLJIT_UNREACHABLE();
			return 0;
		}
		mask = ((sljit_uw)1 << len) - 1;
		if ((uimm & mask) != ((uimm >> len) & mask))
			break;
		len >>= 1;
	}
	/* len is now the element size in bits. */
	len <<= 1;
	negated = 0;
	/* Normalize so the pattern starts with a zero bit; remember that the
	   final rotation/length must be computed on the complement. */
	if (uimm & 0x1) {
		negated = 1;
		uimm = ~uimm;
	}
	if (len < 64)
		uimm &= ((sljit_uw)1 << len) - 1;
	/* Unsigned right shift. */
	COUNT_TRAILING_ZERO(uimm, right);
	/* Signed shift. We also know that the highest bit is set. */
	imm = (sljit_sw)~uimm;
	SLJIT_ASSERT(imm < 0);
	COUNT_TRAILING_ZERO(imm, ones);
	/* After shifting out the run of ones, every remaining bit must be
	   set (imm == -1); otherwise the ones are not contiguous. */
	if (~imm)
		return 0;
	/* N bit (bit 22) selects the 64 bit element; smaller elements are
	   encoded in the top bits of imms. */
	if (len == 64)
		ins = 1 << 22;
	else
		ins = (0x3f - ((len << 1) - 1)) << 10;
	if (negated)
		return ins | ((len - ones - 1) << 10) | ((len - ones - right) << 16);
	return ins | ((ones - 1) << 10) | ((len - right) << 16);
}
#undef COUNT_TRAILING_ZERO
/* Loads the 64 bit immediate simm into dst using the shortest sequence
   of MOVZ/MOVN/MOVK/ORR-immediate instructions this heuristic finds
   (at most four instructions). */
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw simm)
{
	sljit_uw imm = (sljit_uw)simm;
	sljit_s32 i, zeros, ones, first;
	sljit_ins bitmask;
	/* Handling simple immediates first. */
	if (imm <= 0xffff)
		return push_inst(compiler, MOVZ | RD(dst) | (imm << 5));
	if (simm < 0 && simm >= -0x10000)
		return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5));
	/* Values fitting in 32 bits: try single-instruction forms (32 bit
	   MOVN zero-extends via the W register write), then a logical
	   immediate, then MOVZ + MOVK. */
	if (imm <= 0xffffffffl) {
		if ((imm & 0xffff) == 0)
			return push_inst(compiler, MOVZ | RD(dst) | ((imm >> 16) << 5) | (1 << 21));
		if ((imm & 0xffff0000l) == 0xffff0000)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff) << 5));
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, (MOVN ^ W_OP) | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
		bitmask = logical_imm(simm, 16);
		if (bitmask != 0)
			return push_inst(compiler, (ORRI ^ W_OP) | RD(dst) | RN(TMP_ZERO) | bitmask);
		FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}
	/* Full width logical immediate (ORR from the zero register). */
	bitmask = logical_imm(simm, 32);
	if (bitmask != 0)
		return push_inst(compiler, ORRI | RD(dst) | RN(TMP_ZERO) | bitmask);
	/* Negative values fitting in 33 bits: MOVN (+ MOVK). */
	if (simm < 0 && simm >= -0x100000000l) {
		if ((imm & 0xffff) == 0xffff)
			return push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
		FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((~imm & 0xffff) << 5)));
		return push_inst(compiler, MOVK | RD(dst) | ((imm & 0xffff0000l) >> (16 - 5)) | (1 << 21));
	}
	/* A large amount of number can be constructed from ORR and MOVx, but computing them is costly. */
	/* Count all-zero and all-one halfwords to decide whether a MOVZ- or
	   MOVN-based sequence needs fewer instructions. */
	zeros = 0;
	ones = 0;
	for (i = 4; i > 0; i--) {
		if ((simm & 0xffff) == 0)
			zeros++;
		if ((simm & 0xffff) == 0xffff)
			ones++;
		simm >>= 16;
	}
	simm = (sljit_sw)imm;
	first = 1;
	if (ones > zeros) {
		/* MOVN-based: work on the complement; all-one halfwords of the
		   original (now zero) can be skipped. */
		simm = ~simm;
		for (i = 0; i < 4; i++) {
			if (!(simm & 0xffff)) {
				simm >>= 16;
				continue;
			}
			if (first) {
				first = 0;
				FAIL_IF(push_inst(compiler, MOVN | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
			}
			else
				FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((~simm & 0xffff) << 5) | (i << 21)));
			simm >>= 16;
		}
		return SLJIT_SUCCESS;
	}
	/* MOVZ-based: all-zero halfwords can be skipped. */
	for (i = 0; i < 4; i++) {
		if (!(simm & 0xffff)) {
			simm >>= 16;
			continue;
		}
		if (first) {
			first = 0;
			FAIL_IF(push_inst(compiler, MOVZ | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		}
		else
			FAIL_IF(push_inst(compiler, MOVK | RD(dst) | ((simm & 0xffff) << 5) | (i << 21)));
		simm >>= 16;
	}
	return SLJIT_SUCCESS;
}
/* emit_op_imm() flag bits; the low 16 bits of flags carry the opcode. */
#define ARG1_IMM 0x0010000
#define ARG2_IMM 0x0020000
#define INT_OP 0x0040000
#define SET_FLAGS 0x0080000
#define UNUSED_RETURN 0x0100000
/* If the operation must set the condition flags, merge flag_bits (the
   S bit of the encoding) into inv_bits, and discard the result into
   the zero register when only the flags are needed. Relies on the
   flags/inv_bits/dst locals of emit_op_imm(). */
#define CHECK_FLAGS(flag_bits) \
	if (flags & SET_FLAGS) { \
		inv_bits |= flag_bits; \
		if (flags & UNUSED_RETURN) \
			dst = TMP_ZERO; \
	}
  521. static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 dst, sljit_sw arg1, sljit_sw arg2)
  522. {
  523. /* dst must be register, TMP_REG1
  524. arg1 must be register, TMP_REG1, imm
  525. arg2 must be register, TMP_REG2, imm */
  526. sljit_ins inv_bits = (flags & INT_OP) ? W_OP : 0;
  527. sljit_ins inst_bits;
  528. sljit_s32 op = (flags & 0xffff);
  529. sljit_s32 reg;
  530. sljit_sw imm, nimm;
  531. if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) {
  532. /* Both are immediates. */
  533. flags &= ~ARG1_IMM;
  534. if (arg1 == 0 && op != SLJIT_ADD && op != SLJIT_SUB)
  535. arg1 = TMP_ZERO;
  536. else {
  537. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  538. arg1 = TMP_REG1;
  539. }
  540. }
  541. if (flags & (ARG1_IMM | ARG2_IMM)) {
  542. reg = (flags & ARG2_IMM) ? arg1 : arg2;
  543. imm = (flags & ARG2_IMM) ? arg2 : arg1;
  544. switch (op) {
  545. case SLJIT_MUL:
  546. case SLJIT_NEG:
  547. case SLJIT_CLZ:
  548. case SLJIT_ADDC:
  549. case SLJIT_SUBC:
  550. /* No form with immediate operand (except imm 0, which
  551. is represented by a ZERO register). */
  552. break;
  553. case SLJIT_MOV:
  554. SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1);
  555. return load_immediate(compiler, dst, imm);
  556. case SLJIT_NOT:
  557. SLJIT_ASSERT(flags & ARG2_IMM);
  558. FAIL_IF(load_immediate(compiler, dst, (flags & INT_OP) ? (~imm & 0xffffffff) : ~imm));
  559. goto set_flags;
  560. case SLJIT_SUB:
  561. if (flags & ARG1_IMM)
  562. break;
  563. imm = -imm;
  564. /* Fall through. */
  565. case SLJIT_ADD:
  566. if (imm == 0) {
  567. CHECK_FLAGS(1 << 29);
  568. return push_inst(compiler, ((op == SLJIT_ADD ? ADDI : SUBI) ^ inv_bits) | RD(dst) | RN(reg));
  569. }
  570. if (imm > 0 && imm <= 0xfff) {
  571. CHECK_FLAGS(1 << 29);
  572. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | (imm << 10));
  573. }
  574. nimm = -imm;
  575. if (nimm > 0 && nimm <= 0xfff) {
  576. CHECK_FLAGS(1 << 29);
  577. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | (nimm << 10));
  578. }
  579. if (imm > 0 && imm <= 0xffffff && !(imm & 0xfff)) {
  580. CHECK_FLAGS(1 << 29);
  581. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22));
  582. }
  583. if (nimm > 0 && nimm <= 0xffffff && !(nimm & 0xfff)) {
  584. CHECK_FLAGS(1 << 29);
  585. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22));
  586. }
  587. if (imm > 0 && imm <= 0xffffff && !(flags & SET_FLAGS)) {
  588. FAIL_IF(push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(reg) | ((imm >> 12) << 10) | (1 << 22)));
  589. return push_inst(compiler, (ADDI ^ inv_bits) | RD(dst) | RN(dst) | ((imm & 0xfff) << 10));
  590. }
  591. if (nimm > 0 && nimm <= 0xffffff && !(flags & SET_FLAGS)) {
  592. FAIL_IF(push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(reg) | ((nimm >> 12) << 10) | (1 << 22)));
  593. return push_inst(compiler, (SUBI ^ inv_bits) | RD(dst) | RN(dst) | ((nimm & 0xfff) << 10));
  594. }
  595. break;
  596. case SLJIT_AND:
  597. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  598. if (!inst_bits)
  599. break;
  600. CHECK_FLAGS(3 << 29);
  601. return push_inst(compiler, (ANDI ^ inv_bits) | RD(dst) | RN(reg) | inst_bits);
  602. case SLJIT_OR:
  603. case SLJIT_XOR:
  604. inst_bits = logical_imm(imm, LOGICAL_IMM_CHECK | ((flags & INT_OP) ? 16 : 32));
  605. if (!inst_bits)
  606. break;
  607. if (op == SLJIT_OR)
  608. inst_bits |= ORRI;
  609. else
  610. inst_bits |= EORI;
  611. FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg)));
  612. goto set_flags;
  613. case SLJIT_SHL:
  614. if (flags & ARG1_IMM)
  615. break;
  616. if (flags & INT_OP) {
  617. imm &= 0x1f;
  618. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | ((-imm & 0x1f) << 16) | ((31 - imm) << 10)));
  619. }
  620. else {
  621. imm &= 0x3f;
  622. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | ((-imm & 0x3f) << 16) | ((63 - imm) << 10)));
  623. }
  624. goto set_flags;
  625. case SLJIT_LSHR:
  626. case SLJIT_ASHR:
  627. if (flags & ARG1_IMM)
  628. break;
  629. if (op == SLJIT_ASHR)
  630. inv_bits |= 1 << 30;
  631. if (flags & INT_OP) {
  632. imm &= 0x1f;
  633. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (imm << 16) | (31 << 10)));
  634. }
  635. else {
  636. imm &= 0x3f;
  637. FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22) | (imm << 16) | (63 << 10)));
  638. }
  639. goto set_flags;
  640. default:
  641. SLJIT_UNREACHABLE();
  642. break;
  643. }
  644. if (flags & ARG2_IMM) {
  645. if (arg2 == 0)
  646. arg2 = TMP_ZERO;
  647. else {
  648. FAIL_IF(load_immediate(compiler, TMP_REG2, arg2));
  649. arg2 = TMP_REG2;
  650. }
  651. }
  652. else {
  653. if (arg1 == 0)
  654. arg1 = TMP_ZERO;
  655. else {
  656. FAIL_IF(load_immediate(compiler, TMP_REG1, arg1));
  657. arg1 = TMP_REG1;
  658. }
  659. }
  660. }
  661. /* Both arguments are registers. */
  662. switch (op) {
  663. case SLJIT_MOV:
  664. case SLJIT_MOV_P:
  665. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  666. if (dst == arg2)
  667. return SLJIT_SUCCESS;
  668. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  669. case SLJIT_MOV_U8:
  670. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  671. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (7 << 10));
  672. case SLJIT_MOV_S8:
  673. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  674. if (!(flags & INT_OP))
  675. inv_bits |= 1 << 22;
  676. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (7 << 10));
  677. case SLJIT_MOV_U16:
  678. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  679. return push_inst(compiler, (UBFM ^ W_OP) | RD(dst) | RN(arg2) | (15 << 10));
  680. case SLJIT_MOV_S16:
  681. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  682. if (!(flags & INT_OP))
  683. inv_bits |= 1 << 22;
  684. return push_inst(compiler, (SBFM ^ inv_bits) | RD(dst) | RN(arg2) | (15 << 10));
  685. case SLJIT_MOV_U32:
  686. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  687. if ((flags & INT_OP) && dst == arg2)
  688. return SLJIT_SUCCESS;
  689. return push_inst(compiler, (ORR ^ W_OP) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  690. case SLJIT_MOV_S32:
  691. SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1);
  692. if ((flags & INT_OP) && dst == arg2)
  693. return SLJIT_SUCCESS;
  694. return push_inst(compiler, SBFM | (1 << 22) | RD(dst) | RN(arg2) | (31 << 10));
  695. case SLJIT_NOT:
  696. SLJIT_ASSERT(arg1 == TMP_REG1);
  697. FAIL_IF(push_inst(compiler, (ORN ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2)));
  698. break; /* Set flags. */
  699. case SLJIT_NEG:
  700. SLJIT_ASSERT(arg1 == TMP_REG1);
  701. if (flags & SET_FLAGS)
  702. inv_bits |= 1 << 29;
  703. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(TMP_ZERO) | RM(arg2));
  704. case SLJIT_CLZ:
  705. SLJIT_ASSERT(arg1 == TMP_REG1);
  706. return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2));
  707. case SLJIT_ADD:
  708. CHECK_FLAGS(1 << 29);
  709. return push_inst(compiler, (ADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  710. case SLJIT_ADDC:
  711. CHECK_FLAGS(1 << 29);
  712. return push_inst(compiler, (ADC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  713. case SLJIT_SUB:
  714. CHECK_FLAGS(1 << 29);
  715. return push_inst(compiler, (SUB ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  716. case SLJIT_SUBC:
  717. CHECK_FLAGS(1 << 29);
  718. return push_inst(compiler, (SBC ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  719. case SLJIT_MUL:
  720. if (!(flags & SET_FLAGS))
  721. return push_inst(compiler, (MADD ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO));
  722. if (flags & INT_OP) {
  723. FAIL_IF(push_inst(compiler, SMADDL | RD(dst) | RN(arg1) | RM(arg2) | (31 << 10)));
  724. FAIL_IF(push_inst(compiler, ADD | RD(TMP_LR) | RN(TMP_ZERO) | RM(dst) | (2 << 22) | (31 << 10)));
  725. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  726. }
  727. FAIL_IF(push_inst(compiler, SMULH | RD(TMP_LR) | RN(arg1) | RM(arg2)));
  728. FAIL_IF(push_inst(compiler, MADD | RD(dst) | RN(arg1) | RM(arg2) | RT2(TMP_ZERO)));
  729. return push_inst(compiler, SUBS | RD(TMP_ZERO) | RN(TMP_LR) | RM(dst) | (2 << 22) | (63 << 10));
  730. case SLJIT_AND:
  731. CHECK_FLAGS(3 << 29);
  732. return push_inst(compiler, (AND ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
  733. case SLJIT_OR:
  734. FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  735. break; /* Set flags. */
  736. case SLJIT_XOR:
  737. FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  738. break; /* Set flags. */
  739. case SLJIT_SHL:
  740. FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  741. break; /* Set flags. */
  742. case SLJIT_LSHR:
  743. FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  744. break; /* Set flags. */
  745. case SLJIT_ASHR:
  746. FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
  747. break; /* Set flags. */
  748. default:
  749. SLJIT_UNREACHABLE();
  750. return SLJIT_SUCCESS;
  751. }
  752. set_flags:
  753. if (flags & SET_FLAGS)
  754. return push_inst(compiler, (SUBS ^ inv_bits) | RD(TMP_ZERO) | RN(dst) | RM(TMP_ZERO));
  755. return SLJIT_SUCCESS;
  756. }
/* Flags for emit_op_mem / emit_fop_mem: access direction and signedness. */
#define STORE 0x10
#define SIGNED 0x20

/* Access size in the two low flag bits: log2 of the access width in bytes. */
#define BYTE_SIZE 0x0
#define HALF_SIZE 0x1
#define INT_SIZE 0x2
#define WORD_SIZE 0x3

/* Extracts the size shift from a flags value. */
#define MEM_SIZE_SHIFT(flags) ((flags) & 0x3)
/* Emits a single integer load or store.
   flags: access size (MEM_SIZE_SHIFT) plus the STORE / SIGNED bits.
   reg: register loaded into or stored from.
   arg/argw: sljit memory operand (base, optional index with shift, or absolute).
   tmp_reg: scratch register that may be clobbered to materialize the address.
   Note: the STRB/STRBI/STURBI base opcodes are widened to the requested size
   and turned into loads by or-ing 'type' (size in bits 30-31, load bits in
   22-23). */
static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg,
	sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	sljit_u32 type = (shift << 30);

	if (!(flags & STORE))
		type |= (flags & SIGNED) ? 0x00800000 : 0x00400000;

	SLJIT_ASSERT(arg & SLJIT_MEM);

	if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
		/* Register + (register << shift) form. */
		argw &= 0x3;

		/* The register-offset form only supports a shift of 0 or the
		   access-size shift; otherwise compute the address in tmp_reg. */
		if (argw == 0 || argw == shift)
			return push_inst(compiler, STRB | type | RT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		FAIL_IF(push_inst(compiler, ADD | RD(tmp_reg) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: load the aligned part, keep the low 12 bits
		   (scaled) as the unsigned immediate offset. */
		FAIL_IF(load_immediate(compiler, tmp_reg, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Naturally aligned, non-negative offset. */
		if ((argw >> shift) <= 0xfff) {
			/* Fits in the scaled 12 bit unsigned immediate. */
			return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | (argw << (10 - shift)));
		}

		if (argw <= 0xffffff) {
			/* ADD (shifted 12 bit immediate) folds the upper part. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed immediate form (STUR/LDUR family). */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: materialize the offset and use the register-offset form. */
	FAIL_IF(load_immediate(compiler, tmp_reg, argw));

	return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg));
}
  801. /* --------------------------------------------------------------------- */
  802. /* Entry, exit */
  803. /* --------------------------------------------------------------------- */
/* Emits the function prologue: pushes FP/LR with a pre-indexed STP, stores
   the saved/scratch registers in pairs above them, moves the incoming
   argument registers into the S registers and allocates local_size bytes of
   stack. On Windows the allocation is done page by page (stack probing). */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 args, i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);

	/* Register area: saved + scratch registers plus 2 slots for FP/LR,
	   rounded up to a 16 byte multiple to keep SP aligned. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = (local_size + 15) & ~0xf;
	compiler->local_size = local_size + saved_regs_size;

	/* stp x29, x30, [sp, #-saved_regs_size]!  (7 bit scaled immediate) */
	FAIL_IF(push_inst(compiler, STP_PRE | RT(TMP_FP) | RT2(TMP_LR)
		| RN(SLJIT_SP) | ((-(saved_regs_size >> 3) & 0x7f) << 15)));

#ifdef _WIN32
	/* Prepare the probing pointer: TMP_REG1 = SP - 4096 (or - local_size). */
	if (local_size >= 4096)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
	else if (local_size > 256)
		FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(SLJIT_SP) | (local_size << 10)));
#endif

	tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
	prev = -1;
	/* STP offsets start right above the FP/LR pair (scaled, bit 15). */
	offs = 2 << 15;
	/* Store saved registers pairwise; an unpaired one is kept in 'prev'. */
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	/* Same for the callee-saved scratch registers. */
	for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, STP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	/* Odd register count: store the leftover register alone. */
	if (prev != -1)
		FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* Establish the frame pointer: mov x29, sp. */
	FAIL_IF(push_inst(compiler, ADDI | RD(TMP_FP) | RN(SLJIT_SP) | (0 << 10)));

	args = get_arg_count(arg_types);

	/* Move the incoming argument registers into the saved registers. */
	if (args >= 1)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0) | RN(TMP_ZERO) | RM(SLJIT_R0)));
	if (args >= 2)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S1) | RN(TMP_ZERO) | RM(SLJIT_R1)));
	if (args >= 3)
		FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S2) | RN(TMP_ZERO) | RM(SLJIT_R2)));

#ifdef _WIN32
	if (local_size >= 4096) {
		if (local_size < 4 * 4096) {
			/* No need for a loop: touch each page once, stepping
			   TMP_REG1 down 4096 bytes at a time. */
			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			if (local_size >= 2 * 4096) {
				FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
				FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
				local_size -= 4096;
			}

			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			local_size -= 4096;
		}
		else {
			/* Probe loop: TMP_REG2 counts the remaining pages, the
			   backward B_CC (-3 instructions) repeats load+sub. */
			FAIL_IF(push_inst(compiler, MOVZ | RD(TMP_REG2) | (((local_size >> 12) - 1) << 5)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (1 << 10) | (1 << 22)));
			FAIL_IF(push_inst(compiler, SUBI | (1 << 29) | RD(TMP_REG2) | RN(TMP_REG2) | (1 << 10)));
			FAIL_IF(push_inst(compiler, B_CC | ((((sljit_ins) -3) & 0x7ffff) << 5) | 0x1 /* not-equal */));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));

			local_size &= 0xfff;
		}

		if (local_size > 256) {
			FAIL_IF(push_inst(compiler, SUBI | RD(TMP_REG1) | RN(TMP_REG1) | (local_size << 10)));
			FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		}
		else if (local_size > 0)
			FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(TMP_REG1) | ((-local_size & 0x1ff) << 12)));

		/* mov sp, TMP_REG1 */
		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 256) {
		FAIL_IF(push_inst(compiler, LDRI | RT(TMP_ZERO) | RN(TMP_REG1)));
		FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(TMP_REG1) | (0 << 10)));
	}
	else if (local_size > 0)
		FAIL_IF(push_inst(compiler, LDR_PRE | RT(TMP_ZERO) | RN(SLJIT_SP) | ((-local_size & 0x1ff) << 12)));

#else /* !_WIN32 */

	/* The local_size does not include saved registers size. */
	if (local_size > 0xfff) {
		/* SUB with the 12 bit immediate shifted left by 12 (bit 22). */
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
		local_size &= 0xfff;
	}
	if (local_size != 0)
		FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

#endif /* _WIN32 */

	return SLJIT_SUCCESS;
}
  907. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
  908. sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
  909. sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
  910. {
  911. sljit_s32 saved_regs_size;
  912. CHECK_ERROR();
  913. CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
  914. set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
  915. saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
  916. if (saved_regs_size & 0x8)
  917. saved_regs_size += sizeof(sljit_sw);
  918. compiler->local_size = saved_regs_size + ((local_size + 15) & ~0xf);
  919. return SLJIT_SUCCESS;
  920. }
/* Emits the function epilogue: moves the return value into place, restores
   FP/LR and the saved registers (mirror image of sljit_emit_enter), pops the
   frame and returns. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 local_size;
	sljit_s32 i, tmp, offs, prev, saved_regs_size;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));

	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	/* Must match the padding computed in sljit_emit_enter. */
	saved_regs_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 2);
	if (saved_regs_size & 0x8)
		saved_regs_size += sizeof(sljit_sw);

	local_size = compiler->local_size - saved_regs_size;

	/* Load LR as early as possible. */
	if (local_size == 0)
		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	else if (local_size < 63 * sizeof(sljit_sw)) {
		/* Local area fits in the LDP pre-index immediate: pop locals and
		   FP/LR in one instruction. */
		FAIL_IF(push_inst(compiler, LDP_PRE | RT(TMP_FP) | RT2(TMP_LR)
			| RN(SLJIT_SP) | (local_size << (15 - 3))));
	}
	else {
		/* Release the local area with ADDs, then load FP/LR. */
		if (local_size > 0xfff) {
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | ((local_size >> 12) << 10) | (1 << 22)));
			local_size &= 0xfff;
		}
		if (local_size)
			FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (local_size << 10)));

		FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
	}

	tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
	prev = -1;
	offs = 2 << 15;
	/* Reload saved registers pairwise, same order as the prologue stores. */
	for (i = SLJIT_S0; i >= tmp; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	for (i = compiler->scratches; i >= SLJIT_FIRST_SAVED_REG; i--) {
		if (prev == -1) {
			prev = i;
			continue;
		}
		FAIL_IF(push_inst(compiler, LDP | RT(prev) | RT2(i) | RN(SLJIT_SP) | offs));
		offs += 2 << 15;
		prev = -1;
	}

	/* Odd register count: reload the leftover register alone. */
	if (prev != -1)
		FAIL_IF(push_inst(compiler, LDRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5)));

	/* These two can be executed in parallel. */
	FAIL_IF(push_inst(compiler, ADDI | RD(SLJIT_SP) | RN(SLJIT_SP) | (saved_regs_size << 10)));
	return push_inst(compiler, RET | RN(TMP_LR));
}
  975. /* --------------------------------------------------------------------- */
  976. /* Operators */
  977. /* --------------------------------------------------------------------- */
/* Emits a zero-operand operation. The multi-instruction sequences operate on
   the fixed register pair R0/R1 as required by the sljit ABI. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
{
	/* W_OP switches the encoding to the 32 bit register form. */
	sljit_ins inv_bits = (op & SLJIT_I32_OP) ? W_OP : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op0(compiler, op));
	op = GET_OPCODE(op);

	switch (op) {
	case SLJIT_BREAKPOINT:
		return push_inst(compiler, BRK);
	case SLJIT_NOP:
		return push_inst(compiler, NOP);
	case SLJIT_LMUL_UW:
	case SLJIT_LMUL_SW:
		/* Keep a copy of R0: the low product (MADD with zero addend)
		   overwrites it before the high half is computed. */
		FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIVMOD_UW:
	case SLJIT_DIVMOD_SW:
		/* R0 = quotient; R1 = dividend - quotient * divisor (remainder). */
		FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
		FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
		FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
		return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
	case SLJIT_DIV_UW:
	case SLJIT_DIV_SW:
		return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
	}

	return SLJIT_SUCCESS;
}
/* Emits a single-operand operation (moves, NOT, NEG, CLZ). Memory operands
   are loaded/stored through emit_op_mem; the arithmetic itself is delegated
   to emit_op_imm. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, flags, mem_flags;
	sljit_s32 op_flags = GET_ALL_FLAGS(op);

	CHECK_ERROR();
	CHECK(check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
		/* A move with unused destination and no flags is only useful as a
		   prefetch hint; anything else is a no-op. */
		if (op <= SLJIT_MOV_P && (src & SLJIT_MEM)) {
			/* The chosen 'dst' values map to hardware registers 0, 2
			   and 4, which select the prefetch variant below. */
			SLJIT_ASSERT(reg_map[1] == 0 && reg_map[3] == 2 && reg_map[5] == 4);

			if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8)
				dst = 5;
			else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16)
				dst = 3;
			else
				dst = 1;

			/* Signed word sized load is the prefetch instruction. */
			return emit_op_mem(compiler, WORD_SIZE | SIGNED, dst, src, srcw, TMP_REG1);
		}
		return SLJIT_SUCCESS;
	}

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;

	op = GET_OPCODE(op);
	if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
		/* Both operands are registers. */
		if (dst_r != TMP_REG1 && FAST_IS_REG(src))
			return emit_op_imm(compiler, op | ((op_flags & SLJIT_I32_OP) ? INT_OP : 0), dst_r, TMP_REG1, src);

		/* Pick the memory access size for the move variant and normalize
		   immediate sources to the destination width. */
		switch (op) {
		case SLJIT_MOV:
		case SLJIT_MOV_P:
			mem_flags = WORD_SIZE;
			break;
		case SLJIT_MOV_U8:
			mem_flags = BYTE_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u8)srcw;
			break;
		case SLJIT_MOV_S8:
			mem_flags = BYTE_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s8)srcw;
			break;
		case SLJIT_MOV_U16:
			mem_flags = HALF_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u16)srcw;
			break;
		case SLJIT_MOV_S16:
			mem_flags = HALF_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s16)srcw;
			break;
		case SLJIT_MOV_U32:
			mem_flags = INT_SIZE;
			if (src & SLJIT_IMM)
				srcw = (sljit_u32)srcw;
			break;
		case SLJIT_MOV_S32:
			mem_flags = INT_SIZE | SIGNED;
			if (src & SLJIT_IMM)
				srcw = (sljit_s32)srcw;
			break;
		default:
			SLJIT_UNREACHABLE();
			mem_flags = 0;
			break;
		}

		if (src & SLJIT_IMM)
			FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw));
		else if (!(src & SLJIT_MEM))
			dst_r = src;
		else
			FAIL_IF(emit_op_mem(compiler, mem_flags, dst_r, src, srcw, TMP_REG1));

		if (dst & SLJIT_MEM)
			return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
		return SLJIT_SUCCESS;
	}

	flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op_flags & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	if (src & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src, srcw, TMP_REG2));
		src = TMP_REG2;
	}

	/* Return value intentionally unchecked here: presumably a failure is
	   recorded in the compiler and caught by later calls — TODO confirm. */
	emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, src);

	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
/* Emits a two-operand operation. Memory operands are loaded into the
   temporary registers first; immediates are flagged so emit_op_imm can try
   to encode them directly. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, flags, mem_flags;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	/* Nothing observable: neither result nor flags are needed. */
	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
		return SLJIT_SUCCESS;

	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
	flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	if (dst == SLJIT_UNUSED)
		flags |= UNUSED_RETURN;

	if (src1 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, src1, src1w, TMP_REG1));
		src1 = TMP_REG1;
	}

	if (src2 & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG2, src2, src2w, TMP_REG2));
		src2 = TMP_REG2;
	}

	/* emit_op_imm takes register ids in the "w" slots unless the matching
	   ARG*_IMM flag marks the value as an immediate. */
	if (src1 & SLJIT_IMM)
		flags |= ARG1_IMM;
	else
		src1w = src1;

	if (src2 & SLJIT_IMM)
		flags |= ARG2_IMM;
	else
		src2w = src2;

	/* Return value intentionally unchecked here: presumably a failure is
	   recorded in the compiler and caught by later calls — TODO confirm. */
	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w);

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, mem_flags | STORE, dst_r, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1146. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
  1147. {
  1148. CHECK_REG_INDEX(check_sljit_get_register_index(reg));
  1149. return reg_map[reg];
  1150. }
  1151. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
  1152. {
  1153. CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
  1154. return freg_map[reg];
  1155. }
  1156. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
  1157. void *instruction, sljit_s32 size)
  1158. {
  1159. CHECK_ERROR();
  1160. CHECK(check_sljit_emit_op_custom(compiler, instruction, size));
  1161. return push_inst(compiler, *(sljit_ins*)instruction);
  1162. }
  1163. /* --------------------------------------------------------------------- */
  1164. /* Floating point operators */
  1165. /* --------------------------------------------------------------------- */
/* Emits a single floating point load or store; structure parallels
   emit_op_mem but uses the FP register file (VT) and always TMP_REG1 as the
   address scratch. flags: access size plus the STORE bit. */
static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags, sljit_s32 reg, sljit_s32 arg, sljit_sw argw)
{
	sljit_u32 shift = MEM_SIZE_SHIFT(flags);
	sljit_ins type = (shift << 30);

	SLJIT_ASSERT(arg & SLJIT_MEM);

	/* Turn the store opcode into a load when requested. */
	if (!(flags & STORE))
		type |= 0x00400000;

	if (arg & OFFS_REG_MASK) {
		/* Register + (register << shift) form; only a shift of 0 or the
		   access-size shift can be encoded directly. */
		argw &= 3;

		if (argw == 0 || argw == shift)
			return push_inst(compiler, STR_FR | type | VT(reg)
				| RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw ? (1 << 12) : 0));

		FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(arg & REG_MASK) | RM(OFFS_REG(arg)) | (argw << 10)));
		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1));
	}

	arg &= REG_MASK;

	if (arg == SLJIT_UNUSED) {
		/* Absolute address: aligned base in TMP_REG1, low 12 scaled bits
		   as immediate offset. */
		FAIL_IF(load_immediate(compiler, TMP_REG1, argw & ~(0xfff << shift)));

		argw = (argw >> shift) & 0xfff;

		return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
	}

	if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
		/* Naturally aligned offset fitting the scaled 12 bit field. */
		if ((argw >> shift) <= 0xfff)
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(arg) | (argw << (10 - shift)));

		if (argw <= 0xffffff) {
			/* Fold the upper part with a shifted-immediate ADD. */
			FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(TMP_REG1) | RN(arg) | ((argw >> 12) << 10)));

			argw = ((argw & 0xfff) >> shift);
			return push_inst(compiler, STR_FI | type | VT(reg) | RN(TMP_REG1) | (argw << 10));
		}
	}

	/* Unscaled 9 bit signed immediate form. */
	if (argw <= 255 && argw >= -256)
		return push_inst(compiler, STUR_FI | type | VT(reg) | RN(arg) | ((argw & 0x1ff) << 12));

	/* Fallback: materialize the offset and use the register-offset form. */
	FAIL_IF(load_immediate(compiler, TMP_REG1, argw));
	return push_inst(compiler, STR_FR | type | VT(reg) | RN(arg) | RM(TMP_REG1));
}
/* Converts a double/float to a signed integer (FCVTZS, round toward zero).
   inv_bits selects the single precision source (bit 22) and/or the 32 bit
   destination register (W_OP). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
		inv_bits |= W_OP;

	if (src & SLJIT_MEM) {
		/* Return value unchecked: presumably errors are sticky in the
		   compiler and caught by later calls — TODO confirm. */
		emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
		src = TMP_FREG1;
	}

	FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
/* Converts a signed integer to a double/float (SCVTF). inv_bits selects the
   single precision result (bit 22) and/or the 32 bit source register
   (W_OP). */
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
		inv_bits |= W_OP;

	if (src & SLJIT_MEM) {
		/* Return value unchecked: presumably errors are sticky in the
		   compiler and caught by later calls — TODO confirm. */
		emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
		src = TMP_REG1;
	} else if (src & SLJIT_IMM) {
/* NOTE(review): an x86-64 config guard inside the ARM64 backend — looks like
   shared/copied code. Dead on ARM64 builds; harmless because the W_OP form of
   SCVTF only reads the low 32 bits anyway — confirm against other backends. */
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
		if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
			srcw = (sljit_s32)srcw;
#endif
		FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
	}

	FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
	return SLJIT_SUCCESS;
}
  1242. static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
  1243. sljit_s32 src1, sljit_sw src1w,
  1244. sljit_s32 src2, sljit_sw src2w)
  1245. {
  1246. sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
  1247. sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
  1248. if (src1 & SLJIT_MEM) {
  1249. emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
  1250. src1 = TMP_FREG1;
  1251. }
  1252. if (src2 & SLJIT_MEM) {
  1253. emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
  1254. src2 = TMP_FREG2;
  1255. }
  1256. return push_inst(compiler, (FCMP ^ inv_bits) | VN(src1) | VM(src2));
  1257. }
/* Emits a single-operand floating point operation (move, negate, abs,
   precision conversion). Conversions to/from integers and compares are
   dispatched by SELECT_FOP1_OPERATION_WITH_CHECKS to the helpers above. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	sljit_ins inv_bits;

	CHECK_ERROR();

	/* The mem_flags ^ 0x1 trick below relies on this relation. */
	SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x1) == WORD_SIZE, must_be_one_bit_difference);
	SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);

	inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src & SLJIT_MEM) {
		/* For F64<-F32 the source has the opposite size of the result. */
		emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x1) : mem_flags, dst_r, src, srcw);
		src = dst_r;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_MOV_F64:
		if (src != dst_r) {
			if (dst_r != TMP_FREG1)
				FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src)));
			else
				/* Destination is memory: store directly from src. */
				dst_r = src;
		}
		break;
	case SLJIT_NEG_F64:
		FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_ABS_F64:
		FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
		break;
	case SLJIT_CONV_F64_FROM_F32:
		/* Bit 22 selects the single source, bit 15 the double target. */
		FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
		break;
	}

	if (dst & SLJIT_MEM)
		return emit_fop_mem(compiler, mem_flags | STORE, dst_r, dst, dstw);
	return SLJIT_SUCCESS;
}
/* Emits a two-operand floating point operation (add, sub, mul, div).
   inv_bits (bit 22) flips the encodings to single precision. */
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 src1, sljit_sw src1w,
	sljit_s32 src2, sljit_sw src2w)
{
	sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
	sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
	ADJUST_LOCAL_OFFSET(dst, dstw);
	ADJUST_LOCAL_OFFSET(src1, src1w);
	ADJUST_LOCAL_OFFSET(src2, src2w);

	/* A memory destination goes through TMP_FREG1. */
	dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;

	if (src1 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
		src1 = TMP_FREG1;
	}

	if (src2 & SLJIT_MEM) {
		emit_fop_mem(compiler, mem_flags, TMP_FREG2, src2, src2w);
		src2 = TMP_FREG2;
	}

	switch (GET_OPCODE(op)) {
	case SLJIT_ADD_F64:
		FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_SUB_F64:
		FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_MUL_F64:
		FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	case SLJIT_DIV_F64:
		FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
		break;
	}

	if (!(dst & SLJIT_MEM))
		return SLJIT_SUCCESS;
	/* dst_r is TMP_FREG1 whenever dst is memory. */
	return emit_fop_mem(compiler, mem_flags | STORE, TMP_FREG1, dst, dstw);
}
  1335. /* --------------------------------------------------------------------- */
  1336. /* Other instructions */
  1337. /* --------------------------------------------------------------------- */
  1338. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  1339. {
  1340. CHECK_ERROR();
  1341. CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
  1342. ADJUST_LOCAL_OFFSET(dst, dstw);
  1343. if (FAST_IS_REG(dst))
  1344. return push_inst(compiler, ORR | RD(dst) | RN(TMP_ZERO) | RM(TMP_LR));
  1345. /* Memory. */
  1346. return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_LR, dst, dstw, TMP_REG1);
  1347. }
  1348. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
  1349. {
  1350. CHECK_ERROR();
  1351. CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
  1352. ADJUST_LOCAL_OFFSET(src, srcw);
  1353. if (FAST_IS_REG(src))
  1354. FAIL_IF(push_inst(compiler, ORR | RD(TMP_LR) | RN(TMP_ZERO) | RM(src)));
  1355. else
  1356. FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_LR, src, srcw, TMP_REG1));
  1357. return push_inst(compiler, RET | RN(TMP_LR));
  1358. }
  1359. /* --------------------------------------------------------------------- */
  1360. /* Conditional instructions */
  1361. /* --------------------------------------------------------------------- */
  1362. static sljit_uw get_cc(sljit_s32 type)
  1363. {
  1364. switch (type) {
  1365. case SLJIT_EQUAL:
  1366. case SLJIT_MUL_NOT_OVERFLOW:
  1367. case SLJIT_EQUAL_F64:
  1368. return 0x1;
  1369. case SLJIT_NOT_EQUAL:
  1370. case SLJIT_MUL_OVERFLOW:
  1371. case SLJIT_NOT_EQUAL_F64:
  1372. return 0x0;
  1373. case SLJIT_LESS:
  1374. case SLJIT_LESS_F64:
  1375. return 0x2;
  1376. case SLJIT_GREATER_EQUAL:
  1377. case SLJIT_GREATER_EQUAL_F64:
  1378. return 0x3;
  1379. case SLJIT_GREATER:
  1380. case SLJIT_GREATER_F64:
  1381. return 0x9;
  1382. case SLJIT_LESS_EQUAL:
  1383. case SLJIT_LESS_EQUAL_F64:
  1384. return 0x8;
  1385. case SLJIT_SIG_LESS:
  1386. return 0xa;
  1387. case SLJIT_SIG_GREATER_EQUAL:
  1388. return 0xb;
  1389. case SLJIT_SIG_GREATER:
  1390. return 0xd;
  1391. case SLJIT_SIG_LESS_EQUAL:
  1392. return 0xc;
  1393. case SLJIT_OVERFLOW:
  1394. case SLJIT_UNORDERED_F64:
  1395. return 0x7;
  1396. case SLJIT_NOT_OVERFLOW:
  1397. case SLJIT_ORDERED_F64:
  1398. return 0x6;
  1399. default:
  1400. SLJIT_UNREACHABLE();
  1401. return 0xe;
  1402. }
  1403. }
  1404. SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
  1405. {
  1406. struct sljit_label *label;
  1407. CHECK_ERROR_PTR();
  1408. CHECK_PTR(check_sljit_emit_label(compiler));
  1409. if (compiler->last_label && compiler->last_label->size == compiler->size)
  1410. return compiler->last_label;
  1411. label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label));
  1412. PTR_FAIL_IF(!label);
  1413. set_label(label, compiler);
  1414. return label;
  1415. }
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
{
	struct sljit_jump *jump;

	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_jump(compiler, type));

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	type &= 0xff;

	if (type < SLJIT_JUMP) {
		jump->flags |= IS_COND;
		/* Conditional guard: get_cc returns the inverted condition, so
		   this B.cond branches *over* the sequence below (constant load
		   + BR/BLR) when the jump must not be taken. (6 << 5) is the
		   imm19 word offset field — assumes emit_imm64_const emits
		   4 instructions, so 6 words skips this B.cond plus the
		   5 following instructions; verify against emit_imm64_const. */
		PTR_FAIL_IF(push_inst(compiler, B_CC | (6 << 5) | get_cc(type)));
	}
	else if (type >= SLJIT_FAST_CALL)
		jump->flags |= IS_BL;

	/* Patchable 64 bit target load; the real address is written in later
	   (the generator may also shorten this to a direct jump/call). */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	/* Record the position right after the constant for later patching. */
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1)));
	return jump;
}
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types)
{
	CHECK_ERROR_PTR();
	CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

	/* arg_types is intentionally unused: no argument shuffling is
	   emitted here (presumably the sljit and native AAPCS64 argument
	   registers coincide on this target — confirm upstream).
	   skip_checks suppresses the duplicate argument validation inside
	   the sljit_emit_jump call below. */
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_jump(compiler, type);
}
static SLJIT_INLINE struct sljit_jump* emit_cmp_to0(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;
	/* W_OP selects the 32 bit (Wn) form of the compare-and-branch. */
	sljit_ins inv_bits = (type & SLJIT_I32_OP) ? W_OP : 0;

	SLJIT_ASSERT((type & 0xff) == SLJIT_EQUAL || (type & 0xff) == SLJIT_NOT_EQUAL);
	ADJUST_LOCAL_OFFSET(src, srcw);

	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	PTR_FAIL_IF(!jump);
	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
	jump->flags |= IS_CBZ | IS_COND;

	/* CBZ/CBNZ need a register operand: load memory/immediate sources
	   into TMP_REG1 first. */
	if (src & SLJIT_MEM) {
		PTR_FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
		src = TMP_REG1;
	}
	else if (src & SLJIT_IMM) {
		PTR_FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
		src = TMP_REG1;
	}

	SLJIT_ASSERT(FAST_IS_REG(src));

	/* Bit 24 turns CBZ into CBNZ. The guard must branch *around* the
	   long jump sequence below when the jump is NOT to be taken, so
	   SLJIT_EQUAL (jump when zero) uses CBNZ as the guard. */
	if ((type & 0xff) == SLJIT_EQUAL)
		inv_bits |= 1 << 24;

	/* (6 << 5): word offset skipping this guard plus the 5 instructions
	   below — assumes emit_imm64_const emits 4 instructions; verify. */
	PTR_FAIL_IF(push_inst(compiler, (CBZ ^ inv_bits) | (6 << 5) | RT(src)));
	/* Patchable 64 bit target load, filled in during code generation. */
	PTR_FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	PTR_FAIL_IF(push_inst(compiler, BR | RN(TMP_REG1)));
	return jump;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
{
	struct sljit_jump *jump;

	CHECK_ERROR();
	CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	/* Register/memory target: branch through a register directly.
	   BLR for calls (type >= SLJIT_FAST_CALL), BR for plain jumps. */
	if (!(src & SLJIT_IMM)) {
		if (src & SLJIT_MEM) {
			FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
			src = TMP_REG1;
		}
		return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(src));
	}

	/* These jumps are converted to jump/call instructions when possible. */
	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
	FAIL_IF(!jump);
	set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0));
	jump->u.target = srcw;

	/* Placeholder 64 bit constant load; patched with the real target
	   during code generation. */
	FAIL_IF(emit_imm64_const(compiler, TMP_REG1, 0));
	jump->addr = compiler->size;
	return push_inst(compiler, ((type >= SLJIT_FAST_CALL) ? BLR : BR) | RN(TMP_REG1));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compiler, sljit_s32 type,
	sljit_s32 arg_types,
	sljit_s32 src, sljit_sw srcw)
{
	CHECK_ERROR();
	CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

	/* arg_types is intentionally unused — mirrors sljit_emit_call: no
	   argument shuffling is needed for an indirect call on this target
	   (presumably sljit and AAPCS64 argument registers coincide —
	   confirm upstream). skip_checks avoids re-running the argument
	   checks in sljit_emit_ijump. */
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
	|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
	compiler->skip_checks = 1;
#endif

	return sljit_emit_ijump(compiler, type, src, srcw);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
	sljit_s32 dst, sljit_sw dstw,
	sljit_s32 type)
{
	sljit_s32 dst_r, src_r, flags, mem_flags;
	sljit_ins cc;

	CHECK_ERROR();
	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	cc = get_cc(type & 0xff);
	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

	/* Plain move: CSINC Rd, xzr, xzr, cc yields 0 when cc holds and 1
	   otherwise; with the inverted cc from get_cc this materializes 1
	   exactly when the sljit condition is true. */
	if (GET_OPCODE(op) < SLJIT_ADD) {
		FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(dst_r) | RN(TMP_ZERO) | RM(TMP_ZERO)));

		if (dst_r == TMP_REG1) {
			/* Memory destination: SLJIT_MOV stores a full word,
			   other move opcodes use a 32 bit store. */
			mem_flags = (GET_OPCODE(op) == SLJIT_MOV ? WORD_SIZE : INT_SIZE) | STORE;
			return emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG2);
		}
		return SLJIT_SUCCESS;
	}

	/* Binary opcode: combine the 0/1 flag value with the current dst. */
	flags = HAS_FLAGS(op) ? SET_FLAGS : 0;
	mem_flags = WORD_SIZE;

	if (op & SLJIT_I32_OP) {
		flags |= INT_OP;
		mem_flags = INT_SIZE;
	}

	src_r = dst;

	/* Load the current destination value first when it lives in memory. */
	if (dst & SLJIT_MEM) {
		FAIL_IF(emit_op_mem(compiler, mem_flags, TMP_REG1, dst, dstw, TMP_REG1));
		src_r = TMP_REG1;
	}

	/* Materialize the 0/1 flag value in TMP_REG2, then apply the op. */
	FAIL_IF(push_inst(compiler, CSINC | (cc << 12) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(TMP_ZERO)));
	/* NOTE(review): the return value of emit_op_imm is ignored here —
	   presumably any error is still recorded in compiler->error and
	   caught later, but consider FAIL_IF for consistency; verify. */
	emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src_r, TMP_REG2);

	if (dst & SLJIT_MEM)
		return emit_op_mem(compiler, mem_flags | STORE, TMP_REG1, dst, dstw, TMP_REG2);
	return SLJIT_SUCCESS;
}
  1545. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
  1546. sljit_s32 dst_reg,
  1547. sljit_s32 src, sljit_sw srcw)
  1548. {
  1549. sljit_ins inv_bits = (dst_reg & SLJIT_I32_OP) ? W_OP : 0;
  1550. sljit_ins cc;
  1551. CHECK_ERROR();
  1552. CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
  1553. if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
  1554. if (dst_reg & SLJIT_I32_OP)
  1555. srcw = (sljit_s32)srcw;
  1556. FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
  1557. src = TMP_REG1;
  1558. srcw = 0;
  1559. }
  1560. cc = get_cc(type & 0xff);
  1561. dst_reg &= ~SLJIT_I32_OP;
  1562. return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
  1563. }
  1564. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
  1565. sljit_s32 reg,
  1566. sljit_s32 mem, sljit_sw memw)
  1567. {
  1568. sljit_u32 sign = 0, inst;
  1569. CHECK_ERROR();
  1570. CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
  1571. if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
  1572. return SLJIT_ERR_UNSUPPORTED;
  1573. if (type & SLJIT_MEM_SUPP)
  1574. return SLJIT_SUCCESS;
  1575. switch (type & 0xff) {
  1576. case SLJIT_MOV:
  1577. case SLJIT_MOV_P:
  1578. inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
  1579. break;
  1580. case SLJIT_MOV_S8:
  1581. sign = 1;
  1582. case SLJIT_MOV_U8:
  1583. inst = STURBI | (MEM_SIZE_SHIFT(BYTE_SIZE) << 30) | 0x400;
  1584. break;
  1585. case SLJIT_MOV_S16:
  1586. sign = 1;
  1587. case SLJIT_MOV_U16:
  1588. inst = STURBI | (MEM_SIZE_SHIFT(HALF_SIZE) << 30) | 0x400;
  1589. break;
  1590. case SLJIT_MOV_S32:
  1591. sign = 1;
  1592. case SLJIT_MOV_U32:
  1593. inst = STURBI | (MEM_SIZE_SHIFT(INT_SIZE) << 30) | 0x400;
  1594. break;
  1595. default:
  1596. SLJIT_UNREACHABLE();
  1597. inst = STURBI | (MEM_SIZE_SHIFT(WORD_SIZE) << 30) | 0x400;
  1598. break;
  1599. }
  1600. if (!(type & SLJIT_MEM_STORE))
  1601. inst |= sign ? 0x00800000 : 0x00400000;
  1602. if (type & SLJIT_MEM_PRE)
  1603. inst |= 0x800;
  1604. return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
  1605. }
  1606. SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
  1607. sljit_s32 freg,
  1608. sljit_s32 mem, sljit_sw memw)
  1609. {
  1610. sljit_u32 inst;
  1611. CHECK_ERROR();
  1612. CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
  1613. if ((mem & OFFS_REG_MASK) || (memw > 255 && memw < -256))
  1614. return SLJIT_ERR_UNSUPPORTED;
  1615. if (type & SLJIT_MEM_SUPP)
  1616. return SLJIT_SUCCESS;
  1617. inst = STUR_FI | 0x80000400;
  1618. if (!(type & SLJIT_F32_OP))
  1619. inst |= 0x40000000;
  1620. if (!(type & SLJIT_MEM_STORE))
  1621. inst |= 0x00400000;
  1622. if (type & SLJIT_MEM_PRE)
  1623. inst |= 0x800;
  1624. return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | ((memw & 0x1ff) << 12));
  1625. }
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
{
	sljit_s32 dst_reg;
	sljit_ins ins;

	CHECK_ERROR();
	CHECK(check_sljit_get_local_base(compiler, dst, dstw, offset));

	SLJIT_ASSERT (SLJIT_LOCALS_OFFSET_BASE == 0);

	dst_reg = FAST_IS_REG(dst) ? dst : TMP_REG1;

	/* Offsets up to 24 bits fit in at most two ADD/SUB immediate
	   instructions (12 bit immediate each, second one shifted by 12). */
	if (offset <= 0xffffff && offset >= -0xffffff) {
		ins = ADDI;
		if (offset < 0) {
			offset = -offset;
			ins = SUBI;
		}

		if (offset <= 0xfff)
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | (offset << 10)));
		else {
			/* High 12 bits first; (1 << 22) selects the LSL #12
			   shifted immediate form. */
			FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(SLJIT_SP) | ((offset & 0xfff000) >> (12 - 10)) | (1 << 22)));

			offset &= 0xfff;
			if (offset != 0)
				FAIL_IF(push_inst(compiler, ins | RD(dst_reg) | RN(dst_reg) | (offset << 10)));
		}
	}
	else {
		/* Large offset: load it as a full constant, then add SP. */
		FAIL_IF(load_immediate (compiler, dst_reg, offset));
		/* Add extended register form. */
		FAIL_IF(push_inst(compiler, ADDE | (0x3 << 13) | RD(dst_reg) | RN(SLJIT_SP) | RM(dst_reg)));
	}

	/* Memory destination: spill the computed base address. */
	if (SLJIT_UNLIKELY(dst & SLJIT_MEM))
		return emit_op_mem(compiler, WORD_SIZE | STORE, dst_reg, dst, dstw, TMP_REG1);
	return SLJIT_SUCCESS;
}
  1658. SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
  1659. {
  1660. struct sljit_const *const_;
  1661. sljit_s32 dst_r;
  1662. CHECK_ERROR_PTR();
  1663. CHECK_PTR(check_sljit_emit_const(compiler, dst, dstw, init_value));
  1664. ADJUST_LOCAL_OFFSET(dst, dstw);
  1665. const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const));
  1666. PTR_FAIL_IF(!const_);
  1667. set_const(const_, compiler);
  1668. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1669. PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, init_value));
  1670. if (dst & SLJIT_MEM)
  1671. PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
  1672. return const_;
  1673. }
  1674. SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
  1675. {
  1676. struct sljit_put_label *put_label;
  1677. sljit_s32 dst_r;
  1678. CHECK_ERROR_PTR();
  1679. CHECK_PTR(check_sljit_emit_put_label(compiler, dst, dstw));
  1680. ADJUST_LOCAL_OFFSET(dst, dstw);
  1681. dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
  1682. PTR_FAIL_IF(emit_imm64_const(compiler, dst_r, 0));
  1683. put_label = (struct sljit_put_label*)ensure_abuf(compiler, sizeof(struct sljit_put_label));
  1684. PTR_FAIL_IF(!put_label);
  1685. set_put_label(put_label, compiler, 1);
  1686. if (dst & SLJIT_MEM)
  1687. PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw, TMP_REG2));
  1688. return put_label;
  1689. }
  1690. SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
  1691. {
  1692. sljit_ins* inst = (sljit_ins*)addr;
  1693. modify_imm64_const(inst, new_target);
  1694. inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
  1695. SLJIT_CACHE_FLUSH(inst, inst + 4);
  1696. }
  1697. SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
  1698. {
  1699. sljit_ins* inst = (sljit_ins*)addr;
  1700. modify_imm64_const(inst, new_constant);
  1701. inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
  1702. SLJIT_CACHE_FLUSH(inst, inst + 4);
  1703. }