
;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Anton Mitrofanov <BugMaster@narod.ru>
;* Jason Garrett-Glaser <darkshikari@gmail.com>
;* Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.
; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%define program_name ff
%define UNIX64 0
%define WIN64 0
%if ARCH_X86_64
%ifidn __OUTPUT_FORMAT__,win32
%define WIN64 1
%elifidn __OUTPUT_FORMAT__,win64
%define WIN64 1
%else
%define UNIX64 1
%endif
%endif
%ifdef PREFIX
%define mangle(x) _ %+ x
%else
%define mangle(x) x
%endif
; FIXME: All of the 64bit asm functions that take a stride as an argument
; via register, assume that the high dword of that register is filled with 0.
; This is true in practice (since we never do any 64bit arithmetic on strides,
; and x264's strides are all positive), but is not guaranteed by the ABI.
; Name of the .rodata section.
%macro SECTION_RODATA 0-1 16
; Kludge: Something on OS X fails to align .rodata even given an align
; attribute, so use a different read-only section. This has been fixed in
; yasm 0.8.0 and nasm 2.6.
%ifdef __YASM_VERSION_ID__
%if __YASM_VERSION_ID__ < 00080000h
%define NEED_MACHO_RODATA_KLUDGE
%endif
%elifdef __NASM_VERSION_ID__
%if __NASM_VERSION_ID__ < 02060000h
%define NEED_MACHO_RODATA_KLUDGE
%endif
%endif
%ifidn __OUTPUT_FORMAT__,aout
section .text
%else
%ifndef NEED_MACHO_RODATA_KLUDGE
SECTION .rodata align=%1
%else
%ifidn __OUTPUT_FORMAT__,macho64
SECTION .text align=%1
%elifidn __OUTPUT_FORMAT__,macho
SECTION .text align=%1
fakegot:
%else
SECTION .rodata align=%1
%endif
%endif
%endif
%undef NEED_MACHO_RODATA_KLUDGE
%endmacro
; aout does not support align=
%macro SECTION_TEXT 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .text
%else
SECTION .text align=%1
%endif
%endmacro
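; Usage sketch (the constant name below is hypothetical): the optional
; argument is the section alignment in bytes, defaulting to 16.
;     SECTION_RODATA 32          ; read-only data (or .text on old Mach-O tools)
;     pw_1: times 8 dw 1
;     SECTION_TEXT               ; back to code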
%if WIN64
%define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
%undef PIC
%endif
%ifdef PIC
default rel
%endif
; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
; Not supported by NASM (except via smartalign package + ALIGNMODE k8,
; however that fails when used together with the -M option)
%ifdef __YASM_VER__
CPU amdnop
%endif
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal
; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.
; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
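; A minimal usage sketch (foo, dst, src and tmp are placeholder names, not part
; of this file): the named arguments map onto r0..rN for whichever ABI is being
; targeted, so the same body assembles on x86_32, win64 and linux64.
;     cglobal foo, 2,3,0, dst, src, tmp
;         mov   tmpd, [srcq]
;         mov   [dstq], tmpd
;         RET                  ; pops whatever PROLOGUE pushed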
%macro DECLARE_REG 5-6
%define r%1q %2
%define r%1d %3
%define r%1w %4
%define r%1b %5
%if %0 == 5
%define r%1m %3
%define r%1mp %2
%elif ARCH_X86_64 ; memory
%define r%1m [rsp + stack_offset + %6]
%define r%1mp qword r %+ %1 %+ m
%else
%define r%1m [esp + stack_offset + %6]
%define r%1mp dword r %+ %1 %+ m
%endif
%define r%1 %2
%endmacro
%macro DECLARE_REG_SIZE 2
%define r%1q r%1
%define e%1q r%1
%define r%1d e%1
%define e%1d e%1
%define r%1w %1
%define e%1w %1
%define r%1b %2
%define e%1b %2
%if ARCH_X86_64 == 0
%define r%1 e%1
%endif
%endmacro
DECLARE_REG_SIZE ax, al
DECLARE_REG_SIZE bx, bl
DECLARE_REG_SIZE cx, cl
DECLARE_REG_SIZE dx, dl
DECLARE_REG_SIZE si, sil
DECLARE_REG_SIZE di, dil
DECLARE_REG_SIZE bp, bpl
; t# defines for when per-arch register allocation is more complex than just function arguments
%macro DECLARE_REG_TMP 1-*
%assign %%i 0
%rep %0
CAT_XDEFINE t, %%i, r%1
%assign %%i %%i+1
%rotate 1
%endrep
%endmacro
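; Hypothetical example:
;     DECLARE_REG_TMP 4,5,6
; makes t0/t1/t2 aliases for r4/r5/r6 on the current arch; the t0q/t0d/t0w/t0b
; size variants set up by DECLARE_REG_TMP_SIZE below resolve through them.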
%macro DECLARE_REG_TMP_SIZE 0-*
%rep %0
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1b t%1 %+ b
%rotate 1
%endrep
%endmacro
DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%if ARCH_X86_64
%define gprsize 8
%else
%define gprsize 4
%endif
%macro PUSH 1
push %1
%assign stack_offset stack_offset+gprsize
%endmacro
%macro POP 1
pop %1
%assign stack_offset stack_offset-gprsize
%endmacro
%macro PUSH_IF_USED 1-*
%rep %0
%if %1 < regs_used
PUSH r%1
%endif
%rotate 1
%endrep
%endmacro
%macro POP_IF_USED 1-*
%rep %0
%if %1 < regs_used
pop r%1
%endif
%rotate 1
%endrep
%endmacro
%macro LOAD_IF_USED 1-*
%rep %0
%if %1 < num_args
mov r%1, r %+ %1 %+ mp
%endif
%rotate 1
%endrep
%endmacro
%macro SUB 2
sub %1, %2
%ifidn %1, rsp
%assign stack_offset stack_offset+(%2)
%endif
%endmacro
%macro ADD 2
add %1, %2
%ifidn %1, rsp
%assign stack_offset stack_offset-(%2)
%endif
%endmacro
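; Sketch of why these wrappers exist: rNm is defined relative to rsp/esp plus
; stack_offset, so manual stack adjustments must go through SUB/ADD (and
; PUSH/POP) to keep stack arguments addressable, e.g.
;     SUB rsp, 16          ; stack_offset grows by 16, stack args still resolve
;     ...
;     ADD rsp, 16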
%macro movifnidn 2
%ifnidn %1, %2
mov %1, %2
%endif
%endmacro
%macro movsxdifnidn 2
%ifnidn %1, %2
movsxd %1, %2
%endif
%endmacro
%macro ASSERT 1
%if (%1) == 0
%error assert failed
%endif
%endmacro
%macro DEFINE_ARGS 0-*
%ifdef n_arg_names
%assign %%i 0
%rep n_arg_names
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%assign %%i %%i+1
%endrep
%endif
%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%assign %%i 0
%rep %0
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%assign %%i %%i+1
%rotate 1
%endrep
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
%endmacro
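; Hypothetical example: in a function declared as "cglobal foo, 3,3",
;     DEFINE_ARGS dst, src, stride
; aliases dstq/dstd/dstw/dstb/dstm to r0*, srcq... to r1* and strideq... to
; r2*; calling DEFINE_ARGS again later in the same function replaces the old
; names.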
%if WIN64 ; Windows x64 ;=================================================
DECLARE_REG 0, rcx, ecx, cx, cl
DECLARE_REG 1, rdx, edx, dx, dl
DECLARE_REG 2, R8, R8D, R8W, R8B
DECLARE_REG 3, R9, R9D, R9W, R9B
DECLARE_REG 4, R10, R10D, R10W, R10B, 40
DECLARE_REG 5, R11, R11D, R11W, R11B, 48
DECLARE_REG 6, rax, eax, ax, al, 56
DECLARE_REG 7, rdi, edi, di, dil, 64
DECLARE_REG 8, rsi, esi, si, sil, 72
DECLARE_REG 9, rbx, ebx, bx, bl, 80
DECLARE_REG 10, rbp, ebp, bp, bpl, 88
DECLARE_REG 11, R12, R12D, R12W, R12B, 96
DECLARE_REG 12, R13, R13D, R13W, R13B, 104
DECLARE_REG 13, R14, R14D, R14W, R14B, 112
DECLARE_REG 14, R15, R15D, R15W, R15B, 120
%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize == 8
%assign xmm_regs_used 0
%else
WIN64_SPILL_XMM %3
%endif
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS %4
%endmacro
%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 6
SUB rsp, (xmm_regs_used-6)*16+16
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
%assign %%i %%i-1
movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
%endrep
%endif
%endmacro
%macro WIN64_RESTORE_XMM_INTERNAL 1
%if xmm_regs_used > 6
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
%assign %%i %%i-1
movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
%endrep
add %1, (xmm_regs_used-6)*16+16
%endif
%endmacro
%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset stack_offset-((xmm_regs_used-6)*16+16)
%assign xmm_regs_used 0
%endmacro
%macro RET 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
ret
%endmacro
%macro REP_RET 0
%if regs_used > 7 || xmm_regs_used > 6
RET
%else
rep ret
%endif
%endmacro
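; Rough sketch of the win64 PROLOGUE above (numbers are illustrative):
;     PROLOGUE 5,7,8
; pushes no gprs (regs_used is 7 and only r7..r14 are callee-saved here),
; spills xmm6/xmm7 via WIN64_SPILL_XMM 8, and loads the fifth argument from its
; stack slot with LOAD_IF_USED; RET then restores the xmm registers before
; returning.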
%elif ARCH_X86_64 ; *nix x64 ;=============================================
DECLARE_REG 0, rdi, edi, di, dil
DECLARE_REG 1, rsi, esi, si, sil
DECLARE_REG 2, rdx, edx, dx, dl
DECLARE_REG 3, rcx, ecx, cx, cl
DECLARE_REG 4, R8, R8D, R8W, R8B
DECLARE_REG 5, R9, R9D, R9W, R9B
DECLARE_REG 6, rax, eax, ax, al, 8
DECLARE_REG 7, R10, R10D, R10W, R10B, 16
DECLARE_REG 8, R11, R11D, R11W, R11B, 24
DECLARE_REG 9, rbx, ebx, bx, bl, 32
DECLARE_REG 10, rbp, ebp, bp, bpl, 40
DECLARE_REG 11, R12, R12D, R12W, R12B, 48
DECLARE_REG 12, R13, R13D, R13W, R13B, 56
DECLARE_REG 13, R14, R14D, R14W, R14B, 64
DECLARE_REG 14, R15, R15D, R15W, R15B, 72
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS %4
%endmacro
%macro RET 0
POP_IF_USED 14, 13, 12, 11, 10, 9
ret
%endmacro
%macro REP_RET 0
%if regs_used > 9
RET
%else
rep ret
%endif
%endmacro
%else ; X86_32 ;==============================================================
DECLARE_REG 0, eax, eax, ax, al, 4
DECLARE_REG 1, ecx, ecx, cx, cl, 8
DECLARE_REG 2, edx, edx, dx, dl, 12
DECLARE_REG 3, ebx, ebx, bx, bl, 16
DECLARE_REG 4, esi, esi, si, null, 20
DECLARE_REG 5, edi, edi, di, null, 24
DECLARE_REG 6, ebp, ebp, bp, null, 28
%define rsp esp
%macro DECLARE_ARG 1-*
%rep %0
%define r%1m [esp + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m
%rotate 1
%endrep
%endmacro
DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%assign num_args %1
%assign regs_used %2
%if regs_used > 7
%assign regs_used 7
%endif
ASSERT regs_used >= num_args
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS %4
%endmacro
%macro RET 0
POP_IF_USED 6, 5, 4, 3
ret
%endmacro
%macro REP_RET 0
%if regs_used > 3
RET
%else
rep ret
%endif
%endmacro
%endif ;======================================================================
%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%endif
;=============================================================================
; arch-independent part
;=============================================================================
%assign function_align 16
; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
%macro cglobal 1-2+ ; name, [PROLOGUE args]
%if %0 == 1
; HACK: work around %+ broken with empty SUFFIX for nasm 2.09.10
%ifndef cpuname
cglobal_internal %1
%else
cglobal_internal %1 %+ SUFFIX
%endif
%else
; HACK: work around %+ broken with empty SUFFIX for nasm 2.09.10
%ifndef cpuname
cglobal_internal %1, %2
%else
cglobal_internal %1 %+ SUFFIX, %2
%endif
%endif
%endmacro
%macro cglobal_internal 1-2+
%ifndef cglobaled_%1
%xdefine %1 mangle(program_name %+ _ %+ %1)
%xdefine %1.skip_prologue %1 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %1, 1
%endif
%xdefine current_function %1
%ifidn __OUTPUT_FORMAT__,elf
global %1:function hidden
%else
global %1
%endif
align function_align
%1:
RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
%assign stack_offset 0
%if %0 > 1
PROLOGUE %2
%endif
%endmacro
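; Hypothetical example of the resulting symbol names: with program_name "ff",
;     INIT_XMM sse2
;     cglobal foo, 2,2
; defines and exports ff_foo_sse2 (plus a leading underscore if PREFIX is set),
; and "call foo" from asm built for the same cpu variant resolves to that
; mangled name via the call macro further down.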
%macro cextern 1
%xdefine %1 mangle(program_name %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
%macro const 2+
%xdefine %1 mangle(program_name %+ _ %+ %1)
global %1
%1: %2
%endmacro
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif
; cpuflags
%assign cpuflags_mmx (1<<0)
%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow
%assign cpuflags_sse (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2 (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
%assign cpuflags_avx (1<<11)| cpuflags_sse42
%assign cpuflags_xop (1<<12)| cpuflags_avx
%assign cpuflags_fma4 (1<<13)| cpuflags_avx
%assign cpuflags_cache32 (1<<16)
%assign cpuflags_cache64 (1<<17)
%assign cpuflags_slowctz (1<<18)
%assign cpuflags_lzcnt (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom (1<<22)
%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
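; Usage sketch: cpuflag()/notcpuflag() are for compile-time branching inside
; function bodies, e.g. (hypothetical)
;     %if cpuflag(ssse3)
;         pshufb m0, m1
;     %else
;         ; slower shuffle fallback for pre-SSSE3 targets
;     %endif
; Because each flag includes its predecessors, cpuflag(sse2) also holds when
; building an avx variant.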
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX & co.
%macro INIT_CPUFLAGS 0-2
%if %0 >= 1
%xdefine cpuname %1
%assign cpuflags cpuflags_%1
%if %0 >= 2
%xdefine cpuname %1_%2
%assign cpuflags cpuflags | cpuflags_%2
%endif
%xdefine SUFFIX _ %+ cpuname
%if cpuflag(avx)
%assign avx_enabled 1
%endif
%if mmsize == 16 && notcpuflag(sse2)
%define mova movaps
%define movu movups
%define movnta movntps
%endif
%if cpuflag(aligned)
%define movu mova
%elifidn %1, sse3
%define movu lddqu
%endif
%else
%xdefine SUFFIX
%undef cpuname
%undef cpuflags
%endif
%endmacro
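; Hypothetical invocations: "INIT_XMM sse2" (further down) calls
; INIT_CPUFLAGS sse2, giving cpuname sse2, SUFFIX _sse2 and cpuflags_sse2
; (which includes mmx/mmx2/sse); "INIT_MMX mmx2, cache32" combines two flags
; and yields cpuname mmx2_cache32.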
; merge mmx and sse*
%macro CAT_XDEFINE 3
%xdefine %1%2 %3
%endmacro
%macro CAT_UNDEF 2
%undef %1%2
%endmacro
%macro INIT_MMX 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define mmsize 8
%define num_mmregs 8
%define mova movq
%define movu movq
%define movh movd
%define movnta movntq
%assign %%i 0
%rep 8
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nmm, %%i, %%i
%assign %%i %%i+1
%endrep
%rep 8
CAT_UNDEF m, %%i
CAT_UNDEF nmm, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
%macro INIT_XMM 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define mmsize 16
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
%define movh movq
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nxmm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
INIT_XMM
%assign avx_enabled 1
%define PALIGNR PALIGNR_SSSE3
%define RESET_MM_PERMUTATION INIT_AVX
%endmacro
%macro INIT_YMM 0-1+
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
%define num_mmregs 16
%endif
%define mova vmovaps
%define movu vmovups
%undef movh
%define movnta vmovntps
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nymm, %%i, %%i
%assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
INIT_XMM
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
%xdefine tmp%2 m%2
%xdefine ntmp%2 nm%2
%rotate 2
%endrep
%rep %0/2
%xdefine m%1 tmp%2
%xdefine nm%1 ntmp%2
%undef tmp%2
%undef ntmp%2
%rotate 2
%endrep
%endmacro
%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
%rep %0-1
%ifdef m%1
%xdefine tmp m%1
%xdefine m%1 m%2
%xdefine m%2 tmp
CAT_XDEFINE n, m%1, %1
CAT_XDEFINE n, m%2, %2
%else
; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
; Be careful using this mode in nested macros though, as in some cases there may be
; other copies of m# that have already been dereferenced and don't get updated correctly.
%xdefine %%n1 n %+ %1
%xdefine %%n2 n %+ %2
%xdefine tmp m %+ %%n1
CAT_XDEFINE m, %%n1, m %+ %%n2
CAT_XDEFINE m, %%n2, tmp
CAT_XDEFINE n, m %+ %%n1, %%n1
CAT_XDEFINE n, m %+ %%n2, %%n2
%endif
%undef tmp
%rotate 1
%endrep
%endmacro
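; Usage sketch (register numbers are illustrative): a macro whose result ends
; up in the "wrong" register can finish with
;     SWAP 0, 2
; which renames m0 and m2 so that following code sees the result as m0 without
; spending an actual move; a longer list like "SWAP 0, 1, 2" applies the swaps
; pairwise along the chain.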
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
%if %0
%xdefine %%f %1_m
%else
%xdefine %%f current_function %+ _m
%endif
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
%assign %%i %%i+1
%endrep
%endmacro
%macro LOAD_MM_PERMUTATION 1 ; name to load from
%ifdef %1_m0
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE n, m %+ %%i, %%i
%assign %%i %%i+1
%endrep
%endif
%endmacro
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
; HACK: work around %+ broken with empty SUFFIX for nasm 2.09.10
%ifndef cpuname
call_internal %1, %1
%else
call_internal %1, %1 %+ SUFFIX
%endif
%endmacro
%macro call_internal 2
%xdefine %%i %1
%ifndef cglobaled_%1
%ifdef cglobaled_%2
%xdefine %%i %2
%endif
%endif
call %%i
LOAD_MM_PERMUTATION %%i
%endmacro
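; Usage sketch ("helper" is a placeholder name): a function that returns values
; in mm registers ends with SAVE_MM_PERMUTATION so its final register naming is
; recorded, e.g.
;     cglobal helper
;         ...
;         SWAP 0, 2
;         SAVE_MM_PERMUTATION
;         ret
; and "call helper" in another function then picks that permutation back up
; via LOAD_MM_PERMUTATION, so caller and callee agree on which logical m# is in
; which physical register.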
; Substitutions that reduce instruction size but are functionally equivalent
%macro add 2
%ifnum %2
%if %2==128
sub %1, -128
%else
add %1, %2
%endif
%else
add %1, %2
%endif
%endmacro
%macro sub 2
%ifnum %2
%if %2==128
add %1, -128
%else
sub %1, %2
%endif
%else
sub %1, %2
%endif
%endmacro
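; Example of the size win: "add r0, 128" would need a 32-bit immediate, but the
; wrapper assembles it as "sub r0, -128", which fits in a sign-extended 8-bit
; immediate and is a few bytes shorter (likewise for "sub r0, 128").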
;=============================================================================
; AVX abstraction layer
;=============================================================================
%assign i 0
%rep 16
%if i < 8
CAT_XDEFINE sizeofmm, i, 8
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i
;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
;%5+: operands
%macro RUN_AVX_INSTR 6-7+
%ifid %5
%define %%size sizeof%5
%else
%define %%size mmsize
%endif
%if %%size==32
%if %0 >= 7
v%1 %5, %6, %7
%else
v%1 %5, %6
%endif
%else
%if %%size==8
%define %%regmov movq
%elif %2
%define %%regmov movaps
%else
%define %%regmov movdqa
%endif
%if %4>=3+%3
%ifnidn %5, %6
%if avx_enabled && sizeof%5==16
v%1 %5, %6, %7
%else
%%regmov %5, %6
%1 %5, %7
%endif
%else
%1 %5, %7
%endif
%elif %3
%1 %5, %6, %7
%else
%1 %5, %6
%endif
%endif
%endmacro
; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
%assign %%swap 0
%if avx_enabled
%ifnid %6
%assign %%swap 1
%endif
%elifnidn %5, %6
%ifnid %7
%assign %%swap 1
%endif
%endif
%if %%swap && %3 == 0 && %8 == 1
RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
%else
RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
%endif
%endmacro
;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
%macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
%ifidn %3, fnord
RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
%elifidn %4, fnord
RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
%elifidn %5, fnord
RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
%else
RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
%endif
%endmacro
%endmacro
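; Effect sketch (register numbers are illustrative): after
; "AVX_INSTR paddw, 0, 0, 1" below, the 3-operand form can be written
; unconditionally:
;     paddw m0, m1, m2
; emits "vpaddw xmm0, xmm1, xmm2" when avx_enabled, and otherwise falls back to
; "movdqa xmm0, xmm1" followed by "paddw xmm0, xmm2"; since paddw is flagged as
; symmetric, a memory operand in the wrong source slot is swapped into place
; first.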
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 0, 0
AVX_INSTR cmpps, 1, 0, 0
AVX_INSTR cmpsd, 1, 0, 0
AVX_INSTR cmpss, 1, 0, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1
; base-4 constants for shuffles
%assign i 0
%rep 256
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
%if j < 10
CAT_XDEFINE q000, j, i
%elif j < 100
CAT_XDEFINE q00, j, i
%elif j < 1000
CAT_XDEFINE q0, j, i
%else
CAT_XDEFINE q, j, i
%endif
%assign i i+1
%endrep
%undef i
%undef j
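; Usage sketch: q1032 expands to 0x4E (the digits give the source element
; chosen for destination elements 3..0), so
;     pshufd m0, m1, q1032
; swaps the two 64-bit halves of m1.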
%macro FMA_INSTR 3
%macro %1 4-7 %1, %2, %3
%if cpuflag(xop)
v%5 %1, %2, %3, %4
%else
%6 %1, %2, %3
%7 %1, %4
%endif
%endmacro
%endmacro
FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
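; Usage sketch (register numbers are illustrative): with the definitions above,
;     pmacsdd m0, m1, m2, m3
; assembles to the single XOP instruction "vpmacsdd m0, m1, m2, m3" when
; cpuflag(xop) is set, and otherwise to the two-step fallback
; "pmulld m0, m1, m2" (lowered by the AVX layer) followed by "paddd m0, m3".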