gf_4vect_dot_prod_sse.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_4vect_dot_prod_sse(len, vec, *g_tbls, **buffs, **dests);
;;;
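;;; Argument summary (follows the usual isa-l erasure-code conventions):
;;;   len     - byte length of each vector; must be at least 16
;;;   vec     - number of source vectors
;;;   g_tbls  - 4*vec concatenated 32-byte GF(2^8) lookup tables, grouped
;;;             by destination: dest1 tables at offset 0, dest2 at vec*32,
;;;             dest3 at vec*64, dest4 at vec*96
;;;   buffs   - array of vec pointers to the source vectors
;;;   dests   - array of 4 pointers to the destination vectors
;;;
;;; Equivalent scalar model (an illustrative sketch, assuming the 32-byte
;;; table layout produced by gf_vect_mul_init: low-nibble products in
;;; bytes 0-15 of each table, high-nibble products in bytes 16-31):
;;;
;;;   for (d = 0; d < 4; d++)
;;;       for (p = 0; p < len; p++) {
;;;           unsigned char s = 0;
;;;           for (j = 0; j < vec; j++) {
;;;               unsigned char b  = buffs[j][p];
;;;               unsigned char *t = g_tbls + (d * vec + j) * 32;
;;;               s ^= t[b & 0x0f] ^ t[16 + (b >> 4)];
;;;           }
;;;           dests[d][p] = s;
;;;       }
;;;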
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r12             ; must be saved and restored
 %define tmp5   r14             ; must be saved and restored
 %define tmp6   r15             ; must be saved and restored
 %define return rax
 %macro SLDR 2                  ; stack load/store: no-op on 64-bit,
 %endmacro                      ; all variables stay in registers
 %define SSTR SLDR
 %define PS     8
 %define LOG_PS 3
 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
        push    r15
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r15
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif
%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r14             ; must be saved and restored
 %define tmp5   rdi             ; must be saved and restored
 %define tmp6   rsi             ; must be saved and restored
 %define return rax
 %macro SLDR 2
 %endmacro
 %define SSTR SLDR
 %define PS     8
 %define LOG_PS 3
 %define stack_size 9*16 + 7*8  ; must be an odd multiple of 8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        save_xmm128     xmm6, 0*16
        save_xmm128     xmm7, 1*16
        save_xmm128     xmm8, 2*16
        save_xmm128     xmm9, 3*16
        save_xmm128     xmm10, 4*16
        save_xmm128     xmm11, 5*16
        save_xmm128     xmm12, 6*16
        save_xmm128     xmm13, 7*16
        save_xmm128     xmm14, 8*16
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        save_reg        rdi, 9*16 + 4*8
        save_reg        rsi, 9*16 + 5*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro
 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp + 0*16]
        movdqa  xmm7, [rsp + 1*16]
        movdqa  xmm8, [rsp + 2*16]
        movdqa  xmm9, [rsp + 3*16]
        movdqa  xmm10, [rsp + 4*16]
        movdqa  xmm11, [rsp + 5*16]
        movdqa  xmm12, [rsp + 6*16]
        movdqa  xmm13, [rsp + 7*16]
        movdqa  xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
        mov     rdi, [rsp + 9*16 + 4*8]
        mov     rsi, [rsp + 9*16 + 5*8]
        add     rsp, stack_size
 %endmacro
%endif
%ifidn __OUTPUT_FORMAT__, elf32
;;;================== High Address;
;;;     arg4
;;;     arg3
;;;     arg2
;;;     arg1
;;;     arg0
;;;     return
;;;<================= esp of caller
;;;     ebp
;;;<================= ebp = esp
;;;     var0
;;;     var1
;;;     var2
;;;     var3
;;;     esi
;;;     edi
;;;     ebx
;;;<================= esp of callee
;;;
;;;================== Low Address;
 %define PS     4
 %define LOG_PS 2
 %define func(x) x:
 %define arg(x) [ebp + PS*2 + PS*x]
 %define var(x) [ebp - PS - PS*x]
 %define trans  ecx
 %define trans2 esi
 %define arg0   trans           ; trans and trans2 stand in for the
                                ; arguments/temporaries kept on the stack
 %define arg0_m arg(0)
 %define arg1   ebx
 %define arg2   arg2_m
 %define arg2_m arg(2)
 %define arg3   trans
 %define arg3_m arg(3)
 %define arg4   trans
 %define arg4_m arg(4)
 %define arg5   trans2
 %define tmp    edx
 %define tmp2   edi
 %define tmp3   trans2
 %define tmp3_m var(0)
 %define tmp4   trans2
 %define tmp4_m var(1)
 %define tmp5   trans2
 %define tmp5_m var(2)
 %define tmp6   trans2
 %define tmp6_m var(3)
 %define return eax
 %macro SLDR 2                  ; stack load/restore
        mov     %1, %2
 %endmacro
 %define SSTR SLDR
 %macro FUNC_SAVE 0
        push    ebp
        mov     ebp, esp
        sub     esp, PS*4       ; 4 local variables
        push    esi
        push    edi
        push    ebx
        mov     arg1, arg(1)
 %endmacro
 %macro FUNC_RESTORE 0
        pop     ebx
        pop     edi
        pop     esi
        add     esp, PS*4       ; 4 local variables
        pop     ebp
 %endmacro
%endif                          ; output formats
%define len    arg0
%define vec    arg1
%define mul_array arg2
%define src    arg3
%define dest1  arg4
%define ptr    arg5
%define vec_i  tmp2
%define dest2  tmp3
%define dest3  tmp4
%define dest4  tmp5
%define vskip3 tmp6
%define pos    return

%ifidn PS,4                     ; 32-bit code
 %define len_m    arg0_m
 %define src_m    arg3_m
 %define dest1_m  arg4_m
 %define dest2_m  tmp3_m
 %define dest3_m  tmp4_m
 %define dest4_m  tmp5_m
 %define vskip3_m tmp6_m
%endif
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR movdqu
 %define XSTR movdqu
%else
;;; Use aligned load/store; non-temporal unless NO_NT_LDST is defined
 %ifdef NO_NT_LDST
  %define XLDR movdqa
  %define XSTR movdqa
 %else
  %define XLDR movntdqa
  %define XSTR movntdq
 %endif
%endif
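;;; EC_ALIGNED_ADDR and NO_NT_LDST are assembler-time defines (typically
;;; passed as -D flags at build time); they trade tolerance of unaligned
;;; buffers against aligned and non-temporal load/store throughput.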
%ifidn PS,8                     ; 64-bit code
 default rel
 [bits 64]
%endif

section .text

%ifidn PS,8                     ; 64-bit code
 %define xmask0f  xmm14
 %define xgft1_lo xmm2
 %define xgft1_hi xmm3
 %define xgft2_lo xmm11
 %define xgft2_hi xmm4
 %define xgft3_lo xmm9
 %define xgft3_hi xmm5
 %define xgft4_lo xmm7
 %define xgft4_hi xmm6
 %define x0       xmm0
 %define xtmpa    xmm1
 %define xp1      xmm8
 %define xp2      xmm10
 %define xp3      xmm12
 %define xp4      xmm13
%else
 %define xmm_trans xmm7         ; only 8 xmm registers in 32-bit mode: one
                                ; register is shared by the mask and all
                                ; table loads and reloaded as needed
 %define xmask0f  xmm_trans
 %define xgft1_lo xmm_trans
 %define xgft1_hi xmm6
 %define xgft2_lo xgft1_lo
 %define xgft2_hi xgft1_hi
 %define xgft3_lo xgft1_lo
 %define xgft3_hi xgft1_hi
 %define xgft4_lo xgft1_lo
 %define xgft4_hi xgft1_hi
 %define x0       xmm0
 %define xtmpa    xmm1
 %define xp1      xmm2
 %define xp2      xmm3
 %define xp3      xmm4
 %define xp4      xmm5
%endif
align 16
global gf_4vect_dot_prod_sse:ISAL_SYM_TYPE_FUNCTION
func(gf_4vect_dot_prod_sse)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_4vect_dot_prod_sse:ISAL_SYM_TYPE_FUNCTION
func(_gf_4vect_dot_prod_sse)
%endif
        FUNC_SAVE
        SLDR    len, len_m
        sub     len, 16
        SSTR    len_m, len
        jl      .return_fail
        xor     pos, pos
        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        mov     vskip3, vec
        imul    vskip3, 96              ;vskip3 = vec*96, offset of dest4's tables
        SSTR    vskip3_m, vskip3
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        SLDR    dest1, dest1_m
        mov     dest2, [dest1+PS]
        SSTR    dest2_m, dest2
        mov     dest3, [dest1+2*PS]
        SSTR    dest3_m, dest3
        mov     dest4, [dest1+3*PS]
        SSTR    dest4_m, dest4
        mov     dest1, [dest1]
        SSTR    dest1_m, dest1
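
;; Main loop: each pass computes 16 output bytes for all four destinations.
;; For every source vector the 16 input bytes are split into low and high
;; nibbles; each nibble indexes a 16-entry pshufb lookup of precomputed
;; GF(2^8) products, and the two partial products are XORed together and
;; into the accumulators xp1..xp4. This works because GF(2^8)
;; multiplication distributes over the split b = (b & 0x0f) ^ (b & 0xf0):
;;     c * b = c*(b & 0x0f) ^ c*(b & 0xf0)
;; with both factors taken from the 32-byte tables in g_tbls.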
.loop16:
        pxor    xp1, xp1
        pxor    xp2, xp2
        pxor    xp3, xp3
        pxor    xp4, xp4
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        SLDR    src, src_m
        mov     ptr, [src+vec_i]
%ifidn PS,8                             ; 64-bit code
        movdqu  xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        movdqu  xgft1_hi, [tmp+16]      ; "     "  Ax{00}, Ax{10}, ..., Ax{f0}
        movdqu  xgft2_lo, [tmp+vec*(32/PS)]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        movdqu  xgft2_hi, [tmp+vec*(32/PS)+16]  ; "     "  Bx{00}, Bx{10}, ..., Bx{f0}
        movdqu  xgft3_lo, [tmp+vec*(64/PS)]     ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        movdqu  xgft3_hi, [tmp+vec*(64/PS)+16]  ; "     "  Cx{00}, Cx{10}, ..., Cx{f0}
        movdqu  xgft4_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        movdqu  xgft4_hi, [tmp+vskip3+16]       ; "     "  Dx{00}, Dx{10}, ..., Dx{f0}
        XLDR    x0, [ptr+pos]           ;Get next source vector
        add     tmp, 32
        add     vec_i, PS
        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 3-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 3-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 3-0
%else                                   ; 32-bit code
        XLDR    x0, [ptr+pos]           ;Get next source vector
        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 3-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 3-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 3-0
        movdqu  xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        movdqu  xgft1_hi, [tmp+16]      ; "     "  Ax{00}, Ax{10}, ..., Ax{f0}
%endif
        pshufb  xgft1_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft1_hi, xgft1_lo      ;GF add high and low partials
        pxor    xp1, xgft1_hi           ;xp1 += partial
%ifidn PS,4                             ; 32-bit code
        movdqu  xgft2_lo, [tmp+vec*(32/PS)]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        movdqu  xgft2_hi, [tmp+vec*(32/PS)+16]  ; "     "  Bx{00}, Bx{10}, ..., Bx{f0}
%endif
        pshufb  xgft2_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft2_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft2_hi, xgft2_lo      ;GF add high and low partials
        pxor    xp2, xgft2_hi           ;xp2 += partial
%ifidn PS,4                             ; 32-bit code
        sal     vec, 1                  ;Double vec so vec*(32/PS) reaches the C tables
        movdqu  xgft3_lo, [tmp+vec*(32/PS)]     ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        movdqu  xgft3_hi, [tmp+vec*(32/PS)+16]  ; "     "  Cx{00}, Cx{10}, ..., Cx{f0}
        sar     vec, 1                  ;Restore vec
%endif
        pshufb  xgft3_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft3_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft3_hi, xgft3_lo      ;GF add high and low partials
        pxor    xp3, xgft3_hi           ;xp3 += partial
%ifidn PS,4                             ; 32-bit code
        SLDR    vskip3, vskip3_m
        movdqu  xgft4_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        movdqu  xgft4_hi, [tmp+vskip3+16]       ; "     "  Dx{00}, Dx{10}, ..., Dx{f0}
        add     tmp, 32
        add     vec_i, PS
%endif
        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pxor    xp4, xgft4_hi           ;xp4 += partial
        cmp     vec_i, vec
        jl      .next_vect
        SLDR    dest1, dest1_m
        SLDR    dest2, dest2_m
        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        SLDR    dest3, dest3_m
        XSTR    [dest3+pos], xp3
        SLDR    dest4, dest4_m
        XSTR    [dest4+pos], xp4
        SLDR    len, len_m
        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16
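
        ;; len was biased by -16 at entry, so len+16 is the original
        ;; length. If pos has reached it, every byte is processed;
        ;; otherwise run one final 16-byte pass at the overlapped offset
        ;; len (original length minus 16). The overlapped bytes are
        ;; recomputed and rewritten with identical values, so the
        ;; second store is harmless.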
        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len                ;Overlapped offset length-16
        jmp     .loop16                 ;Do one more overlap pass

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame
section .data

align 16
mask0f: dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;;       func                   core, ver, snum
slversion gf_4vect_dot_prod_sse, 00,   06,  0064