gf_6vect_dot_prod_avx.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_dot_prod_avx(len, vec, *g_tbls, **buffs, **dests);
;;;
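;;; Computes 6 GF(2^8) dot products at once: for each of the 6 dests,
;;; dests[d][i] = sum over v of (a[d][v] * buffs[v][i]), i = 0..len-1,
;;; where each coefficient a[d][v] has been pre-expanded in g_tbls into
;;; a 32-byte pair of 16-entry vpshufb lookup tables. Roughly, as a
;;; scalar sketch (a[][] and gf_mul() are illustrative names here, not
;;; part of this file):
;;;
;;;     for (d = 0; d < 6; d++)
;;;         for (i = 0; i < len; i++) {
;;;             p = 0;
;;;             for (v = 0; v < vec; v++)
;;;                 p ^= gf_mul(a[d][v], buffs[v][i]);  /* GF(2^8) mul */
;;;             dests[d][i] = p;
;;;         }
;;;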
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r12             ; must be saved and restored
 %define tmp5   r14             ; must be saved and restored
 %define tmp6   r15             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
        push    r15
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r15
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r14             ; must be saved and restored
 %define tmp5   rdi             ; must be saved and restored
 %define tmp6   rsi             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3
 %define stack_size 10*16 + 7*8 ; must be an odd multiple of 8
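 ;; An odd multiple of 8 keeps rsp 16-byte aligned after alloc_stack:
 ;; rsp is 8 mod 16 at entry (return address), so the xmm save slots
 ;; below land on 16-byte boundaries as save_xmm128/movdqa requires.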
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        save_xmm128     xmm6, 0*16
        save_xmm128     xmm7, 1*16
        save_xmm128     xmm8, 2*16
        save_xmm128     xmm9, 3*16
        save_xmm128     xmm10, 4*16
        save_xmm128     xmm11, 5*16
        save_xmm128     xmm12, 6*16
        save_xmm128     xmm13, 7*16
        save_xmm128     xmm14, 8*16
        save_xmm128     xmm15, 9*16
        save_reg        r12, 10*16 + 0*8
        save_reg        r13, 10*16 + 1*8
        save_reg        r14, 10*16 + 2*8
        save_reg        r15, 10*16 + 3*8
        save_reg        rdi, 10*16 + 4*8
        save_reg        rsi, 10*16 + 5*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        vmovdqa xmm15, [rsp + 9*16]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
        mov     rsi, [rsp + 10*16 + 5*8]
        add     rsp, stack_size
 %endmacro
%endif
%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest      arg4
%define ptr       arg5

%define vec_i     tmp2
%define dest1     tmp3
%define dest2     tmp4
%define vskip1    tmp5
%define vskip3    tmp6
%define pos       return
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use aligned load/store, non-temporal unless NO_NT_LDST is defined
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
default rel

[bits 64]
section .text

%define xmask0f  xmm15
%define xgft1_lo xmm14
%define xgft1_hi xmm13
%define xgft2_lo xmm12
%define xgft2_hi xmm11
%define xgft3_lo xmm10
%define xgft3_hi xmm9

%define x0    xmm0
%define xtmpa xmm1
%define xp1   xmm2
%define xp2   xmm3
%define xp3   xmm4
%define xp4   xmm5
%define xp5   xmm6
%define xp6   xmm7
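;; The xgft1-3 lo/hi registers are scratch: each .next_vect pass uses
;; them first for the dest1-3 tables, then reloads them with the
;; dest4-6 tables, so only 6 table registers cover all 6 outputs.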
align 16
global gf_6vect_dot_prod_avx:ISAL_SYM_TYPE_FUNCTION
func(gf_6vect_dot_prod_avx)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_6vect_dot_prod_avx:ISAL_SYM_TYPE_FUNCTION
func(_gf_6vect_dot_prod_avx)
%endif

        FUNC_SAVE
        sub     len, 16
        jl      .return_fail
        xor     pos, pos
        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
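        ;; g_tbls layout: 32 bytes of lookup table per (dest, src) pair,
        ;; with the vec tables for one dest stored back to back. vskip1 =
        ;; 32*vec is thus the stride between consecutive dests' table
        ;; blocks; vskip3 = 96*vec skips three blocks (to the dest4-6
        ;; tables), and vskip5 = 5*vskip1 is formed with lea in the loop.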
        mov     vskip1, vec
        imul    vskip1, 32
        mov     vskip3, vec
        imul    vskip3, 96
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest1, [dest]
        mov     dest2, [dest+PS]
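        ;; Only dest1/dest2 stay in callee-saved registers; dest3-dest6
        ;; are reloaded from the dest array after each 16-byte pass, once
        ;; tmp, ptr and vec_i are free again.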
.loop16:
        mov     tmp, mul_array
        xor     vec_i, vec_i
        vpxor   xp1, xp1
        vpxor   xp2, xp2
        vpxor   xp3, xp3
        vpxor   xp4, xp4
        vpxor   xp5, xp5
        vpxor   xp6, xp6
.next_vect:
        mov     ptr, [src+vec_i]
        add     vec_i, PS
        XLDR    x0, [ptr+pos]           ;Get next source vector

        vmovdqu xgft1_lo, [tmp]                 ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        vmovdqu xgft1_hi, [tmp+16]              ; "     Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vskip1*1]        ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        vmovdqu xgft2_hi, [tmp+vskip1*1+16]     ; "     Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+vskip1*2]        ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        vmovdqu xgft3_hi, [tmp+vskip1*2+16]     ; "     Cx{00}, Cx{10}, ..., Cx{f0}
        lea     ptr, [vskip1 + vskip1*4]        ;ptr = vskip5
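        ;; GF(2^8) multiply by table lookup: split each source byte into
        ;; its low and high nibbles, use them as vpshufb indices into the
        ;; 16-entry lo/hi tables, and XOR the two partial products.
        ;; vpsraw shifts 16-bit lanes, so neighbor-byte bits leak into
        ;; bits 7-4, but the vpand mask below discards them.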
        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vpshufb xgft1_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo      ;GF add high and low partials
        vpxor   xp1, xgft1_hi           ;xp1 += partial

        vpshufb xgft2_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft2_hi, xgft2_lo      ;GF add high and low partials
        vpxor   xp2, xgft2_hi           ;xp2 += partial

        vpshufb xgft3_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_lo      ;GF add high and low partials
        vpxor   xp3, xgft3_hi           ;xp3 += partial

        vmovdqu xgft1_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        vmovdqu xgft1_hi, [tmp+vskip3+16]       ; "     Dx{00}, Dx{10}, ..., Dx{f0}
        vmovdqu xgft2_lo, [tmp+vskip1*4]        ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
        vmovdqu xgft2_hi, [tmp+vskip1*4+16]     ; "     Ex{00}, Ex{10}, ..., Ex{f0}
        vmovdqu xgft3_lo, [tmp+ptr]             ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
        vmovdqu xgft3_hi, [tmp+ptr+16]          ; "     Fx{00}, Fx{10}, ..., Fx{f0}
        add     tmp, 32

        vpshufb xgft1_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo      ;GF add high and low partials
        vpxor   xp4, xgft1_hi           ;xp4 += partial

        vpshufb xgft2_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft2_hi, xgft2_lo      ;GF add high and low partials
        vpxor   xp5, xgft2_hi           ;xp5 += partial

        vpshufb xgft3_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_lo      ;GF add high and low partials
        vpxor   xp6, xgft3_hi           ;xp6 += partial

        cmp     vec_i, vec
        jl      .next_vect
        mov     tmp, [dest+2*PS]        ;Reload dest3 pointer
        mov     ptr, [dest+3*PS]        ;Reload dest4 pointer
        mov     vec_i, [dest+4*PS]      ;Reload dest5 pointer

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [tmp+pos], xp3
        mov     tmp, [dest+5*PS]        ;Reload dest6 pointer
        XSTR    [ptr+pos], xp4
        XSTR    [vec_i+pos], xp5
        XSTR    [tmp+pos], xp6

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16
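        ;; len was biased by -16 at entry, so pos == len+16 means the
        ;; original length was a multiple of 16 and we are done. Otherwise
        ;; run one final, overlapping pass on the last 16 bytes, ending
        ;; exactly at the true end of the buffers.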
        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len                ;Overlapped offset length-16
        jmp     .loop16                 ;Do one more overlap pass

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 16
mask0f: dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;;       func                    core, ver, snum
slversion gf_6vect_dot_prod_avx, 02, 04, 0195