;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_4vect_dot_prod_avx512(len, vec, *g_tbls, **buffs, **dests);
;;;
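;;; Computes four GF(2^8) dot products across 'vec' source buffers:
;;;    dests[k][i] = XOR over j of gf_mul(coefficient(k, j), buffs[j][i]),  k = 0..3
;;; (argument roles follow the usual ISA-L gf_Nvect_dot_prod convention; the
;;; notes below are derived from the code in this file)
;;;   len    - byte length of each source/dest buffer; must be >= 64
;;;   vec    - number of source buffers
;;;   g_tbls - 4*vec concatenated 32-byte multiply tables (low-nibble table
;;;            in bytes 0..15, high-nibble table in bytes 16..31)
;;;   buffs  - array of 'vec' pointers to the source buffers
;;;   dests  - array of 4 pointers to the destination buffers
;;; Returns 0 in rax on success, 1 if len < 64.
;;;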

%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r12             ; must be saved and restored
 %define tmp5   r14             ; must be saved and restored
 %define tmp6   r15             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
        push    r15
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r15
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r14             ; must be saved and restored
 %define tmp5   rdi             ; must be saved and restored
 %define tmp6   rsi             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3
 %define stack_size 9*16 + 7*8  ; must be an odd multiple of 8
 %define arg(x)     [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
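 ;; Win64 ABI note: xmm6-xmm15, r12-r15, rdi and rsi are callee-saved. The
 ;; kernel below uses zmm6-zmm14, whose low 128 bits alias xmm6-xmm14, so
 ;; FUNC_SAVE spills those xmm halves plus the six non-volatile GPRs used.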
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa         [rsp + 0*16], xmm6
        vmovdqa         [rsp + 1*16], xmm7
        vmovdqa         [rsp + 2*16], xmm8
        vmovdqa         [rsp + 3*16], xmm9
        vmovdqa         [rsp + 4*16], xmm10
        vmovdqa         [rsp + 5*16], xmm11
        vmovdqa         [rsp + 6*16], xmm12
        vmovdqa         [rsp + 7*16], xmm13
        vmovdqa         [rsp + 8*16], xmm14
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        save_reg        rdi, 9*16 + 4*8
        save_reg        rsi, 9*16 + 5*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6,  [rsp + 0*16]
        vmovdqa xmm7,  [rsp + 1*16]
        vmovdqa xmm8,  [rsp + 2*16]
        vmovdqa xmm9,  [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
        mov     rdi, [rsp + 9*16 + 4*8]
        mov     rsi, [rsp + 9*16 + 5*8]
        add     rsp, stack_size
 %endmacro
%endif

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest2     tmp3
%define dest3     tmp4
%define dest4     tmp5
%define vskip3    tmp6
%define pos       return

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

%define xmask0f   zmm14
%define xgft1_lo  zmm13
%define xgft1_loy ymm13
%define xgft1_hi  zmm12
%define xgft2_lo  zmm11
%define xgft2_loy ymm11
%define xgft2_hi  zmm10
%define xgft3_lo  zmm9
%define xgft3_loy ymm9
%define xgft3_hi  zmm8
%define xgft4_lo  zmm7
%define xgft4_loy ymm7
%define xgft4_hi  zmm6

%define x0    zmm0
%define xtmpa zmm1
%define xp1   zmm2
%define xp2   zmm3
%define xp3   zmm4
%define xp4   zmm5
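
;;; Algorithm note (describing the loop below): each GF(2^8) multiply by a
;;; constant c is done with two 16-entry table lookups (vpshufb). One table
;;; maps the low nibble of each source byte to c*nibble, the other maps the
;;; high nibble to c*(nibble << 4); XORing the two lookups gives the full
;;; product, which is then XOR-accumulated into xp1..xp4. The vshufi64x2
;;; shuffles with imm 0x00 / 0x55 broadcast 128-bit lane 0 (low-nibble table)
;;; / lane 1 (high-nibble table) of each 32-byte table pair across all four
;;; lanes of the zmm register.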

default rel
[bits 64]

section .text

align 16
global gf_4vect_dot_prod_avx512:ISAL_SYM_TYPE_FUNCTION
func(gf_4vect_dot_prod_avx512)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_4vect_dot_prod_avx512:ISAL_SYM_TYPE_FUNCTION
func(_gf_4vect_dot_prod_avx512)
%endif

        FUNC_SAVE
        sub     len, 64
        jl      .return_fail

        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp   ;Construct mask 0x0f0f0f...
        mov     vskip3, vec
        imul    vskip3, 96
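        ;; vskip3 = 96*vec, the byte offset of the 4th output's tables in
        ;; g_tbls (32 table bytes per source per output). It is precomputed
        ;; because the scaled-index addressing used for outputs 2 and 3 in
        ;; the loop tops out at a scale factor of 8.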
        sal     vec, LOG_PS         ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]
        mov     dest1, [dest1]

.loop64:
        vpxorq  xp1, xp1, xp1
        vpxorq  xp2, xp2, xp2
        vpxorq  xp3, xp3, xp3
        vpxorq  xp4, xp4, xp4
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        mov     ptr, [src+vec_i]
        XLDR    x0, [ptr+pos]       ;Get next source vector
        add     vec_i, PS

        vpandq  xtmpa, x0, xmask0f  ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4           ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f     ;Mask high src nibble in bits 4-0

        vmovdqu8 xgft1_loy, [tmp]               ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu8 xgft2_loy, [tmp+vec*(32/PS)]   ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu8 xgft3_loy, [tmp+vec*(64/PS)]   ;Load array Cx{00}..{0f}, Cx{00}..{f0}
        vmovdqu8 xgft4_loy, [tmp+vskip3]        ;Load array Dx{00}..{0f}, Dx{00}..{f0}
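        ;; g_tbls holds 32 bytes per (output, source) pair, grouped by output:
        ;; A tables at tmp (advanced 32 bytes per source), B at tmp + 32*vecs,
        ;; C at tmp + 64*vecs, D at tmp + 96*vecs. Note vec was scaled by PS
        ;; above, so vec*(32/PS) equals 32 * (number of source vectors).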
        add     tmp, 32

        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00

        vpshufb xgft1_hi, xgft1_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft1_hi, xgft1_hi, xgft1_lo    ;GF add high and low partials
        vpxorq  xp1, xp1, xgft1_hi              ;xp1 += partial

        vpshufb xgft2_hi, xgft2_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft2_hi, xgft2_hi, xgft2_lo    ;GF add high and low partials
        vpxorq  xp2, xp2, xgft2_hi              ;xp2 += partial

        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00
        vshufi64x2 xgft4_hi, xgft4_lo, xgft4_lo, 0x55
        vshufi64x2 xgft4_lo, xgft4_lo, xgft4_lo, 0x00

        vpshufb xgft3_hi, xgft3_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft3_hi, xgft3_hi, xgft3_lo    ;GF add high and low partials
        vpxorq  xp3, xp3, xgft3_hi              ;xp3 += partial

        vpshufb xgft4_hi, xgft4_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft4_hi, xgft4_hi, xgft4_lo    ;GF add high and low partials
        vpxorq  xp4, xp4, xgft4_hi              ;xp4 += partial

        cmp     vec_i, vec
        jl      .next_vect

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [dest3+pos], xp3
        XSTR    [dest4+pos], xp4

        add     pos, 64             ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len            ;Overlapped offset length-64
        jmp     .loop64             ;Do one more overlap pass
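        ;; The overlap pass recomputes the final 64 bytes ending exactly at
        ;; the end of the buffers. Re-storing bytes written by the previous
        ;; iteration is safe: each destination byte depends only on source
        ;; bytes at the same offset, so the overlap region is rewritten with
        ;; identical values.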

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_4vect_dot_prod_avx512
no_gf_4vect_dot_prod_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512