gf_3vect_dot_prod_avx512.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_3vect_dot_prod_avx512(len, vec, *g_tbls, **buffs, **dests);
;;;
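;;; Argument sketch (based on isa-l's erasure_code.h; that header holds
;;; the authoritative prototype):
;;;   len    - byte length of each vector; this routine requires len >= 64
;;;   vec    - number of source vectors
;;;   g_tbls - 3*vec concatenated 32-byte GF(2^8) lookup tables, e.g. as
;;;            produced by ec_init_tables(); the table for dest j and
;;;            source i sits at byte offset (j*vec + i)*32
;;;   buffs  - array of vec source pointers
;;;   dests  - array of 3 destination pointers
;;;
;;; Hedged C-side usage sketch (names assume the public isa-l API):
;;;   unsigned char gftbls[3 * VEC * 32];
;;;   ec_init_tables(VEC, 3, coeffs, gftbls);   /* expand coefficients */
;;;   gf_3vect_dot_prod_avx512(len, VEC, gftbls, srcs, dests);
;;;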
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r12             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x: endbranch
 %macro FUNC_SAVE 0
        push    r12
        push    r13
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r14             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3
 %define stack_size 6*16 + 5*8  ; must be an odd multiple of 8
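 ;; On entry rsp is 8 mod 16 (the call pushed a return address), so an
 ;; odd multiple of 8 here restores 16-byte alignment for the vmovdqa
 ;; saves in FUNC_SAVE below.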
 %define arg(x)     [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        save_reg        r12, 6*16 + 0*8
        save_reg        r13, 6*16 + 1*8
        save_reg        r14, 6*16 + 2*8
        save_reg        r15, 6*16 + 3*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        mov     r12, [rsp + 6*16 + 0*8]
        mov     r13, [rsp + 6*16 + 1*8]
        mov     r14, [rsp + 6*16 + 2*8]
        mov     r15, [rsp + 6*16 + 3*8]
        add     rsp, stack_size
 %endmacro
%endif

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest2     tmp3
%define dest3     tmp4
%define pos       return
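;;; Load/store flavor is selected at build time: unaligned moves by
;;; default, aligned moves when EC_ALIGNED_ADDR and NO_NT_LDST are both
;;; defined, and non-temporal (streaming) moves with EC_ALIGNED_ADDR alone.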
%ifndef EC_ALIGNED_ADDR
 ;;; Use unaligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
 ;;; Use aligned or non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa64
  %define XSTR vmovdqa64
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
%define xmask0f   zmm11
%define xgft1_lo  zmm10
%define xgft1_loy ymm10
%define xgft1_hi  zmm9
%define xgft2_lo  zmm8
%define xgft2_loy ymm8
%define xgft2_hi  zmm7
%define xgft3_lo  zmm6
%define xgft3_loy ymm6
%define xgft3_hi  zmm5
%define x0        zmm0
%define xtmpa     zmm1
%define xp1       zmm2
%define xp2       zmm3
%define xp3       zmm4

default rel
[bits 64]

section .text

align 16
global gf_3vect_dot_prod_avx512, function
func(gf_3vect_dot_prod_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail

        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest1, [dest1]

.loop64:
        vpxorq  xp1, xp1, xp1
        vpxorq  xp2, xp2, xp2
        vpxorq  xp3, xp3, xp3
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        mov     ptr, [src+vec_i]
        XLDR    x0, [ptr+pos]           ;Get next source vector
        add     vec_i, PS

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
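        ;; Scalar model of the nibble trick (illustrative sketch only):
        ;; given the 16-byte low- and high-nibble tables t_lo/t_hi for a
        ;; constant c, GF(2^8) multiplication decomposes as
        ;;     gf_mul(c, b) = t_lo[b & 0x0f] ^ t_hi[b >> 4]
        ;; and each vpshufb below performs 64 such 16-entry lookups at once.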
        vmovdqu8 xgft1_loy, [tmp]               ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu8 xgft2_loy, [tmp+vec*(32/PS)]   ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu8 xgft3_loy, [tmp+vec*(64/PS)]   ;Load array Cx{00}..{0f}, Cx{00}..{f0}
        add     tmp, 32

        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00

        vpshufb xgft1_hi, xgft1_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft1_hi, xgft1_hi, xgft1_lo    ;GF add high and low partials
        vpxorq  xp1, xp1, xgft1_hi              ;xp1 += partial

        vpshufb xgft2_hi, xgft2_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft2_hi, xgft2_hi, xgft2_lo    ;GF add high and low partials
        vpxorq  xp2, xp2, xgft2_hi              ;xp2 += partial

        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00
        vpshufb xgft3_hi, xgft3_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft3_hi, xgft3_hi, xgft3_lo    ;GF add high and low partials
        vpxorq  xp3, xp3, xgft3_hi              ;xp3 += partial

        cmp     vec_i, vec
        jl      .next_vect

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [dest3+pos], xp3

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass
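        ;; The overlap pass below recomputes the dot product from scratch
        ;; at offset len-64, so any bytes it shares with the previous
        ;; iteration are rewritten with identical values; no tail masking
        ;; is needed.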
        ;; Tail len
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass
.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_3vect_dot_prod_avx512
no_gf_3vect_dot_prod_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512