;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2023 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_mad_avx512_gfni(len, vec, vec_i, mul_array, src, dest);
;;;
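;;;
;;; For reference, a sketch of the C prototype this routine is expected to
;;; implement (parameter names follow the comment above; the authoritative
;;; declaration lives in ISA-L's erasure-code headers, not in this file):
;;;
;;;   void gf_6vect_mad_avx512_gfni(int len, int vec, int vec_i,
;;;                                 unsigned char *mul_array,
;;;                                 unsigned char *src, unsigned char **dest);
;;;
;;; For each of the six outputs it computes, over i in [0, len):
;;;   dest[j][i] ^= gf_mul(coeff[j], src[i])
;;; where coeff[j] is the 8-byte GF(2^8) affine matrix stored at
;;; mul_array + 8*(vec*j + vec_i).
;;;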
%include "reg_sizes.asm"
%include "gf_vect_gfni.inc"

%if AS_FEATURE_LEVEL >= 10

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r12 ; must be saved and restored
 %define func(x) x: endbranch

 %macro FUNC_SAVE 0
        push    r12
 %endmacro

 %macro FUNC_RESTORE 0
        pop     r12
 %endmacro
%endif
%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12 ; must be saved, loaded and restored
 %define arg5   r14 ; must be saved, loaded and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13 ; must be saved and restored
 %define stack_size 16*10 + 3*8 ; 10 xmm spills + 3 gp-register spills
 %define arg(x) [rsp + stack_size + 8 + 8*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        vmovdqa [rsp + 16*3], xmm9
        vmovdqa [rsp + 16*4], xmm10
        vmovdqa [rsp + 16*5], xmm11
        vmovdqa [rsp + 16*6], xmm12
        vmovdqa [rsp + 16*7], xmm13
        vmovdqa [rsp + 16*8], xmm14
        vmovdqa [rsp + 16*9], xmm15
        mov     [rsp + 10*16 + 0*8], r12
        mov     [rsp + 10*16 + 1*8], r13
        mov     [rsp + 10*16 + 2*8], r14
        end_prolog
        mov     arg4, arg(4)    ; stack arguments 5 and 6 loaded into registers
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6,  [rsp + 16*0]
        vmovdqa xmm7,  [rsp + 16*1]
        vmovdqa xmm8,  [rsp + 16*2]
        vmovdqa xmm9,  [rsp + 16*3]
        vmovdqa xmm10, [rsp + 16*4]
        vmovdqa xmm11, [rsp + 16*5]
        vmovdqa xmm12, [rsp + 16*6]
        vmovdqa xmm13, [rsp + 16*7]
        vmovdqa xmm14, [rsp + 16*8]
        vmovdqa xmm15, [rsp + 16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        add     rsp, stack_size
 %endmacro
%endif
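;;;
;;; Win64 ABI note: the Microsoft x64 calling convention passes only the
;;; first four integer arguments in rcx/rdx/r8/r9; arguments 5 and 6 arrive
;;; on the stack, which is why FUNC_SAVE loads them via arg(4)/arg(5).
;;; xmm6-xmm15 and r12-r15 are callee-saved there, hence the spills above.
;;;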
%define len       arg0
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       rax
%define dest2     tmp3
%define dest3     tmp2
%define dest4     mul_array
%define dest5     vec
%define dest6     vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use aligned load/store (non-temporal unless NO_NT_LDST is defined)
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa64
  %define XSTR vmovdqa64
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
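;;;
;;; Illustrative build-time selection of the knobs above (assumed flags and
;;; paths; the project's own build system is authoritative):
;;;
;;;   nasm -f elf64 gf_6vect_mad_avx512_gfni.asm                  ; unaligned vmovdqu8
;;;   nasm -f elf64 -DEC_ALIGNED_ADDR ...                         ; streaming vmovntdqa/vmovntdq
;;;   nasm -f elf64 -DEC_ALIGNED_ADDR -DNO_NT_LDST ...            ; aligned, cached vmovdqa64
;;;
;;; EC_ALIGNED_ADDR promises 64-byte-aligned buffers; NO_NT_LDST additionally
;;; keeps data in cache instead of using streaming stores.
;;;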
default rel
[bits 64]
section .text

%define x0    zmm0
%define xd1   zmm1
%define xd2   zmm2
%define xd3   zmm3
%define xd4   zmm4
%define xd5   zmm5
%define xd6   zmm6
%define xgft1 zmm7
%define xgft2 zmm8
%define xgft3 zmm9
%define xgft4 zmm10
%define xgft5 zmm11
%define xgft6 zmm12
%define xret1 zmm13
%define xret2 zmm14
%define xret3 zmm15
%define xret4 zmm16
%define xret5 zmm17
%define xret6 zmm18
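;;;
;;; Register note: xret4-xret6 use zmm16-zmm18. These have no xmm6-xmm15
;;; aliases (Win64 preserves only the low 128 bits of xmm6-xmm15), so they
;;; need no spill in FUNC_SAVE, but they are encodable only with EVEX,
;;; i.e. they require AVX-512.
;;;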
;;
;; Encodes 64 bytes of a single source into 6x 64 bytes (parity disks)
;;
%macro ENCODE_64B_6 0-1
%define %%KMASK %1

%if %0 == 1
        vmovdqu8 x0{%%KMASK}, [src + pos]       ;Get next source vector
        vmovdqu8 xd1{%%KMASK}, [dest1 + pos]    ;Get next dest vector
        vmovdqu8 xd2{%%KMASK}, [dest2 + pos]    ;Get next dest vector
        vmovdqu8 xd3{%%KMASK}, [dest3 + pos]    ;Get next dest vector
        vmovdqu8 xd4{%%KMASK}, [dest4 + pos]    ;Get next dest vector
        vmovdqu8 xd5{%%KMASK}, [dest5 + pos]    ;Get next dest vector
        vmovdqu8 xd6{%%KMASK}, [dest6 + pos]    ;Get next dest vector
%else
        XLDR    x0, [src + pos]                 ;Get next source vector
        XLDR    xd1, [dest1 + pos]              ;Get next dest vector
        XLDR    xd2, [dest2 + pos]              ;Get next dest vector
        XLDR    xd3, [dest3 + pos]              ;Get next dest vector
        XLDR    xd4, [dest4 + pos]              ;Get next dest vector
        XLDR    xd5, [dest5 + pos]              ;Get next dest vector
        XLDR    xd6, [dest6 + pos]              ;Get next dest vector
%endif

        GF_MUL_XOR EVEX, x0, xgft1, xret1, xd1, xgft2, xret2, xd2, xgft3, xret3, xd3, \
                   xgft4, xret4, xd4, xgft5, xret5, xd5, xgft6, xret6, xd6

%if %0 == 1
        vmovdqu8 [dest1 + pos]{%%KMASK}, xd1
        vmovdqu8 [dest2 + pos]{%%KMASK}, xd2
        vmovdqu8 [dest3 + pos]{%%KMASK}, xd3
        vmovdqu8 [dest4 + pos]{%%KMASK}, xd4
        vmovdqu8 [dest5 + pos]{%%KMASK}, xd5
        vmovdqu8 [dest6 + pos]{%%KMASK}, xd6
%else
        XSTR    [dest1 + pos], xd1
        XSTR    [dest2 + pos], xd2
        XSTR    [dest3 + pos], xd3
        XSTR    [dest4 + pos], xd4
        XSTR    [dest5 + pos], xd5
        XSTR    [dest6 + pos], xd6
%endif
%endmacro
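;;
;; GF_MUL_XOR is defined in gf_vect_gfni.inc. For each (xgftN, xretN, xdN)
;; triple it is expected to expand to one GF(2^8) affine multiply plus one
;; XOR accumulate, roughly as below (an illustrative sketch, not the
;; include file's exact text):
;;
;;      vgf2p8affineqb  xret1, x0, xgft1, 0x00  ; xret1 = matrix xgft1 applied to each byte of x0
;;      vpxorq          xd1, xd1, xret1         ; xd1 ^= xret1
;;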
align 16
global gf_6vect_mad_avx512_gfni, function
func(gf_6vect_mad_avx512_gfni)
        FUNC_SAVE
        xor     pos, pos
        shl     vec_i, 3        ;Multiply by 8: each coefficient is an 8-byte affine matrix
        shl     vec, 3          ;Multiply by 8
        lea     tmp, [mul_array + vec_i]
        vbroadcastf32x2 xgft1, [tmp]            ;mul_array + 0*vec + vec_i
        vbroadcastf32x2 xgft2, [tmp + vec]      ;mul_array + 1*vec + vec_i
        vbroadcastf32x2 xgft3, [tmp + vec*2]    ;mul_array + 2*vec + vec_i
        vbroadcastf32x2 xgft5, [tmp + vec*4]    ;mul_array + 4*vec + vec_i
        add     tmp, vec
        vbroadcastf32x2 xgft4, [tmp + vec*2]    ;mul_array + 3*vec + vec_i
        vbroadcastf32x2 xgft6, [tmp + vec*4]    ;mul_array + 5*vec + vec_i
        mov     dest2, [dest1 + 8]      ; reuse tmp3
        mov     dest3, [dest1 + 2*8]    ; reuse tmp2
        mov     dest4, [dest1 + 3*8]    ; reuse mul_array
        mov     dest5, [dest1 + 4*8]    ; reuse vec
        mov     dest6, [dest1 + 5*8]    ; reuse vec_i
        mov     dest1, [dest1]          ; loaded last; dest1 held the dest array pointer
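;;
;; Addressing note: x86 scaled addressing only offers *1, *2, *4 and *8, so
;; the 3*vec and 5*vec table offsets are reached by advancing tmp by vec once
;; and reusing the *2 and *4 scales. vbroadcastf32x2 replicates one 8-byte
;; affine matrix across all eight qwords of a zmm register, so each xgftN
;; applies the same coefficient to all 64 byte lanes at once.
;;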
        cmp     len, 64
        jl      .len_lt_64

.loop64:
        ENCODE_64B_6
        add     pos, 64         ;Loop on 64 bytes at a time
        sub     len, 64
        cmp     len, 64
        jge     .loop64

.len_lt_64:
        cmp     len, 0
        jle     .exit
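        ;; Build a k-mask with the low `len` bits set, i.e. (1 << len) - 1,
        ;; so the final partial block reads and writes exactly `len` bytes.
        ;; For example, len = 5 yields tmp = 0b11111, enabling 5 byte lanes.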
        xor     tmp, tmp
        bts     tmp, len        ;tmp = 1 << len
        dec     tmp             ;tmp = (1 << len) - 1
        kmovq   k1, tmp
        ENCODE_64B_6 k1

.exit:
        vzeroupper
        FUNC_RESTORE
        ret

endproc_frame

%endif ; if AS_FEATURE_LEVEL >= 10