gf_4vect_mad_avx512_gfni.asm
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2023 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_4vect_mad_avx512_gfni(len, vec, vec_i, mul_array, src, dest);
;;;
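;;; Galois-field multiply-and-add: XOR-accumulates the GF(2^8) product of a
;;; single source vector into four destination (parity) vectors, processing
;;; 64 bytes per iteration with the GFNI affine instructions.
;;;
;;; Assumed C prototype, following the ISA-L mad API family (the exact
;;; types are not stated in this file):
;;;   void gf_4vect_mad_avx512_gfni(int len, int vec, int vec_i,
;;;                                 unsigned char *gftbls,
;;;                                 unsigned char *src,
;;;                                 unsigned char **dest);
;;; len is the vector length in bytes, vec the number of source vectors,
;;; vec_i the index of this source, mul_array/gftbls a table of 8-byte GFNI
;;; affine matrices (vec * 8 bytes per destination row), and dest an array
;;; of four destination pointers.
;;;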
%include "reg_sizes.asm"
%include "gf_vect_gfni.inc"

%if AS_FEATURE_LEVEL >= 10

%ifidn __OUTPUT_FORMAT__, elf64
%define arg0   rdi
%define arg1   rsi
%define arg2   rdx
%define arg3   rcx
%define arg4   r8
%define arg5   r9
%define tmp    r11
%define func(x) x: endbranch
%define FUNC_SAVE
%define FUNC_RESTORE
%endif
%ifidn __OUTPUT_FORMAT__, win64
%define arg0   rcx
%define arg1   rdx
%define arg2   r8
%define arg3   r9
%define arg4   r12
%define arg5   r13
%define tmp    r11
%define stack_size 7*16 + 3*8
%define arg(x) [rsp + stack_size + 8 + 8*x]
%define func(x) proc_frame x

%macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        vmovdqa [rsp + 16*3], xmm9
        vmovdqa [rsp + 16*4], xmm10
        vmovdqa [rsp + 16*5], xmm11
        vmovdqa [rsp + 16*6], xmm12
        mov     [rsp + 7*16 + 0*8], r12
        mov     [rsp + 7*16 + 1*8], r13
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 16*0]
        vmovdqa xmm7, [rsp + 16*1]
        vmovdqa xmm8, [rsp + 16*2]
        vmovdqa xmm9, [rsp + 16*3]
        vmovdqa xmm10, [rsp + 16*4]
        vmovdqa xmm11, [rsp + 16*5]
        vmovdqa xmm12, [rsp + 16*6]
        mov     r12, [rsp + 7*16 + 0*8]
        mov     r13, [rsp + 7*16 + 1*8]
        add     rsp, stack_size
%endmacro
%endif
%define len       arg0
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       rax

;; Argument registers that are dead after the setup code are reused
;; to hold the remaining destination pointers.
%define dest2 mul_array
%define dest3 vec
%define dest4 vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
%define XLDR vmovdqu8
%define XSTR vmovdqu8
%else
;;; Use aligned load/store (non-temporal unless NO_NT_LDST is defined)
%ifdef NO_NT_LDST
%define XLDR vmovdqa64
%define XSTR vmovdqa64
%else
%define XLDR vmovntdqa
%define XSTR vmovntdq
%endif
%endif
default rel
[bits 64]
section .text

%define x0    zmm0
%define xd1   zmm1
%define xd2   zmm2
%define xd3   zmm3
%define xd4   zmm4
%define xgft1 zmm5
%define xgft2 zmm6
%define xgft3 zmm7
%define xgft4 zmm8
%define xret1 zmm9
%define xret2 zmm10
%define xret3 zmm11
%define xret4 zmm12
;;
;; Encodes 64 bytes of a single source into 4x 64 bytes (parity disks)
;;
%macro ENCODE_64B_4 0-1
%define %%KMASK %1

%if %0 == 1
        vmovdqu8 x0{%%KMASK}, [src + pos]       ;Get next source vector
        vmovdqu8 xd1{%%KMASK}, [dest1 + pos]    ;Get next dest vector
        vmovdqu8 xd2{%%KMASK}, [dest2 + pos]    ;Get next dest vector
        vmovdqu8 xd3{%%KMASK}, [dest3 + pos]    ;Get next dest vector
        vmovdqu8 xd4{%%KMASK}, [dest4 + pos]    ;Get next dest vector
%else
        XLDR    x0, [src + pos]                 ;Get next source vector
        XLDR    xd1, [dest1 + pos]              ;Get next dest vector
        XLDR    xd2, [dest2 + pos]              ;Get next dest vector
        XLDR    xd3, [dest3 + pos]              ;Get next dest vector
        XLDR    xd4, [dest4 + pos]              ;Get next dest vector
%endif
        GF_MUL_XOR EVEX, x0, xgft1, xret1, xd1, xgft2, xret2, xd2, xgft3, xret3, xd3, \
                xgft4, xret4, xd4

%if %0 == 1
        vmovdqu8 [dest1 + pos]{%%KMASK}, xd1
        vmovdqu8 [dest2 + pos]{%%KMASK}, xd2
        vmovdqu8 [dest3 + pos]{%%KMASK}, xd3
        vmovdqu8 [dest4 + pos]{%%KMASK}, xd4
%else
        XSTR    [dest1 + pos], xd1
        XSTR    [dest2 + pos], xd2
        XSTR    [dest3 + pos], xd3
        XSTR    [dest4 + pos], xd4
%endif
%endmacro
align 16
global gf_4vect_mad_avx512_gfni, function
func(gf_4vect_mad_avx512_gfni)
        FUNC_SAVE
        xor     pos, pos
        shl     vec_i, 3                ;Multiply by 8 (bytes per GFNI affine matrix)
        shl     vec, 3                  ;Multiply by 8 (row stride of mul_array in bytes)
        lea     tmp, [mul_array + vec_i]
        vbroadcastf32x2 xgft1, [tmp]
        vbroadcastf32x2 xgft2, [tmp + vec]
        vbroadcastf32x2 xgft3, [tmp + vec*2]
        add     tmp, vec
        vbroadcastf32x2 xgft4, [tmp + vec*2]    ;[mul_array + vec_i + 3*vec]
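        ; Each vbroadcastf32x2 above replicates one 8-byte affine matrix
        ; into every qword lane of a zmm register, so a single
        ; vgf2p8affineqb can apply the same GF constant to all 64 source
        ; bytes at once.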
        mov     dest2, [dest1 + 8]      ;load dest[1], reusing mul_array
        mov     dest3, [dest1 + 2*8]    ;load dest[2], reusing vec
        mov     dest4, [dest1 + 3*8]    ;load dest[3], reusing vec_i
        mov     dest1, [dest1]          ;load dest[0]; arg5 held the dest pointer array
        cmp     len, 64
        jl      .len_lt_64

.loop64:
        ENCODE_64B_4
        add     pos, 64                 ;Loop on 64 bytes at a time
        sub     len, 64
        cmp     len, 64
        jge     .loop64

.len_lt_64:
        cmp     len, 0
        jle     .exit
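        ; Tail: build a k-mask with the low len bits set, i.e.
        ; tmp = (1 << len) - 1. Since 0 < len < 64 here, bts/dec produces
        ; the byte mask for the final partial block without a branch.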
        xor     tmp, tmp
        bts     tmp, len
        dec     tmp
        kmovq   k1, tmp
        ENCODE_64B_4 k1

.exit:
        vzeroupper
        FUNC_RESTORE
        ret

endproc_frame

%endif                                  ; if AS_FEATURE_LEVEL >= 10