gf_2vect_mad_avx2_gfni.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2023 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_2vect_mad_avx2_gfni(len, vec, vec_i, mul_array, src, dest);
;;;
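;;;
;;; Arguments, as inferred from their use below (a descriptive sketch, not
;;; authoritative documentation):
;;;   len       - number of bytes to process from the source buffer
;;;   vec       - total number of source vectors in the encode operation
;;;   vec_i     - index of this source vector within mul_array
;;;   mul_array - table of 8-byte GFNI affine multiply constants; the entry
;;;               for dest j and source vec_i sits at offset 8*(j*vec + vec_i)
;;;   src       - pointer to the current source buffer
;;;   dest      - array of 2 pointers to the destination (parity) buffers
;;;
;;; Illustrative call from C (surrounding names hypothetical):
;;;   gf_2vect_mad_avx2_gfni(len, k, i, g_tbls, data[i], coding);
;;;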
%include "reg_sizes.asm"
%include "gf_vect_gfni.inc"
%include "memcpy.asm"

%if AS_FEATURE_LEVEL >= 10

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define func(x) x: endbranch
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12     ; must be saved, loaded and restored
 %define arg5   r13     ; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define stack_size 16*9 + 3*8
 %define arg(x) [rsp + stack_size + 8 + 8*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        vmovdqa [rsp + 6*16], xmm12
        vmovdqa [rsp + 7*16], xmm13
        vmovdqa [rsp + 8*16], xmm14
        mov     [rsp + 9*16 + 0*8], r12
        mov     [rsp + 9*16 + 1*8], r13
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        add     rsp, stack_size
 %endmacro
%endif

%define len       arg0
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       rax
%define dest2     mul_array
%define dest3     vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
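
;;; XLDR/XSTR select the memory access flavor: unaligned moves by default,
;;; or (with EC_ALIGNED_ADDR) aligned moves when NO_NT_LDST is set, else
;;; non-temporal moves that bypass the cache.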

default rel
[bits 64]
section .text

%define x0l     ymm0
%define x0h     ymm1
%define x0x     ymm2
%define xgft1   ymm3
%define xgft2   ymm4
%define xd1l    ymm5
%define xd1h    ymm6
%define xd1x    ymm7
%define xd2l    ymm8
%define xd2h    ymm9
%define xd2x    ymm10
%define xret1l  ymm11
%define xret1h  ymm12
%define xret2l  ymm13
%define xret2h  ymm14

%define x0      x0l
%define xd1     xd1l
%define xd2     xd2l
%define xret1   xret1l
%define xret2   xret2l
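
;;
;; GF_MUL_XOR (from gf_vect_gfni.inc) is the GF(2^8) multiply-accumulate
;; step; per destination it expands to roughly the following (a sketch,
;; register names illustrative):
;;
;;   vgf2p8affineqb xret, x0, xgft, 0x00  ; xret = x0 * 8-byte gft constant
;;   vpxor          xd, xd, xret          ; accumulate result into parity
;;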
;;
;; Encodes 96 bytes of a single source into 2x 96 bytes (parity disks)
;;
%macro ENCODE_96B_2 0
        ;Get next source vector
        XLDR    x0l, [src + pos]
        XLDR    x0h, [src + pos + 32]
        XLDR    x0x, [src + pos + 64]
        ;Get next dest vectors
        XLDR    xd1l, [dest1 + pos]
        XLDR    xd1h, [dest1 + pos + 32]
        XLDR    xd1x, [dest1 + pos + 64]
        XLDR    xd2l, [dest2 + pos]
        XLDR    xd2h, [dest2 + pos + 32]
        XLDR    xd2x, [dest2 + pos + 64]
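        ;GF multiply-accumulate all three 32B lanes; xret1l/xret2l are
        ;reused as scratch for the third lane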
        GF_MUL_XOR VEX, x0l, xgft1, xret1l, xd1l, xgft2, xret2l, xd2l
        GF_MUL_XOR VEX, x0h, xgft1, xret1h, xd1h, xgft2, xret2h, xd2h
        GF_MUL_XOR VEX, x0x, xgft1, xret1l, xd1x, xgft2, xret2l, xd2x
        XSTR    [dest1 + pos], xd1l
        XSTR    [dest1 + pos + 32], xd1h
        XSTR    [dest1 + pos + 64], xd1x
        XSTR    [dest2 + pos], xd2l
        XSTR    [dest2 + pos + 32], xd2h
        XSTR    [dest2 + pos + 64], xd2x
%endmacro

;;
;; Encodes 64 bytes of a single source into 2x 64 bytes (parity disks)
;;
%macro ENCODE_64B_2 0
        ;Get next source vector
        XLDR    x0l, [src + pos]
        XLDR    x0h, [src + pos + 32]
        ;Get next dest vectors
        XLDR    xd1l, [dest1 + pos]
        XLDR    xd1h, [dest1 + pos + 32]
        XLDR    xd2l, [dest2 + pos]
        XLDR    xd2h, [dest2 + pos + 32]
        GF_MUL_XOR VEX, x0l, xgft1, xret1l, xd1l, xgft2, xret2l, xd2l
        GF_MUL_XOR VEX, x0h, xgft1, xret1h, xd1h, xgft2, xret2h, xd2h
        XSTR    [dest1 + pos], xd1l
        XSTR    [dest1 + pos + 32], xd1h
        XSTR    [dest2 + pos], xd2l
        XSTR    [dest2 + pos + 32], xd2h
%endmacro

;;
;; Encodes 32 bytes of a single source into 2x 32 bytes (parity disks)
;;
%macro ENCODE_32B_2 0
        ;Get next source vector
        XLDR    x0, [src + pos]
        ;Get next dest vectors
        XLDR    xd1, [dest1 + pos]
        XLDR    xd2, [dest2 + pos]
        GF_MUL_XOR VEX, x0, xgft1, xret1, xd1, xgft2, xret2, xd2
        XSTR    [dest1 + pos], xd1
        XSTR    [dest2 + pos], xd2
%endmacro

;;
;; Encodes less than 32 bytes of a single source into 2x parity disks
;;
%macro ENCODE_LT_32B_2 1
%define %%LEN %1
        ;Get next source vector
        simd_load_avx2 x0, src + pos, %%LEN, tmp, tmp2
        ;Get next dest vectors
        simd_load_avx2 xd1, dest1 + pos, %%LEN, tmp, tmp2
        simd_load_avx2 xd2, dest2 + pos, %%LEN, tmp, tmp2
        GF_MUL_XOR VEX, x0, xgft1, xret1, xd1, xgft2, xret2, xd2
        lea     dest1, [dest1 + pos]
        simd_store_avx2 dest1, xd1, %%LEN, tmp, tmp2
        lea     dest2, [dest2 + pos]
        simd_store_avx2 dest2, xd2, %%LEN, tmp, tmp2
%endmacro
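
;; simd_load_avx2/simd_store_avx2 (from memcpy.asm) move exactly %%LEN
;; bytes, using tmp/tmp2 as scratch, so the sub-32-byte tail is handled
;; without stepping past the end of the buffers (described here from its
;; use, not from the macro definitions).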

align 16
global gf_2vect_mad_avx2_gfni, function
func(gf_2vect_mad_avx2_gfni)
        FUNC_SAVE
        xor     pos, pos
        shl     vec_i, 3        ;Multiply by 8
        shl     vec, 3          ;Multiply by 8
        lea     tmp, [mul_array + vec_i]
        vbroadcastsd xgft1, [tmp]
        vbroadcastsd xgft2, [tmp + vec]
        mov     dest2, [dest1 + 8]      ; reuse mul_array
        mov     dest1, [dest1]
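
        ;; Main loop: 96 bytes (3x 32-byte ymm) per iteration, then mop up
        ;; 64-, 32-, and sub-32-byte remainders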
        cmp     len, 96
        jl      .len_lt_96

.loop96:
        ENCODE_96B_2
        add     pos, 96         ;; loop on 96 bytes at a time
        sub     len, 96
        cmp     len, 96
        jge     .loop96

.len_lt_96:
        cmp     len, 64
        jl      .len_lt_64
        ENCODE_64B_2            ;; encode next 64 bytes
        add     pos, 64
        sub     len, 64

.len_lt_64:
        cmp     len, 32
        jl      .len_lt_32
        ENCODE_32B_2            ;; encode next 32 bytes
        add     pos, 32
        sub     len, 32

.len_lt_32:
        cmp     len, 0
        jle     .exit
        ENCODE_LT_32B_2 len     ;; encode final bytes

.exit:
        vzeroupper
        FUNC_RESTORE
        ret

endproc_frame

%endif  ; if AS_FEATURE_LEVEL >= 10