gf_6vect_mad_avx512.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2019 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_6vect_mad_avx512(len, vec, vec_i, mul_array, src, dest);
;;;
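;;;
;;; Multiply-and-add over GF(2^8): accumulate one source buffer, scaled by six
;;; constants, into six destination (parity) buffers. The parameter meanings
;;; below follow the ISA-L erasure-code convention and are an informal summary,
;;; not stated elsewhere in this file:
;;;   len       - length of each buffer in bytes
;;;   vec       - total number of source vectors
;;;   vec_i     - index of the source vector being accumulated
;;;   mul_array - gf multiplication tables, 32 bytes per source per destination
;;;   src       - pointer to the source buffer
;;;   dest      - array of six destination buffer pointers
;;; Roughly: dest[j][i] ^= gf_mul(coef[j][vec_i], src[i]) for j = 0..5 and
;;; i = 0..len-1, where gf_mul and coef are illustrative names only.
;;;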
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r12             ;must be saved and restored
 %define return rax
 %define func(x) x: endbranch
 %macro FUNC_SAVE 0
        push    r12
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13
 %define return rax
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        vmovdqa [rsp + 16*3], xmm9
        vmovdqa [rsp + 16*4], xmm10
        vmovdqa [rsp + 16*5], xmm11
        vmovdqa [rsp + 16*6], xmm12
        vmovdqa [rsp + 16*7], xmm13
        vmovdqa [rsp + 16*8], xmm14
        vmovdqa [rsp + 16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r15, 10*16 + 1*8
        save_reg r13, 10*16 + 2*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 16*0]
        vmovdqa xmm7, [rsp + 16*1]
        vmovdqa xmm8, [rsp + 16*2]
        vmovdqa xmm9, [rsp + 16*3]
        vmovdqa xmm10, [rsp + 16*4]
        vmovdqa xmm11, [rsp + 16*5]
        vmovdqa xmm12, [rsp + 16*6]
        vmovdqa xmm13, [rsp + 16*7]
        vmovdqa xmm14, [rsp + 16*8]
        vmovdqa xmm15, [rsp + 16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r15, [rsp + 10*16 + 1*8]
        mov     r13, [rsp + 10*16 + 2*8]
        add     rsp, stack_size
 %endmacro
%endif
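;;; Note: the Microsoft x64 calling convention passes only four arguments in
;;; registers, so the win64 FUNC_SAVE loads arg4 and arg5 from the caller's
;;; stack after setting up the frame; it also preserves xmm6-xmm15 and the
;;; r12/r13/r15 registers that this routine clobbers.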
%define PS 8
%define len       arg0
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return

%define dest2     tmp3
%define dest3     tmp2
%define dest4     mul_array
%define dest5     vec
%define dest6     vec_i
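;;; Once the gf tables have been loaded, vec, vec_i and mul_array are no
;;; longer needed, so their registers (along with tmp2/tmp3) are reused to
;;; hold the destination pointers.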
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa64
  %define XSTR vmovdqa64
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
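;;; XLDR/XSTR select the 64-byte load/store flavour at build time: unaligned
;;; moves by default, aligned moves when both EC_ALIGNED_ADDR and NO_NT_LDST
;;; are defined, and non-temporal (streaming) moves when only EC_ALIGNED_ADDR
;;; is defined.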
default rel
[bits 64]
section .text

%define x0        zmm0
%define xtmpa     zmm1
%define xtmpl1    zmm2
%define xtmph1    zmm3
%define xgft1_hi  zmm4
%define xgft1_lo  zmm5
%define xgft1_loy ymm5
%define xgft2_hi  zmm6
%define xgft2_lo  zmm7
%define xgft2_loy ymm7
%define xgft3_hi  zmm8
%define xgft3_lo  zmm9
%define xgft3_loy ymm9
%define xgft4_hi  zmm10
%define xgft4_lo  zmm11
%define xgft4_loy ymm11
%define xgft5_hi  zmm12
%define xgft5_lo  zmm13
%define xgft5_loy ymm13
%define xgft6_hi  zmm14
%define xgft6_lo  zmm15
%define xgft6_loy ymm15

%define xd1       zmm16
%define xd2       zmm17
%define xd3       zmm18
%define xd4       zmm19
%define xd5       zmm20
%define xd6       zmm21

%define xmask0f   zmm22
%define xtmpl2    zmm23
%define xtmpl3    zmm24
%define xtmpl4    zmm25
%define xtmpl5    zmm26
%define xtmph2    zmm27
%define xtmph3    zmm28
%define xtmph4    zmm29
%define xtmph5    zmm30
%define xtmph6    zmm31
align 16
global gf_6vect_mad_avx512, function
func(gf_6vect_mad_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail
        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5                  ;Multiply by 32
        lea     tmp, [mul_array + vec_i]
        mov     vec_i, vec
        mov     mul_array, vec
        sal     vec_i, 1                ;vec_i=vec*64
        sal     mul_array, 1            ;mul_array=vec*64
        add     vec_i, vec              ;vec_i=vec*96
        add     mul_array, vec_i        ;mul_array=vec*160
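        ;; tmp now points at the 32-byte table for source vec_i inside the
        ;; first destination's table block. vec holds vec*32, so the matching
        ;; tables for destinations 2, 3 and 5 are reached as [tmp+vec],
        ;; [tmp+2*vec] and [tmp+4*vec]; the *96 and *160 multiples needed for
        ;; destinations 4 and 6 cannot be expressed as address scales, so they
        ;; are precomputed above in vec_i and mul_array.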
        vmovdqu xgft1_loy, [tmp]                ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu xgft2_loy, [tmp+vec]            ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu xgft3_loy, [tmp+2*vec]          ;Load array Cx{00}..{0f}, Cx{00}..{f0}
        vmovdqu xgft4_loy, [tmp+vec_i]          ;Load array Dx{00}..{0f}, Dx{00}..{f0}
        vmovdqu xgft5_loy, [tmp+4*vec]          ;Load array Ex{00}..{0f}, Ex{00}..{f0}
        vmovdqu xgft6_loy, [tmp+mul_array]      ;Load array Fx{00}..{0f}, Fx{00}..{f0}

        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00
        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00
        vshufi64x2 xgft4_hi, xgft4_lo, xgft4_lo, 0x55
        vshufi64x2 xgft4_lo, xgft4_lo, xgft4_lo, 0x00
        vshufi64x2 xgft5_hi, xgft5_lo, xgft5_lo, 0x55
        vshufi64x2 xgft5_lo, xgft5_lo, xgft5_lo, 0x00
        vshufi64x2 xgft6_hi, xgft6_lo, xgft6_lo, 0x55
        vshufi64x2 xgft6_lo, xgft6_lo, xgft6_lo, 0x00
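        ;; Each ymm load above brings in one 32-byte table: bytes 0-15 are the
        ;; low-nibble lookup, bytes 16-31 the high-nibble lookup. The
        ;; vshufi64x2 pairs broadcast the upper 128-bit lane into every lane
        ;; of the _hi register (imm 0x55) and the lower 128-bit lane into
        ;; every lane of the _lo register (imm 0x00), so the in-lane vpshufb
        ;; in the loop applies the same 16-entry table to all 64 bytes.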
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]     ; reuse mul_array
        mov     dest5, [dest1+4*PS]     ; reuse vec
        mov     dest6, [dest1+5*PS]     ; reuse vec_i
        mov     dest1, [dest1]

        mov     tmp, -1
        kmovq   k1, tmp
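        ;; k1 starts as an all-ones mask so full 64-byte blocks are processed;
        ;; it is narrowed only for the final, partial block (see the tail
        ;; handling after the loop).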
.loop64:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Get next dest vector
        XLDR    xd5, [dest5+pos]        ;Get next dest vector
        XLDR    xd6, [dest6+pos]        ;Get next dest vector

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
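        ;; Each destination below follows the same pattern: look up both
        ;; nibbles of every source byte in their 16-entry tables with vpshufb
        ;; and XOR the two partial products together (a constant GF(2^8)
        ;; multiply per byte), then XOR the product into the dest data. The
        ;; {k1}{z} zero-masking makes bytes outside k1 contribute nothing,
        ;; which is what the overlapped tail pass relies on.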
        ; dest1
        vpshufb xtmph1 {k1}{z}, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1 {k1}{z}, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph1, xtmph1, xtmpl1          ;GF add high and low partials
        vpxorq  xd1, xd1, xtmph1                ;xd1 += partial

        ; dest2
        vpshufb xtmph2 {k1}{z}, xgft2_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl2 {k1}{z}, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph2, xtmph2, xtmpl2          ;GF add high and low partials
        vpxorq  xd2, xd2, xtmph2                ;xd2 += partial

        ; dest3
        vpshufb xtmph3 {k1}{z}, xgft3_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl3 {k1}{z}, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph3, xtmph3, xtmpl3          ;GF add high and low partials
        vpxorq  xd3, xd3, xtmph3                ;xd3 += partial

        ; dest4
        vpshufb xtmph4 {k1}{z}, xgft4_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl4 {k1}{z}, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph4, xtmph4, xtmpl4          ;GF add high and low partials
        vpxorq  xd4, xd4, xtmph4                ;xd4 += partial

        ; dest5
        vpshufb xtmph5 {k1}{z}, xgft5_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl5 {k1}{z}, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph5, xtmph5, xtmpl5          ;GF add high and low partials
        vpxorq  xd5, xd5, xtmph5                ;xd5 += partial

        ; dest6
        vpshufb xtmph6 {k1}{z}, xgft6_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl5 {k1}{z}, xgft6_lo, xtmpa ;Lookup mul table of low nibble. Reuse xtmpl5
        vpxorq  xtmph6, xtmph6, xtmpl5          ;GF add high and low partials.
        vpxorq  xd6, xd6, xtmph6                ;xd6 += partial

        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2
        XSTR    [dest3+pos], xd3
        XSTR    [dest4+pos], xd4
        XSTR    [dest5+pos], xd5
        XSTR    [dest6+pos], xd6

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64
        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
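        ;; Fewer than 64 bytes remain, so do one more masked, overlapped pass
        ;; over the last 64 bytes of each buffer. k1 is rebuilt to keep only
        ;; the high byte lanes not yet covered by the full-width passes:
        ;; shifting 1<<63 right arithmetically by ((len + 64 - 1) & 63) leaves
        ;; exactly that many top bits set. Bytes already handled get a zeroed
        ;; partial product through {k1}{z} and are stored back unchanged.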
        mov     pos, (1 << 63)
        lea     tmp, [len + 64 - 1]
        and     tmp, 63
        sarx    pos, pos, tmp
        kmovq   k1, pos
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass
.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame
%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_6vect_mad_avx512
no_gf_6vect_mad_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512