gf_3vect_mad_avx2.asm
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;
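;;; Arguments (per the isa-l erasure-code multiply-accumulate API):
;;;   len       - length of each vector in bytes
;;;   vec       - number of source vectors in the code
;;;   vec_i     - index of the source vector being accumulated
;;;   mul_array - concatenated 32-byte GF(2^8) multiply tables
;;;   src       - pointer to the source vector
;;;   dest      - array of 3 destination (parity) vector pointers
;;;
;;; Sketch of what each call computes (illustrative C-like model only;
;;; gf_mul() and the coefficients a, b, c are placeholders, not symbols
;;; defined in this file):
;;;
;;;   for (i = 0; i < len; i++) {
;;;       dest[0][i] ^= gf_mul(a, src[i]);
;;;       dest[1][i] ^= gf_mul(b, src[i]);
;;;       dest[2][i] ^= gf_mul(c, src[i]);
;;;   }
;;;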
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0       rcx
 %define arg0.w     ecx
 %define arg1       rdx
 %define arg2       r8
 %define arg3       r9
 %define arg4       r12           ; must be saved, loaded and restored
 %define arg5       r15           ; must be saved and restored
 %define tmp        r11
 %define tmp.w      r11d
 %define tmp.b      r11b
 %define return     rax
 %define return.w   eax
 %define stack_size 16*10 + 3*8
 %define arg(x)     [rsp + stack_size + PS + PS*x]
 %define func(x)    proc_frame x
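
 ;; Windows x64 ABI: xmm6-xmm15, r12 and r15 are callee-saved, and
 ;; arguments 4 and 5 are passed on the stack, so FUNC_SAVE spills the
 ;; registers and loads arg4/arg5 before the body runs.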
 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        vmovdqa [rsp + 16*3], xmm9
        vmovdqa [rsp + 16*4], xmm10
        vmovdqa [rsp + 16*5], xmm11
        vmovdqa [rsp + 16*6], xmm12
        vmovdqa [rsp + 16*7], xmm13
        vmovdqa [rsp + 16*8], xmm14
        vmovdqa [rsp + 16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r15, 10*16 + 1*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6,  [rsp + 16*0]
        vmovdqa xmm7,  [rsp + 16*1]
        vmovdqa xmm8,  [rsp + 16*2]
        vmovdqa xmm9,  [rsp + 16*3]
        vmovdqa xmm10, [rsp + 16*4]
        vmovdqa xmm11, [rsp + 16*5]
        vmovdqa xmm12, [rsp + 16*6]
        vmovdqa xmm13, [rsp + 16*7]
        vmovdqa xmm14, [rsp + 16*8]
        vmovdqa xmm15, [rsp + 16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r15, [rsp + 10*16 + 1*8]
        add     rsp, stack_size
 %endmacro
%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0     rdi
 %define arg0.w   edi
 %define arg1     rsi
 %define arg2     rdx
 %define arg3     rcx
 %define arg4     r8
 %define arg5     r9
 %define tmp      r11
 %define tmp.w    r11d
 %define tmp.b    r11b
 %define return   rax
 %define return.w eax
 %define func(x)  x: endbranch
 %define FUNC_SAVE
 %define FUNC_RESTORE
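
 ;; System V AMD64 ABI: all six arguments arrive in registers and the
 ;; vector registers are caller-saved, so FUNC_SAVE/FUNC_RESTORE are empty.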
%endif

;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2     mul_array
%define dest3     vec_i
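
;; XLDR/XSTR select the 32-byte load/store flavour: unaligned by default,
;; aligned (and optionally non-temporal) when EC_ALIGNED_ADDR is defined.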
%ifndef EC_ALIGNED_ADDR
 ;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
 ;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

default rel

[bits 64]
section .text

%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft1_hi  ymm13
%define xgft2_lo  ymm12
%define xgft3_lo  ymm11

%define x0        ymm0
%define xtmpa     ymm1
%define xtmph1    ymm2
%define xtmpl1    ymm3
%define xtmph2    ymm4
%define xtmpl2    ymm5
%define xtmpl2x   xmm5
%define xtmph3    ymm6
%define xtmpl3    ymm7
%define xtmpl3x   xmm7
%define xd1       ymm8
%define xd2       ymm9
%define xd3       ymm10

align 16
mk_global gf_3vect_mad_avx2, function
func(gf_3vect_mad_avx2)
        FUNC_SAVE
        sub     len, 32
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ;Construct mask 0x0f0f0f...

        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5
        lea     tmp, [mul_array + vec_i]
        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                        ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft1_lo, xgft1_lo, xgft1_lo, 0x00 ; swapped to lo | lo
        vmovdqu xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, Bx{02}, ...
                                        ;     "     Bx{00}, Bx{10}, Bx{20}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+2*vec]   ;Load array Cx{00}, Cx{01}, Cx{02}, ...
                                        ;     "     Cx{00}, Cx{10}, Cx{20}, ..., Cx{f0}

        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]     ; reuse vec_i
        mov     dest1, [dest1]
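
        ;; Main loop: 32 bytes per iteration.  Each source byte is split
        ;; into its low and high nibble; vpshufb uses each nibble to index
        ;; a 16-entry multiply table (held in both lanes of a ymm register),
        ;; and the XOR of the two partial lookups is the GF(2^8) product,
        ;; which is then XORed into the destination (multiply-accumulate).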
.loop32:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xtmpl2, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo
        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xtmpl3, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        ; dest1
        vpshufb xtmph1, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpxor   xd1, xd1, xtmph1        ;xd1 += partial

        ; dest2
        vpshufb xtmph2, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl2, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmpl2          ;GF add high and low partials
        vpxor   xd2, xtmph2             ;xd2 += partial

        ; dest3
        vpshufb xtmph3, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl3, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmpl3          ;GF add high and low partials
        vpxor   xd3, xtmph3             ;xd3 += partial

        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2
        XSTR    [dest3+pos], xd3

        add     pos, 32                 ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]
        cmp     pos, tmp
        je      .return_pass

.lessthan32:
        ;; Tail len
        ;; Do one more overlap pass
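        ;; The final partial block is handled by re-reading the last full
        ;; 32 bytes of each buffer (overlapping data already processed) and
        ;; building a byte mask from the remaining length, so only the
        ;; not-yet-updated bytes receive the new partial product.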
        mov     tmp.b, 0x1f
        vpinsrb xtmpl2x, xtmpl2x, tmp.w, 0
        vpbroadcastb xtmpl2, xtmpl2x    ;Construct mask 0x1f1f1f...

        mov     tmp, len                ;Overlapped offset length-32
        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;Get next dest vector

        sub     len, pos
        vmovdqa xtmph3, [constip32]     ;Load const of i + 32
        vpinsrb xtmpl3x, xtmpl3x, len.w, 15
        vinserti128 xtmpl3, xtmpl3, xtmpl3x, 1 ;swapped to xtmpl3x | xtmpl3x
        vpshufb xtmpl3, xtmpl3, xtmpl2  ;Broadcast len to all bytes. xtmpl2=0x1f1f1f...
        vpcmpgtb xtmpl3, xtmpl3, xtmph3

        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft2_lo, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo
        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft3_lo, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        ; dest1
        vpshufb xtmph1, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpand   xtmph1, xtmph1, xtmpl3
        vpxor   xd1, xd1, xtmph1        ;xd1 += partial

        ; dest2
        vpshufb xtmph2, xtmph2, x0      ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
        vpand   xtmph2, xtmph2, xtmpl3
        vpxor   xd2, xd2, xtmph2        ;xd2 += partial

        ; dest3
        vpshufb xtmph3, xtmph3, x0      ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xgft3_lo ;GF add high and low partials
        vpand   xtmph3, xtmph3, xtmpl3
        vpxor   xd3, xd3, xtmph3        ;xd3 += partial

        XSTR    [dest1+tmp], xd1
        XSTR    [dest2+tmp], xd2
        XSTR    [dest3+tmp], xd3

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data
align 32
constip32:
        dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
        dq 0xe8e9eaebecedeeef, 0xe0e1e2e3e4e5e6e7