;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;
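;;; GF(2^8) multiply-and-add across six parity rows in one pass. For each
;;; byte i in [0, len) and each destination j in [0, 6):
;;;
;;;     dest[j][i] ^= gf_mul(gf[j], src[i])
;;;
;;; gf[j] is the multiply constant whose 32-byte nibble lookup table sits
;;; at mul_array + (j*vec + vec_i)*32 (low-nibble table in the first 16
;;; bytes, high-nibble table in the second 16); dest is an array of six
;;; destination pointers. Returns 0 on success, 1 if len < 32.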
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13
 %define return   rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x)     [rsp + stack_size + PS + PS*x]
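 ;; arg(x): stack argument x, addressed relative to rsp after the prolog
 ;; has subtracted stack_size. The extra PS skips the return address and
 ;; PS*x steps over the win64 home slots of the four register arguments,
 ;; so arg(4) and arg(5) pick up the fifth and sixth parameters.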
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r13, 10*16 + 1*8
        save_reg        r15, 10*16 + 2*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r15, [rsp + 10*16 + 2*8]
        add     rsp, stack_size
 %endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r12
 %define return   rax
 %define return.w eax
 %define func(x) x:

 %macro FUNC_SAVE 0
        push    r12
 %endmacro

 %macro FUNC_RESTORE 0
        pop     r12
 %endmacro
%endif
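;;; On elf64 (SysV) all six arguments arrive in registers, so FUNC_SAVE
;;; only needs to preserve r12 (reused as tmp3). On win64 the fifth and
;;; sixth arguments come from the caller's stack, and xmm6-xmm15, r12,
;;; r13 and r15 are callee-saved, hence the much larger prolog above.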
;;; gf_6vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2 tmp3
%define dest3 tmp2
%define dest4 mul_array
%define dest5 vec
%define dest6 vec_i
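;;; dest4, dest5 and dest6 deliberately alias mul_array, vec and vec_i:
;;; those inputs are dead once the six lookup tables have been loaded, so
;;; their registers are recycled for the remaining destination pointers
;;; (see the "reuse" comments where they are filled in).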
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
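;;; XLDR/XSTR select the data-movement strategy at build time: unaligned
;;; vmovdqu by default, plain aligned vmovdqa when both EC_ALIGNED_ADDR
;;; and NO_NT_LDST are defined, and the non-temporal (cache-bypassing)
;;; vmovntdqa/vmovntdq pair when only EC_ALIGNED_ADDR is set.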
default rel

[bits 64]
section .text

%define xmask0f  ymm15
%define xmask0fx xmm15
%define xgft1_lo ymm14
%define xgft2_lo ymm13
%define xgft3_lo ymm12
%define xgft4_lo ymm11
%define xgft5_lo ymm10
%define xgft6_lo ymm9

%define x0     ymm0
%define xtmpa  ymm1
%define xtmpl  ymm2
%define xtmplx xmm2
%define xtmph  ymm3
%define xtmphx xmm3
%define xd1    ymm4
%define xd2    ymm5
%define xd3    ymm6
%define xd4    ymm7
%define xd5    ymm8
%define xd6    xd1
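;;; Only sixteen ymm registers are available, so xd6 shares ymm4 with
;;; xd1: xd1 is computed and stored back to dest1 early in the loop body,
;;; after which the register is free to be reloaded as xd6.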
align 16
global gf_6vect_mad_avx2:ISAL_SYM_TYPE_FUNCTION
func(gf_6vect_mad_avx2)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_6vect_mad_avx2:ISAL_SYM_TYPE_FUNCTION
func(_gf_6vect_mad_avx2)
%endif
        FUNC_SAVE
        sub     len, 32
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ;Construct mask 0x0f0f0f...
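        ;; GF(2^8) multiplication by a constant c is done with two
        ;; 16-entry table lookups, one per nibble of each source byte:
        ;;     gf_mul(c, b) = lo_tbl_c[b & 0x0f] ^ hi_tbl_c[b >> 4]
        ;; The 0x0f mask built above isolates the nibbles so vpshufb can
        ;; index both tables 32 bytes at a time.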
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5                  ;Multiply by 32
        lea     tmp, [mul_array + vec_i]
        mov     vec_i, vec
        mov     mul_array, vec
        sal     vec_i, 1
        sal     mul_array, 1
        add     vec_i, vec              ;vec_i = vec*96
        add     mul_array, vec_i        ;mul_array = vec*160
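        ;; tmp points at the table block for source vec_i; the table for
        ;; destination row j lies another j*vec*32 bytes on. Offsets
        ;; 2*vec and 4*vec fit x86 addressing-mode scales, but 3*vec and
        ;; 5*vec do not, so they are precomputed into vec_i and mul_array
        ;; (vec*96 and vec*160 in the original, unshifted units).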
        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                        ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
                                        ;     "     Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+2*vec]   ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
                                        ;     "     Cx{00}, Cx{10}, ..., Cx{f0}
        vmovdqu xgft4_lo, [tmp+vec_i]   ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
                                        ;     "     Dx{00}, Dx{10}, ..., Dx{f0}
        vmovdqu xgft5_lo, [tmp+4*vec]   ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
                                        ;     "     Ex{00}, Ex{10}, ..., Ex{f0}
        vmovdqu xgft6_lo, [tmp+mul_array] ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
                                        ;     "     Fx{00}, Fx{10}, ..., Fx{f0}

        mov     dest2, [dest1+PS]       ; reuse tmp3
        mov     dest3, [dest1+2*PS]     ; reuse tmp2
        mov     dest4, [dest1+3*PS]     ; reuse mul_array
        mov     dest5, [dest1+4*PS]     ; reuse vec
        mov     dest6, [dest1+5*PS]     ; reuse vec_i
        mov     dest1, [dest1]

.loop32:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Get next dest vector
        XLDR    xd5, [dest5+pos]        ;Get next dest vector

        vpand   xtmpl, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xtmpl, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmpl, x0, 0x12  ;swap x0 from 1hi|2hi to 1hi|2lo
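        ;; vpshufb indexes within each 128-bit lane independently, and
        ;; each xgft register holds the low-nibble table in its low lane
        ;; and the high-nibble table in its high lane. The vperm2i128 ops
        ;; regroup the data to match: xtmpa pairs low nibbles of bytes
        ;; 0-15 with high nibbles of bytes 16-31 (looked up against the
        ;; lo|hi table), while x0 holds the complementary pairing (looked
        ;; up against a lane-swapped hi|lo copy of the table).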
        ;dest1
        vperm2i128 xtmph, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft1_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd1, xd1, xtmph         ;xd1 += partial
        XSTR    [dest1+pos], xd1        ;Store result into dest1

        ;dest2
        vperm2i128 xtmph, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft2_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd2, xd2, xtmph         ;xd2 += partial

        ;dest3
        vperm2i128 xtmph, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft3_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd3, xd3, xtmph         ;xd3 += partial

        XLDR    xd6, [dest6+pos]        ;reuse xd1. Get next dest vector

        ;dest4
        vperm2i128 xtmph, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft4_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd4, xd4, xtmph         ;xd4 += partial

        ;dest5
        vperm2i128 xtmph, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft5_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd5, xd5, xtmph         ;xd5 += partial

        ;dest6
        vperm2i128 xtmph, xgft6_lo, xgft6_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft6_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd6, xd6, xtmph         ;xd6 += partial

        XSTR    [dest2+pos], xd2        ;Store result into dest2
        XSTR    [dest3+pos], xd3        ;Store result into dest3
        XSTR    [dest4+pos], xd4        ;Store result into dest4
        XSTR    [dest5+pos], xd5        ;Store result into dest5
        XSTR    [dest6+pos], xd6        ;Store result into dest6

        add     pos, 32                 ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]
        cmp     pos, tmp
        je      .return_pass

.lessthan32:
        ;; Tail len
        ;; Do one more overlap pass
        mov     tmp.b, 0x1f
        vpinsrb xtmphx, xtmphx, tmp.w, 0
        vpbroadcastb xtmph, xtmphx      ;Construct mask 0x1f1f1f...
        mov     tmp, len                ;Overlapped offset length-32
        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;Get next dest vector
        XLDR    xd4, [dest4+tmp]        ;Get next dest vector
        XLDR    xd5, [dest5+tmp]        ;Get next dest vector

        sub     len, pos
        vpinsrb xtmplx, xtmplx, len.w, 15
        vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
        vpshufb xtmpl, xtmpl, xtmph     ;Broadcast len to all bytes. xtmph=0x1f1f1f...
        vpcmpgtb xtmpl, xtmpl, [constip32]
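        ;; Byte-granularity tail mask: len now holds tail_len-32 (a
        ;; negative byte value), broadcast to all 32 lanes, and constip32
        ;; holds -1, -2, ..., -32. The signed compare therefore sets a
        ;; byte to all-ones exactly for the last tail_len positions of
        ;; the window, so ANDing the partial products with xtmpl keeps
        ;; this overlap pass from updating bytes the main loop already
        ;; wrote.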
        vpand   xtmph, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xtmph, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmph, x0, 0x12  ;swap x0 from 1hi|2hi to 1hi|2lo

        ;dest1
        vperm2i128 xtmph, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft1_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd1, xd1, xtmph         ;xd1 += partial
        XSTR    [dest1+tmp], xd1        ;Store result into dest1

        ;dest2
        vperm2i128 xtmph, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft2_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd2, xd2, xtmph         ;xd2 += partial

        ;dest3
        vperm2i128 xtmph, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft3_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd3, xd3, xtmph         ;xd3 += partial

        XLDR    xd6, [dest6+tmp]        ;reuse xd1. Get next dest vector

        ;dest4
        vperm2i128 xtmph, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft4_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd4, xd4, xtmph         ;xd4 += partial

        ;dest5
        vperm2i128 xtmph, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft5_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd5, xd5, xtmph         ;xd5 += partial

        ;dest6
        vperm2i128 xtmph, xgft6_lo, xgft6_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft6_lo, xgft6_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft6_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl
        vpxor   xd6, xd6, xtmph         ;xd6 += partial

        XSTR    [dest2+tmp], xd2        ;Store result into dest2
        XSTR    [dest3+tmp], xd3        ;Store result into dest3
        XSTR    [dest4+tmp], xd4        ;Store result into dest4
        XSTR    [dest5+tmp], xd5        ;Store result into dest5
        XSTR    [dest6+tmp], xd6        ;Store result into dest6

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 32
constip32:
        dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
        dq 0xe8e9eaebecedeeef, 0xe0e1e2e3e4e5e6e7
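;;; constip32 (above) holds the descending byte sequence -1, -2, ..., -32
;;; (0xff, 0xfe, ... in little-endian order), the comparison operand for
;;; the signed vpcmpgtb that builds the tail mask in .lessthan32.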
;;; func          core, ver, snum
slversion gf_6vect_mad_avx2, 04, 01, 0211