;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_4vect_dot_prod_avx2(len, vec, *g_tbls, **buffs, **dests);
;;;
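;;; A rough C-level sketch of what this routine computes (illustrative
;;; only, not part of the build; gf_mul_table() is a hypothetical helper
;;; standing in for the 32-byte nibble-table multiply done below):
;;;
;;;   for (int j = 0; j < 4; j++)                 /* four outputs       */
;;;       for (int i = 0; i < len; i++) {         /* each byte position */
;;;           unsigned char s = 0;
;;;           for (int v = 0; v < vec; v++)       /* dot product        */
;;;               s ^= gf_mul_table(&g_tbls[(j*vec + v)*32], buffs[v][i]);
;;;           dests[j][i] = s;
;;;       }
;;;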
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13            ; must be saved and restored
 %define tmp4   r12            ; must be saved and restored
 %define tmp5   r14            ; must be saved and restored
 %define tmp6   r15            ; must be saved and restored
 %define return rax
 %macro SLDR 2
 %endmacro
 %define SSTR SLDR
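 ;; SLDR ("stack load") and SSTR ("stack store") are intentionally empty
 ;; here: in 64-bit builds every working variable stays in a register.
 ;; The elf32 section below redefines them to move values to and from
 ;; the stack slots that back its shared registers.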
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
        push    r15
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r15
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12            ; must be saved, loaded and restored
 %define arg5   r15            ; must be saved and restored
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13            ; must be saved and restored
 %define tmp4   r14            ; must be saved and restored
 %define tmp5   rdi            ; must be saved and restored
 %define tmp6   rsi            ; must be saved and restored
 %define return rax
 %macro SLDR 2
 %endmacro
 %define SSTR SLDR
 %define PS     8
 %define LOG_PS 3
 %define stack_size 9*16 + 7*8 ; must be an odd multiple of 8
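 ;; On win64 rsp is 8 mod 16 at function entry (the call pushed the
 ;; return address), so an odd multiple of 8 restores the 16-byte
 ;; alignment the vmovdqa saves below require.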
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        vmovdqa [rsp + 6*16], xmm12
        vmovdqa [rsp + 7*16], xmm13
        vmovdqa [rsp + 8*16], xmm14
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        save_reg        rdi, 9*16 + 4*8
        save_reg        rsi, 9*16 + 5*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
        mov     rdi, [rsp + 9*16 + 4*8]
        mov     rsi, [rsp + 9*16 + 5*8]
        add     rsp, stack_size
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, elf32
;;;================== High Address;
;;;     arg4
;;;     arg3
;;;     arg2
;;;     arg1
;;;     arg0
;;;     return
;;;<================= esp of caller
;;;     ebp
;;;<================= ebp = esp
;;;     var0
;;;     var1
;;;     var2
;;;     var3
;;;     esi
;;;     edi
;;;     ebx
;;;<================= esp of callee
;;;
;;;================== Low Address;

 %define PS      4
 %define LOG_PS  2
 %define func(x) x:
 %define arg(x)  [ebp + PS*2 + PS*x]
 %define var(x)  [ebp - PS - PS*x]

 %define trans   ecx
 %define trans2  esi
 %define arg0    trans         ;trans and trans2 are for the variables in stack
 %define arg0_m  arg(0)
 %define arg1    ebx
 %define arg2    arg2_m
 %define arg2_m  arg(2)
 %define arg3    trans
 %define arg3_m  arg(3)
 %define arg4    trans
 %define arg4_m  arg(4)
 %define arg5    trans2
 %define tmp     edx
 %define tmp.w   edx
 %define tmp.b   dl
 %define tmp2    edi
 %define tmp3    trans2
 %define tmp3_m  var(0)
 %define tmp4    trans2
 %define tmp4_m  var(1)
 %define tmp5    trans2
 %define tmp5_m  var(2)
 %define tmp6    trans2
 %define tmp6_m  var(3)
 %define return  eax
 %macro SLDR 2                 ;stack load/restore
        mov %1, %2
 %endmacro
 %define SSTR SLDR
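 ;; tmp3..tmp6 all alias trans2 (esi); their live values sit in the
 ;; stack slots var(0)..var(3) and are shuttled in and out with
 ;; SLDR/SSTR.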
 %macro FUNC_SAVE 0
        push    ebp
        mov     ebp, esp
        sub     esp, PS*4      ;4 local variables
        push    esi
        push    edi
        push    ebx
        mov     arg1, arg(1)
 %endmacro

 %macro FUNC_RESTORE 0
        pop     ebx
        pop     edi
        pop     esi
        add     esp, PS*4      ;4 local variables
        pop     ebp
 %endmacro
%endif                         ; output formats

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest2     tmp3
%define dest3     tmp4
%define dest4     tmp5
%define vskip3    tmp6
%define pos       return

%ifidn PS,4                    ;32-bit code
 %define len_m    arg0_m
 %define src_m    arg3_m
 %define dest1_m  arg4_m
 %define dest2_m  tmp3_m
 %define dest3_m  tmp4_m
 %define dest4_m  tmp5_m
 %define vskip3_m tmp6_m
%endif

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
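;;; The XLDR/XSTR choice above follows the usual isa-l build knobs:
;;; define EC_ALIGNED_ADDR when buffers are 32-byte aligned, and
;;; additionally NO_NT_LDST to prefer plain aligned moves over the
;;; non-temporal ones.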
%ifidn PS,8                    ;64-bit code
 default rel
 [bits 64]
%endif

section .text

%ifidn PS,8                    ;64-bit code
 %define xmask0f   ymm14
 %define xmask0fx  xmm14
 %define xgft1_lo  ymm13
 %define xgft1_hi  ymm12
 %define xgft2_lo  ymm11
 %define xgft2_hi  ymm10
 %define xgft3_lo  ymm9
 %define xgft3_hi  ymm8
 %define xgft4_lo  ymm7
 %define xgft4_hi  ymm6

 %define x0     ymm0
 %define xtmpa  ymm1
 %define xp1    ymm2
 %define xp2    ymm3
 %define xp3    ymm4
 %define xp4    ymm5
%else
 %define ymm_trans ymm7        ;reuse xmask0f and xgft1_hi
 %define xmask0f   ymm_trans
 %define xmask0fx  xmm7
 %define xgft1_lo  ymm6
 %define xgft1_hi  ymm_trans
 %define xgft2_lo  xgft1_lo
 %define xgft2_hi  xgft1_hi
 %define xgft3_lo  xgft1_lo
 %define xgft3_hi  xgft1_hi
 %define xgft4_lo  xgft1_lo
 %define xgft4_hi  xgft1_hi

 %define x0     ymm0
 %define xtmpa  ymm1
 %define xp1    ymm2
 %define xp2    ymm3
 %define xp3    ymm4
 %define xp4    ymm5
%endif

align 16
global gf_4vect_dot_prod_avx2:ISAL_SYM_TYPE_FUNCTION
func(gf_4vect_dot_prod_avx2)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_4vect_dot_prod_avx2:ISAL_SYM_TYPE_FUNCTION
func(_gf_4vect_dot_prod_avx2)
%endif
        FUNC_SAVE
        SLDR    len, len_m
        sub     len, 32
        SSTR    len_m, len
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx ;Construct mask 0x0f0f0f...
        mov     vskip3, vec
        imul    vskip3, 96
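        ;; g_tbls holds vec 32-byte tables per output and the four
        ;; outputs' table sets back to back, so the fourth set starts
        ;; at byte offset 3*vec*32 = vec*96.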
        SSTR    vskip3_m, vskip3
        sal     vec, LOG_PS            ;vec *= PS. Make vec_i count by PS
        SLDR    dest1, dest1_m
        mov     dest2, [dest1+PS]
        SSTR    dest2_m, dest2
        mov     dest3, [dest1+2*PS]
        SSTR    dest3_m, dest3
        mov     dest4, [dest1+3*PS]
        SSTR    dest4_m, dest4
        mov     dest1, [dest1]
        SSTR    dest1_m, dest1

.loop32:
        vpxor   xp1, xp1
        vpxor   xp2, xp2
        vpxor   xp3, xp3
        vpxor   xp4, xp4
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        SLDR    src, src_m
        mov     ptr, [src+vec_i]
        XLDR    x0, [ptr+pos]          ;Get next source vector
        add     vec_i, PS
%ifidn PS,8                            ;64-bit code
        vpand   xgft4_lo, x0, xmask0f  ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4              ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f        ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xgft4_lo, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xgft4_lo, x0, 0x12    ;swap x0 from 1hi|2hi to 1hi|2lo
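        ;; vpshufb shuffles only within each 128-bit lane, so the two
        ;; vperm2i128 above re-pair the lanes: xtmpa now holds
        ;; lo-nibbles | hi-nibbles and x0 holds hi-nibbles | lo-nibbles,
        ;; matching the lo|hi and hi|lo table layouts prepared below.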
        vmovdqu xgft1_lo, [tmp]                ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                               ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vec*(32/PS)]    ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
                                               ;     "     Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+vec*(64/PS)]    ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
                                               ;     "     Cx{00}, Cx{10}, ..., Cx{f0}
        vmovdqu xgft4_lo, [tmp+vskip3]         ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
                                               ;     "     Dx{00}, Dx{10}, ..., Dx{f0}
        vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xgft2_hi, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xgft3_hi, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xgft4_hi, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        add     tmp, 32
%else                                  ;32-bit code
        mov     cl, 0x0f               ;use ecx as a temp variable
        vpinsrb xmask0fx, xmask0fx, ecx, 0
        vpbroadcastb xmask0f, xmask0fx ;Construct mask 0x0f0f0f...
        vpand   xgft4_lo, x0, xmask0f  ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4              ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f        ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xgft4_lo, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xgft4_lo, x0, 0x12    ;swap x0 from 1hi|2hi to 1hi|2lo
        vmovdqu xgft1_lo, [tmp]                ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                               ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
%endif
        vpshufb xgft1_hi, x0           ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa        ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo     ;GF add high and low partials
        vpxor   xp1, xgft1_hi          ;xp1 += partial
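        ;; Each 32-byte table is two 16-entry lookup tables: products of
        ;; the coefficient with every low nibble, then with every high
        ;; nibble. Since gf_mul(c, b) = tbl_lo[b & 0x0f] ^ tbl_hi[b >> 4],
        ;; one vpshufb per half plus a vpxor multiplies 32 bytes at once.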
%ifidn PS,4                            ;32-bit code
        vmovdqu xgft2_lo, [tmp+vec*(32/PS)]    ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
                                               ;     "     Bx{00}, Bx{10}, ..., Bx{f0}
        vperm2i128 xgft2_hi, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
%endif
        vpshufb xgft2_hi, x0           ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xtmpa        ;Lookup mul table of low nibble
        vpxor   xgft2_hi, xgft2_lo     ;GF add high and low partials
        vpxor   xp2, xgft2_hi          ;xp2 += partial

%ifidn PS,4                            ;32-bit code
        sal     vec, 1
        vmovdqu xgft3_lo, [tmp+vec*(32/PS)]    ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
                                               ;     "     Cx{00}, Cx{10}, ..., Cx{f0}
        vperm2i128 xgft3_hi, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        sar     vec, 1
%endif
        vpshufb xgft3_hi, x0           ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xtmpa        ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_lo     ;GF add high and low partials
        vpxor   xp3, xgft3_hi          ;xp3 += partial

%ifidn PS,4                            ;32-bit code
        SLDR    vskip3, vskip3_m
        vmovdqu xgft4_lo, [tmp+vskip3]         ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
                                               ;     "     Dx{00}, Dx{10}, ..., Dx{f0}
        vperm2i128 xgft4_hi, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        add     tmp, 32
%endif
        vpshufb xgft4_hi, x0           ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xtmpa        ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_lo     ;GF add high and low partials
        vpxor   xp4, xgft4_hi          ;xp4 += partial

        cmp     vec_i, vec
        jl      .next_vect

        SLDR    dest1, dest1_m
        SLDR    dest2, dest2_m
        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        SLDR    dest3, dest3_m
        XSTR    [dest3+pos], xp3
        SLDR    dest4, dest4_m
        XSTR    [dest4+pos], xp4

        SLDR    len, len_m
        add     pos, 32                ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len               ;Overlapped offset length-32
        jmp     .loop32                ;Do one more overlap pass
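        ;; Example: for len = 100 the main loop runs at pos = 0, 32, 64;
        ;; the extra pass re-runs at pos = 68 so bytes 96..99 get written.
        ;; Re-writing bytes 68..95 is harmless since the inputs are unchanged.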
.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data

;;;       func                    core, ver, snum
slversion gf_4vect_dot_prod_avx2, 04,   05,  0198