;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_mad_avx(len, vec, vec_i, mul_array, src, dest);
;;;
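;;; Multiply-accumulate of one source vector into six parity rows: for
;;; each output j (j = 1..6), dest[j][i] ^= gf_mul(coef_j, src[i]),
;;; processed 16 bytes at a time.  Interface summary as consumed by the
;;; code below:
;;;   len       - byte length of each vector; len < 16 returns fail (1)
;;;   vec       - number of source vectors (used as the table stride)
;;;   vec_i     - index of this source vector within mul_array
;;;   mul_array - GF(2^8) lookup tables, 32 bytes per (output, input)
;;;               pair: 16 low-nibble then 16 high-nibble products
;;;   src       - pointer to the source data
;;;   dest      - array of six destination (parity) pointers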
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0 rcx
 %define arg0.w ecx
 %define arg1 rdx
 %define arg2 r8
 %define arg3 r9
 %define arg4 r12
 %define arg5 r15
 %define tmp r11
 %define tmp2 r10
 %define tmp3 r13
 %define tmp4 r14
 %define tmp5 rdi
 %define return rax
 %define return.w eax
 %define stack_size 16*10 + 5*8
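 ;; 16 bytes for each of xmm6-xmm15, plus one 8-byte slot for each of
 ;; the five callee-saved GP registers stored via save_reg below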
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r13, 10*16 + 1*8
        save_reg r14, 10*16 + 2*8
        save_reg r15, 10*16 + 3*8
        save_reg rdi, 10*16 + 4*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
        add     rsp, stack_size
 %endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0 rdi
 %define arg0.w edi
 %define arg1 rsi
 %define arg2 rdx
 %define arg3 rcx
 %define arg4 r8
 %define arg5 r9
 %define tmp r11
 %define tmp2 r10
 %define tmp3 r12
 %define tmp4 r13
 %define tmp5 r14
 %define return rax
 %define return.w eax

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif
;;; gf_6vect_mad_avx(len, vec, vec_i, mul_array, src, dest)
%define len   arg0
%define len.w arg0.w
%define vec   arg1
%define vec_i arg2
%define mul_array arg3
%define src   arg4
%define dest1 arg5
%define pos   return
%define pos.w return.w

%define dest2 tmp4
%define dest3 tmp2
%define dest4 mul_array
%define dest5 tmp5
%define dest6 vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
default rel
[bits 64]
section .text

%define xmask0f  xmm15
%define xgft4_lo xmm14
%define xgft4_hi xmm13
%define xgft5_lo xmm12
%define xgft5_hi xmm11
%define xgft6_lo xmm10
%define xgft6_hi xmm9

%define x0     xmm0
%define xtmpa  xmm1
%define xtmph1 xmm2
%define xtmpl1 xmm3
%define xtmph2 xmm4
%define xtmpl2 xmm5
%define xtmph3 xmm6
%define xtmpl3 xmm7
%define xd1    xmm8
%define xd2    xtmpl1
%define xd3    xtmph1

align 16
global gf_6vect_mad_avx:ISAL_SYM_TYPE_FUNCTION
func(gf_6vect_mad_avx)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_6vect_mad_avx:ISAL_SYM_TYPE_FUNCTION
func(_gf_6vect_mad_avx)
%endif
        FUNC_SAVE
        sub     len, 16
        jl      .return_fail
        xor     pos, pos
        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        mov     tmp, vec
        sal     vec_i, 5                ;Multiply by 32
        lea     tmp3, [mul_array + vec_i]
        sal     tmp, 6                  ;Multiply by 64
        sal     vec, 5                  ;Multiply by 32
        lea     vec_i, [tmp + vec]      ;vec_i = vec*96
        lea     mul_array, [tmp + vec_i] ;mul_array = vec*160
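        ;; Table layout: 32 bytes per (output, input) pair, stored
        ;; output-major, so the table for output j of this input is at
        ;; mul_array + 32*(vec*(j-1) + vec_i).  With tmp3 = mul_array +
        ;; 32*vec_i, the six tables sit at tmp3 + {0, 32, 64, 96, 128,
        ;; 160}*vec, addressed below as tmp3, tmp3+vec, tmp3+2*vec,
        ;; tmp3+vec_i, tmp3+2*tmp and tmp3+mul_array.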
        vmovdqu xgft5_lo, [tmp3+2*tmp]  ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
        vmovdqu xgft5_hi, [tmp3+2*tmp+16] ;   "     Ex{00}, Ex{10}, ..., Ex{f0}
        vmovdqu xgft4_lo, [tmp3+vec_i]  ;Load array Dx{00}, Dx{01}, Dx{02}, ...
        vmovdqu xgft4_hi, [tmp3+vec_i+16] ;   "     Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
        vmovdqu xgft6_lo, [tmp3+mul_array] ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
        vmovdqu xgft6_hi, [tmp3+mul_array+16] ; "   Fx{00}, Fx{10}, ..., Fx{f0}

        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]     ; reuse mul_array
        mov     dest5, [dest1+4*PS]
        mov     dest6, [dest1+5*PS]     ; reuse vec_i
        mov     dest1, [dest1]

.loop16:
        XLDR    x0, [src+pos]           ;Get next source vector
        vmovdqu xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        vmovdqu xtmph1, [tmp3+16]       ;     "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        vmovdqu xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        vmovdqu xtmph2, [tmp3+vec+16]   ;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        vmovdqu xtmph3, [tmp3+2*vec+16] ;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        XLDR    xd1, [dest1+pos]        ;Get next dest vector

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
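        ;; GF(2^8) multiply-by-constant via two 16-entry lookups:
        ;; gf_mul(c, x) = lo_tbl[x & 0x0f] ^ hi_tbl[x >> 4], because
        ;; x = (hi << 4) ^ lo and multiplication distributes over XOR.
        ;; vpsraw shifts 16-bit lanes, so the vpand above clears the
        ;; bits dragged in from the neighboring byte.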
        ;dest1
        vpshufb xtmph1, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl1, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmpl1          ;GF add high and low partials
        vpxor   xd1, xtmph1

        XLDR    xd2, [dest2+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+pos]        ;reuse xtmph1. Get next dest vector

        ;dest2
        vpshufb xtmph2, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl2, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmpl2          ;GF add high and low partials
        vpxor   xd2, xtmph2

        ;dest3
        vpshufb xtmph3, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl3, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmpl3          ;GF add high and low partials
        vpxor   xd3, xtmph3

        XSTR    [dest1+pos], xd1        ;Store result into dest1
        XSTR    [dest2+pos], xd2        ;Store result into dest2
        XSTR    [dest3+pos], xd3        ;Store result into dest3

        ;dest4
        XLDR    xd1, [dest4+pos]        ;Get next dest vector
        vpshufb xtmph1, xgft4_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpxor   xd1, xd1, xtmph1

        XLDR    xd2, [dest5+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+pos]        ;reuse xtmph1. Get next dest vector

        ;dest5
        vpshufb xtmph2, xgft5_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl2, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xtmpl2  ;GF add high and low partials
        vpxor   xd2, xd2, xtmph2

        ;dest6
        vpshufb xtmph3, xgft6_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl3, xgft6_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xtmpl3  ;GF add high and low partials
        vpxor   xd3, xd3, xtmph3

        XSTR    [dest4+pos], xd1        ;Store result into dest4
        XSTR    [dest5+pos], xd2        ;Store result into dest5
        XSTR    [dest6+pos], xd3        ;Store result into dest6

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass
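        ;; pos == len+16 means the original length was a multiple of 16
        ;; and everything is done; otherwise 1-15 bytes remain and are
        ;; handled by one overlapped 16-byte pass below.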
.lessthan16:
        ;; Tail len
        ;; Do one more overlap pass
        ;; Overlapped offset length-16
        mov     tmp, len                ;Save overlap offset; len is modified below

        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest4+tmp]        ;Get next dest vector
        XLDR    xd2, [dest5+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+tmp]        ;reuse xtmph1. Get next dest vector

        sub     len, pos

        vmovdqa xtmph3, [constip16]     ;Load const of i + 16
        vpinsrb xtmpl3, len.w, 15
        vpshufb xtmpl3, xmask0f         ;Broadcast len to all bytes
        vpcmpgtb xtmpl3, xtmpl3, xtmph3
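        ;; The three instructions above build the tail byte mask: len
        ;; now holds len-pos, a value in [-16,-1].  vpinsrb/vpshufb
        ;; broadcast it to all 16 bytes (xmask0f's bytes are all 0x0f,
        ;; so the shuffle copies byte 15 everywhere), and the signed
        ;; compare against constip16 (bytes -1..-16) leaves 0xff in
        ;; byte i exactly where offset tmp+i was not covered by the
        ;; main loop.  The vpand steps below use this mask to zero the
        ;; partial products of already-processed bytes, preventing a
        ;; double XOR.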
        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        ;dest4
        vpshufb xgft4_hi, xgft4_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_hi, xgft4_lo ;GF add high and low partials
        vpand   xgft4_hi, xgft4_hi, xtmpl3
        vpxor   xd1, xd1, xgft4_hi

        ;dest5
        vpshufb xgft5_hi, xgft5_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft5_hi, xgft5_hi, xgft5_lo ;GF add high and low partials
        vpand   xgft5_hi, xgft5_hi, xtmpl3
        vpxor   xd2, xd2, xgft5_hi

        ;dest6
        vpshufb xgft6_hi, xgft6_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft6_lo, xgft6_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft6_hi, xgft6_hi, xgft6_lo ;GF add high and low partials
        vpand   xgft6_hi, xgft6_hi, xtmpl3
        vpxor   xd3, xd3, xgft6_hi

        XSTR    [dest4+tmp], xd1        ;Store result into dest4
        XSTR    [dest5+tmp], xd2        ;Store result into dest5
        XSTR    [dest6+tmp], xd3        ;Store result into dest6
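        ;; The A-C tables were consumed as loop temporaries each
        ;; iteration, so reload them into the now-free xgft4-6
        ;; registers to finish dest1-3.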
        vmovdqu xgft4_lo, [tmp3]        ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        vmovdqu xgft4_hi, [tmp3+16]     ;     "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        vmovdqu xgft5_lo, [tmp3+vec]    ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        vmovdqu xgft5_hi, [tmp3+vec+16] ;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xgft6_lo, [tmp3+2*vec]  ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        vmovdqu xgft6_hi, [tmp3+2*vec+16] ;   "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}

        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;reuse xtmph1. Get next dest vector

        ;dest1
        vpshufb xgft4_hi, xgft4_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_hi, xgft4_lo ;GF add high and low partials
        vpand   xgft4_hi, xgft4_hi, xtmpl3
        vpxor   xd1, xd1, xgft4_hi

        ;dest2
        vpshufb xgft5_hi, xgft5_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft5_hi, xgft5_hi, xgft5_lo ;GF add high and low partials
        vpand   xgft5_hi, xgft5_hi, xtmpl3
        vpxor   xd2, xd2, xgft5_hi

        ;dest3
        vpshufb xgft6_hi, xgft6_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft6_lo, xgft6_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft6_hi, xgft6_hi, xgft6_lo ;GF add high and low partials
        vpand   xgft6_hi, xgft6_hi, xtmpl3
        vpxor   xd3, xd3, xgft6_hi

        XSTR    [dest1+tmp], xd1        ;Store result into dest1
        XSTR    [dest2+tmp], xd2        ;Store result into dest2
        XSTR    [dest3+tmp], xd3        ;Store result into dest3

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 16
mask0f: dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
constip16:
        dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
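        ;; bytes of constip16 are -1, -2, ..., -16 (little endian);
        ;; compared against the broadcast tail length in .lessthan16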

;;; func        core, ver, snum
slversion gf_6vect_mad_avx, 02, 01, 0210