;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_4vect_mad_sse(len, vec, vec_i, mul_array, src, dest);
;;;
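;;; Multiply-and-add of one source into four outputs over GF(2^8):
;;; dest[j] ^= gf_mul(table_j, src) for j = 0..3, 16 bytes per iteration.
;;;   len       - byte length of the vectors; fails (returns 1) if len < 16
;;;   vec       - number of source vectors (stride between table rows)
;;;   vec_i     - index of the source vector being accumulated
;;;   mul_array - concatenated 32-byte GF multiply tables (low/high nibble halves)
;;;   src       - pointer to the source vector
;;;   dest      - array of four destination pointers
;;; Returns 0 on success.
;;;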
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13
 %define return rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
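
 ;;; Win64: xmm6-xmm15 and r12/r13/r15 are callee-saved, so FUNC_SAVE spills
 ;;; them into a local stack frame; the 5th and 6th arguments (src, dest)
 ;;; arrive on the caller's stack and are fetched via arg().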
 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp + 16*0], xmm6
        movdqa  [rsp + 16*1], xmm7
        movdqa  [rsp + 16*2], xmm8
        movdqa  [rsp + 16*3], xmm9
        movdqa  [rsp + 16*4], xmm10
        movdqa  [rsp + 16*5], xmm11
        movdqa  [rsp + 16*6], xmm12
        movdqa  [rsp + 16*7], xmm13
        movdqa  [rsp + 16*8], xmm14
        movdqa  [rsp + 16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r13, 10*16 + 1*8
        save_reg r15, 10*16 + 2*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp + 16*0]
        movdqa  xmm7, [rsp + 16*1]
        movdqa  xmm8, [rsp + 16*2]
        movdqa  xmm9, [rsp + 16*3]
        movdqa  xmm10, [rsp + 16*4]
        movdqa  xmm11, [rsp + 16*5]
        movdqa  xmm12, [rsp + 16*6]
        movdqa  xmm13, [rsp + 16*7]
        movdqa  xmm14, [rsp + 16*8]
        movdqa  xmm15, [rsp + 16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r15, [rsp + 10*16 + 2*8]
        add     rsp, stack_size
 %endmacro
%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r12
 %define return rax
 %define return.w eax
 %define func(x) x: endbranch
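
 ;;; SysV AMD64: all six arguments arrive in registers; only r12 (used as
 ;;; tmp3) is callee-saved and needs to be preserved.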
 %macro FUNC_SAVE 0
        push    r12
 %endmacro

 %macro FUNC_RESTORE 0
        pop     r12
 %endmacro
%endif

;;; gf_4vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2     mul_array
%define dest3     tmp2
%define dest4     vec_i
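
;;; mul_array and vec_i are only needed while computing tmp3 (the table
;;; pointer), so their registers are reused afterwards to hold the dest2
;;; and dest4 pointers.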

%ifndef EC_ALIGNED_ADDR
;;; Use un-aligned load/store
 %define XLDR movdqu
 %define XSTR movdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR movdqa
  %define XSTR movdqa
 %else
  %define XLDR movntdqa
  %define XSTR movntdq
 %endif
%endif

default rel
[bits 64]
section .text

%define xmask0f  xmm15
%define xgft3_hi xmm14
%define xgft4_hi xmm13
%define xgft4_lo xmm12

%define x0     xmm0
%define xtmpa  xmm1
%define xtmph1 xmm2
%define xtmpl1 xmm3
%define xtmph2 xmm4
%define xtmpl2 xmm5
%define xtmph3 xmm6
%define xtmpl3 xmm7
%define xtmph4 xmm8
%define xtmpl4 xmm9
%define xd1    xmm10
%define xd2    xmm11
%define xd3    xtmph1
%define xd4    xtmpl1
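
;;; Only 16 xmm registers are available, so xd3/xd4 alias xtmph1/xtmpl1;
;;; in the main loop the dest3/dest4 vectors are loaded only after the
;;; dest1 partial products have been folded into xd1.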

align 16
global gf_4vect_mad_sse, function
func(gf_4vect_mad_sse)
        FUNC_SAVE
        sub     len, 16
        jl      .return_fail
        xor     pos, pos
        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        mov     tmp, vec
        sal     vec_i, 5                ;Multiply by 32
        lea     tmp3, [mul_array + vec_i]
        sal     tmp, 6                  ;Multiply by 64
        movdqu  xgft3_hi, [tmp3+tmp+16] ; " Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        sal     vec, 5                  ;Multiply by 32
        add     tmp, vec
        movdqu  xgft4_lo, [tmp3+tmp]    ;Load array Dx{00}, Dx{01}, Dx{02}, ...
        movdqu  xgft4_hi, [tmp3+tmp+16] ; " Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
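
        ;; mul_array holds 32-byte tables (16 low-nibble products, then 16
        ;; high-nibble products), one table per (source, output) pair, vec
        ;; tables per output row. tmp3 points at this source's table for
        ;; dest1; the dest2/3/4 tables follow at strides of vec*32 bytes.
        ;; Only the Cx high half and both Dx halves are hoisted into
        ;; registers here; the rest are reloaded inside the loop because
        ;; there are not enough free xmm registers.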
        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]     ; reuse vec_i
        mov     dest1, [dest1]

.loop16:
        XLDR    x0, [src+pos]           ;Get next source vector
        movdqu  xtmph1, [tmp3+16]       ; " Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xtmph2, [tmp3+vec+16]   ; " Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        movdqa  xtmph3, xgft3_hi
        movdqa  xtmpl4, xgft4_lo
        movdqa  xtmph4, xgft4_hi
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 4-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 4-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 4-0
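
        ;; GF(2^8) multiply by split-nibble table lookup: for each source
        ;; byte b, product = lo_tbl[b & 0x0f] ^ hi_tbl[b >> 4]. pshufb does
        ;; the 16 parallel lookups per table, pxor adds the two halves and
        ;; then accumulates the product into the destination.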
        ; dest1
        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
        pxor    xd1, xtmph1

        XLDR    xd3, [dest3+pos]        ;Reuse xtmph1, Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Reuse xtmpl1, Get next dest vector

        ; dest2
        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials
        pxor    xd2, xtmph2

        ; dest3
        pshufb  xtmph3, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph3, xtmpl3          ;GF add high and low partials
        pxor    xd3, xtmph3

        ; dest4
        pshufb  xtmph4, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl4, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph4, xtmpl4          ;GF add high and low partials
        pxor    xd4, xtmph4

        XSTR    [dest1+pos], xd1        ;Store result
        XSTR    [dest2+pos], xd2        ;Store result
        XSTR    [dest3+pos], xd3        ;Store result
        XSTR    [dest4+pos], xd4        ;Store result

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

.lessthan16:
        ;; Tail len
        ;; Do one more overlap pass
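        ;; Fewer than 16 bytes remain, so redo the last 16-byte block at
        ;; offset len (= original length - 16) and mask the partial products
        ;; so bytes already handled by .loop16 are stored back unchanged.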
        mov     tmp, len                ;Overlapped offset length-16
        XLDR    x0, [src+tmp]           ;Get next source vector
        movdqu  xtmph1, [tmp3+16]       ; " Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xtmph2, [tmp3+vec+16]   ; " Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xtmph4, [dest3+tmp]     ;Reuse xtmph4. Get next dest vector

        sub     len, pos
        movdqa  xtmpl4, [constip16]     ;Load const of i + 16
        pinsrb  xtmph3, len.w, 15
        pshufb  xtmph3, xmask0f         ;Broadcast len to all bytes
        pcmpgtb xtmph3, xtmpl4
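
        ;; len now holds (bytes remaining - 16), a negative value. Broadcast
        ;; it and compare against the per-lane constants -1..-16: the result
        ;; is 0xff in exactly the last (bytes remaining) lanes, i.e. the
        ;; lanes of this block not already written by .loop16.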
        XLDR    xtmpl4, [dest4+tmp]     ;Get next dest vector

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 4-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 4-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 4-0

        ; dest1
        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
        pand    xtmph1, xtmph3
        pxor    xd1, xtmph1

        ; dest2
        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials
        pand    xtmph2, xtmph3
        pxor    xd2, xtmph2

        ; dest3
        pshufb  xgft3_hi, x0            ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xgft3_hi, xtmpl3        ;GF add high and low partials
        pand    xgft3_hi, xtmph3
        pxor    xtmph4, xgft3_hi

        ; dest4
        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pand    xgft4_hi, xtmph3
        pxor    xtmpl4, xgft4_hi

        XSTR    [dest1+tmp], xd1        ;Store result
        XSTR    [dest2+tmp], xd2        ;Store result
        XSTR    [dest3+tmp], xtmph4     ;Store result
        XSTR    [dest4+tmp], xtmpl4     ;Store result

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 16
mask0f:
        dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
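
;;; constip16 holds the signed byte constants -1, -2, ..., -16 (lane 0 first);
;;; pcmpgtb against the broadcast (remaining - 16) value turns it into the
;;; partial-block store mask used by the tail path above.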
constip16:
        dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7