gf_2vect_mad_sse.asm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_2vect_mad_sse(len, vec, vec_i, mul_array, src, dest);
;;;
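;;; Parameters, as used by the code below (C-side types are an assumption,
;;; not stated in this file):
;;;   len       - number of bytes to process; lengths under 16 return fail
;;;   vec       - number of source vectors in the encode matrix
;;;   vec_i     - index of the source vector being accumulated
;;;   mul_array - GF(2^8) multiply tables, 32 bytes per coefficient
;;;   src       - pointer to the source buffer
;;;   dest      - array of two destination pointers (dest[0], dest[1])
;;;
;;; A plausible C prototype under those assumptions:
;;;   void gf_2vect_mad_sse(int len, int vec, int vec_i,
;;;                         unsigned char *mul_array,
;;;                         unsigned char *src, unsigned char **dest);
;;;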
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
%define arg0   rcx
%define arg0.w ecx
%define arg1   rdx
%define arg2   r8
%define arg3   r9
%define arg4   r12
%define arg5   r15
%define tmp    r11
%define tmp2   r10
%define return rax
%define return.w eax
%define stack_size 16*9 + 3*8
%define arg(x) [rsp + stack_size + PS + PS*x]
%define func(x) proc_frame x
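
;; Windows x64 treats xmm6-xmm15 and r12/r15 as callee-saved and passes the
;; fifth and sixth arguments on the stack, so FUNC_SAVE builds a frame for
;; nine xmm saves plus r12/r15 (the extra 8 bytes in stack_size keep rsp
;; 16-byte aligned for movdqa) and fetches arg4/arg5 through the arg(x) macro.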
%macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp + 16*0], xmm6
        movdqa  [rsp + 16*1], xmm7
        movdqa  [rsp + 16*2], xmm8
        movdqa  [rsp + 16*3], xmm9
        movdqa  [rsp + 16*4], xmm10
        movdqa  [rsp + 16*5], xmm11
        movdqa  [rsp + 16*6], xmm12
        movdqa  [rsp + 16*7], xmm13
        movdqa  [rsp + 16*8], xmm14
        save_reg r12, 9*16 + 0*8
        save_reg r15, 9*16 + 1*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
        movdqa  xmm6,  [rsp + 16*0]
        movdqa  xmm7,  [rsp + 16*1]
        movdqa  xmm8,  [rsp + 16*2]
        movdqa  xmm9,  [rsp + 16*3]
        movdqa  xmm10, [rsp + 16*4]
        movdqa  xmm11, [rsp + 16*5]
        movdqa  xmm12, [rsp + 16*6]
        movdqa  xmm13, [rsp + 16*7]
        movdqa  xmm14, [rsp + 16*8]
        mov     r12,  [rsp + 9*16 + 0*8]
        mov     r15,  [rsp + 9*16 + 1*8]
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
%define arg0   rdi
%define arg0.w edi
%define arg1   rsi
%define arg2   rdx
%define arg3   rcx
%define arg4   r8
%define arg5   r9
%define tmp    r11
%define tmp2   r10
%define return rax
%define return.w eax
%define func(x) x:
%define FUNC_SAVE
%define FUNC_RESTORE
%endif
;;; gf_2vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w
%define dest2     tmp2
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
%define XLDR movdqu
%define XSTR movdqu
%else
;;; Use non-temporal load/store
%ifdef NO_NT_LDST
%define XLDR movdqa
%define XSTR movdqa
%else
%define XLDR movntdqa
%define XSTR movntdq
%endif
%endif
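
;; XLDR/XSTR abstract the 16-byte loads and stores: unaligned movdqu by
;; default, aligned movdqa when EC_ALIGNED_ADDR is defined together with
;; NO_NT_LDST, and streaming movntdqa/movntdq (non-temporal, cache-bypassing)
;; when only EC_ALIGNED_ADDR is defined.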
default rel

[bits 64]
section .text

%define xmask0f  xmm14
%define xgft1_lo xmm13
%define xgft1_hi xmm12
%define xgft2_lo xmm11
%define xgft2_hi xmm10

%define x0     xmm0
%define xtmpa  xmm1
%define xtmph1 xmm2
%define xtmpl1 xmm3
%define xtmph2 xmm4
%define xtmpl2 xmm5
%define xd1    xmm6
%define xd2    xmm7
%define xtmpd1 xmm8
%define xtmpd2 xmm9

align 16
global gf_2vect_mad_sse:ISAL_SYM_TYPE_FUNCTION
func(gf_2vect_mad_sse)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_2vect_mad_sse:ISAL_SYM_TYPE_FUNCTION
func(_gf_2vect_mad_sse)
%endif
        FUNC_SAVE
        sub     len, 16
        jl      .return_fail
        xor     pos, pos
        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
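
;; mul_array holds one 32-byte lookup table per coefficient: 16 bytes of
;; low-nibble products followed by 16 bytes of high-nibble products. The
;; address arithmetic below implies that the tables for the first output
;; occupy the first vec*32 bytes, so coefficient A for this source lives at
;; mul_array + vec_i*32 and coefficient B a further vec*32 bytes beyond it.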
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5
        lea     tmp, [mul_array + vec_i]

        movdqu  xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xgft1_hi, [tmp+16]      ; "     "   Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xgft2_hi, [tmp+vec+16]  ; "     "   Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}

        mov     dest2, [dest1+PS]
        mov     dest1, [dest1]

        XLDR    xtmpd1, [dest1+len]     ;backup the last 16 bytes in dest
        XLDR    xtmpd2, [dest2+len]     ;backup the last 16 bytes in dest
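
;; Main loop: each pass multiplies 16 source bytes by the two constants and
;; xors the products into both destinations (the "mad" step). The GF(2^8)
;; multiply is done nibble-wise: the source is split into low and high
;; nibbles, each nibble indexes a 16-entry table via pshufb, and the two
;; partial products are xored together.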
.loop16:
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
.loop16_overlap:
        XLDR    x0, [src+pos]           ;Get next source vector

        movdqa  xtmph1, xgft1_hi        ;Reload const array registers
        movdqa  xtmpl1, xgft1_lo
        movdqa  xtmph2, xgft2_hi        ;Reload const array registers
        movdqa  xtmpl2, xgft2_lo

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 4-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 4-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 4-0

        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
        pxor    xd1, xtmph1

        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials
        pxor    xd2, xtmph2

        XSTR    [dest1+pos], xd1        ;Store result
        XSTR    [dest2+pos], xd2        ;Store result

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

;; Tail len
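;; If the original length was not a multiple of 16, finish with one overlapped
;; pass: rewind pos to len (i.e. length-16) and reload the destination
;; registers from xtmpd1/xtmpd2, the pre-loop copy of the last 16 dest bytes,
;; so bytes already updated by the main loop are recomputed from their
;; original values rather than being accumulated twice.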
        mov     pos, len                ;Overlapped offset length-16
        movdqa  xd1, xtmpd1             ;Restore xd1
        movdqa  xd2, xtmpd2             ;Restore xd2
        jmp     .loop16_overlap         ;Do one more overlap pass

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame
section .data

align 16
mask0f:
        dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;;       func             core, ver, snum
slversion gf_2vect_mad_sse, 00,   01,  0203