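;;; gf_vect_mad_avx2 - GF(2^8) multiply-and-add into a dest buffer, AVX2
;;; version: dest ^= C * src, 32 bytes per pass, where the constant C is
;;; selected by vec_i from the nibble-lookup tables in mul_array.
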
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define return   rax
 %define return.w eax
 %define PS 8
 %define stack_size 16*3 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
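
;; Windows x64 ABI: xmm6-xmm8, r12 and r15 are callee-saved, and the
;; 5th and 6th integer arguments arrive on the stack, so FUNC_SAVE
;; spills and reloads them.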
%macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        save_reg r12, 3*16 + 0*8
        save_reg r15, 3*16 + 1*8
        end_prolog
        mov     arg4, arg(4)            ; load the 5th argument from the stack
        mov     arg5, arg(5)            ; load the 6th argument from the stack
%endmacro

%macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 16*0]
        vmovdqa xmm7, [rsp + 16*1]
        vmovdqa xmm8, [rsp + 16*2]
        mov     r12, [rsp + 3*16 + 0*8]
        mov     r15, [rsp + 3*16 + 1*8]
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define return   rax
 %define return.w eax
 %define func(x) x:
 %define FUNC_SAVE               ; all six arguments arrive in registers,
 %define FUNC_RESTORE            ; so there is nothing to save or restore
%endif
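
;; Map the ABI argument registers to the names used in the body.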
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest      arg5
%define pos       return
%define pos.w     return.w
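
;; XLDR/XSTR select the load/store flavor: unaligned moves by default,
;; aligned moves with EC_ALIGNED_ADDR and NO_NT_LDST, or non-temporal
;; moves with EC_ALIGNED_ADDR alone.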
%ifndef EC_ALIGNED_ADDR
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

default rel
[bits 64]
section .text

%define xmask0f  ymm8
%define xmask0fx xmm8
%define xgft_lo  ymm7
%define xgft_hi  ymm6

%define x0    ymm0
%define xtmpa ymm1
%define xtmph ymm2
%define xtmpl ymm3
%define xd    ymm4
%define xtmpd ymm5
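
;;;
;;; gf_vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
;;; (argument order follows the arg0..arg5 defines above; returns 0 in
;;; rax on success, 1 if len < 32)
;;;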
align 16
global gf_vect_mad_avx2:ISAL_SYM_TYPE_FUNCTION
func(gf_vect_mad_avx2)
%ifidn __OUTPUT_FORMAT__, macho64
global _gf_vect_mad_avx2:ISAL_SYM_TYPE_FUNCTION
func(_gf_vect_mad_avx2)
%endif
        FUNC_SAVE
        sub     len, 32                 ; 32 bytes per pass
        jl      .return_fail            ; fail if len < 32
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ; construct mask 0x0f0f0f...
        sal     vec_i, 5                ; vec_i *= 32: byte offset of this vector's tables
        vmovdqu xgft_lo, [vec_i+mul_array] ; low- and high-nibble tables, 16 bytes each

        vperm2i128 xgft_hi, xgft_lo, xgft_lo, 0x11 ; high-nibble table in both lanes
        vperm2i128 xgft_lo, xgft_lo, xgft_lo, 0x00 ; low-nibble table in both lanes
        XLDR    xtmpd, [dest+len]       ; back up the last 32 bytes of dest
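
;; The loop below walks dest in full 32-byte blocks. Any remainder is
;; handled by one extra pass over the final 32 bytes at offset len,
;; restoring xd from the pristine copy in xtmpd so the product is
;; applied exactly once to the overlapping bytes.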
.loop32:
        XLDR    xd, [dest+pos]          ; get next dest vector
.loop32_overlap:
        XLDR    x0, [src+pos]           ; get next source vector
        vpand   xtmpa, x0, xmask0f      ; low nibble of each src byte
        vpsraw  x0, x0, 4               ; shift the high nibble down...
        vpand   x0, x0, xmask0f         ; ...and isolate it
        vpshufb xtmph, xgft_hi, x0      ; look up mul table of high nibble
        vpshufb xtmpl, xgft_lo, xtmpa   ; look up mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ; combine the partial products (GF add)
        vpxor   xd, xd, xtmph           ; dest ^= C * src
        XSTR    [dest+pos], xd          ; store result
        add     pos, 32                 ; advance 32 bytes at a time
        cmp     pos, len
        jle     .loop32
        lea     tmp, [len + 32]         ; tmp = original length
        cmp     pos, tmp
        je      .return_pass            ; length was a multiple of 32: done

        ;; remainder: process the last 32 bytes, overlapping the previous block
        mov     pos, len                ; overlapped offset length-32
        vmovdqa xd, xtmpd               ; restore the saved dest bytes
        jmp     .loop32_overlap         ; do one more overlap pass

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data

;;;       func              core, ver, snum
slversion gf_vect_mad_avx2, 04, 01, 0202