
;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
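; Note: the aligned loads/stores (mova) assume dst, src0 and src1 are
; mmsize-aligned and that len is a multiple of 2*mmsize/4 floats (8 for the
; SSE version, 16 for AVX). The loop runs back to front: lenq starts at the
; byte offset of the last 2*mmsize block and counts down to 0.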
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea       lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova      m0,   [src0q + lenq]
    mova      m1,   [src0q + lenq + mmsize]
    mulps     m0, m0, [src1q + lenq]
    mulps     m1, m1, [src1q + lenq + mmsize]
    mova      [dstq + lenq], m0
    mova      [dstq + lenq + mmsize], m1
    sub       lenq, 2*mmsize
    jge       .loop
    REP_RET
%endmacro

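; Each INIT_* + macro invocation emits one version of the function:
; INIT_XMM sse expands the body with 16-byte xmm registers (mmsize = 16),
; INIT_YMM avx with 32-byte ymm registers (mmsize = 32).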
INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
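; The scalar 'mul' arrives differently per ABI: in xmm0 on UNIX64 (so only
; three GPR arguments need declaring), in xmm2 on WIN64 (it is the third
; argument), and on the stack (mulm) on x86_32.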
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,3, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    mova        xmm0, xmm2
%endif
    shufps      xmm0, xmm0, 0
%if cpuflag(avx)
    vinsertf128 m0, m0, xmm0, 1
%endif
%endif
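; m0 now holds mul broadcast to every float lane.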
    lea   lenq, [lend*4-2*mmsize]
.loop:
    mulps    m1, m0, [srcq+lenq       ]
    mulps    m2, m0, [srcq+lenq+mmsize]
    addps    m1, m1, [dstq+lenq       ]
    addps    m2, m2, [dstq+lenq+mmsize]
    mova  [dstq+lenq       ], m1
    mova  [dstq+lenq+mmsize], m2
    sub      lenq, 2*mmsize
    jge   .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------
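; Same scalar-argument handling as ff_vector_fmac_scalar above, but without
; the accumulate; only an SSE version is instantiated below.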
%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss    m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
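; splat the scalar across all four lanes of m0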
    shufps   m0, m0, 0
    lea    lenq, [lend*4-mmsize]
.loop:
    mova     m1, [srcq+lenq]
    mulps    m1, m0
    mova  [dstq+lenq], m1
    sub    lenq, mmsize
    jge    .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR

;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------
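; Double-precision variant. movlhps copies the low qword of the register to
; its high qword, duplicating the scalar; for AVX, vinsertf128 then mirrors
; the xmm half into the upper half of the ymm register. On x86_32 the 8-byte
; double occupies two stack slots, so len is fetched from the following slot
; via the extra lenaddr argument name.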
%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov         lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD m0, mulm
%else
%if WIN64
    movlhps     xmm2, xmm2
%if cpuflag(avx)
    vinsertf128 ymm2, ymm2, xmm2, 1
%endif
    SWAP 0, 2
%else
    movlhps     xmm0, xmm0
%if cpuflag(avx)
    vinsertf128 ymm0, ymm0, xmm0, 1
%endif
%endif
%endif
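; m0 now holds mul in every double lane.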
    lea   lenq, [lend*8-2*mmsize]
.loop:
    mulpd    m1, m0, [srcq+lenq       ]
    mulpd    m2, m0, [srcq+lenq+mmsize]
    mova  [dstq+lenq       ], m1
    mova  [dstq+lenq+mmsize], m2
    sub      lenq, 2*mmsize
    jge   .loop
    REP_RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif