; audiodsp.asm -- x86 SIMD audio DSP routines (FFmpeg)

;******************************************************************************
;* optimized audio functions
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
  21. %include "libavutil/x86/x86util.asm"
  22. SECTION .text
  23. ; int ff_scalarproduct_int16(int16_t *v1, int16_t *v2, int order)
  24. INIT_XMM sse2
  25. cglobal scalarproduct_int16, 3,3,3, v1, v2, order
  26. add orderd, orderd
  27. add v1q, orderq
  28. add v2q, orderq
  29. neg orderq
  30. pxor m2, m2
  31. .loop:
  32. movu m0, [v1q + orderq]
  33. movu m1, [v1q + orderq + mmsize]
  34. pmaddwd m0, [v2q + orderq]
  35. pmaddwd m1, [v2q + orderq + mmsize]
  36. paddd m2, m0
  37. paddd m2, m1
  38. add orderq, mmsize*2
  39. jl .loop
  40. HADDD m2, m0
  41. movd eax, m2
  42. RET
  43. %if HAVE_AVX2_EXTERNAL
  44. INIT_YMM avx2
  45. cglobal scalarproduct_int16, 3,3,2, v1, v2, order
  46. add orderd, orderd
  47. add v1q, orderq
  48. add v2q, orderq
  49. neg orderq
  50. pxor m1, m1
  51. .loop:
  52. movu m0, [v1q + orderq]
  53. pmaddwd m0, [v2q + orderq]
  54. paddd m1, m0
  55. add orderq, mmsize
  56. jl .loop
  57. HADDD m1, m0
  58. movd eax, xm1
  59. RET
  60. %endif
  61. ;-----------------------------------------------------------------------------
  62. ; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min,
  63. ; int32_t max, unsigned int len)
  64. ;-----------------------------------------------------------------------------
  65. ; %1 = number of xmm registers used
  66. ; %2 = number of inline load/process/store loops per asm loop
  67. ; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
  68. ; %4 = CLIPD function takes min/max as float instead of int (SSE2 version)
  69. ; %5 = suffix
  70. %macro VECTOR_CLIP_INT32 4-5
  71. cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
  72. %if %4
  73. cvtsi2ss m4, minm
  74. cvtsi2ss m5, maxm
  75. %else
  76. movd m4, minm
  77. movd m5, maxm
  78. %endif
  79. SPLATD m4
  80. SPLATD m5
  81. .loop:
  82. %assign %%i 0
  83. %rep %2
  84. mova m0, [srcq + mmsize * (0 + %%i)]
  85. mova m1, [srcq + mmsize * (1 + %%i)]
  86. mova m2, [srcq + mmsize * (2 + %%i)]
  87. mova m3, [srcq + mmsize * (3 + %%i)]
  88. %if %3
  89. mova m7, [srcq + mmsize * (4 + %%i)]
  90. mova m8, [srcq + mmsize * (5 + %%i)]
  91. mova m9, [srcq + mmsize * (6 + %%i)]
  92. mova m10, [srcq + mmsize * (7 + %%i)]
  93. %endif
  94. CLIPD m0, m4, m5, m6
  95. CLIPD m1, m4, m5, m6
  96. CLIPD m2, m4, m5, m6
  97. CLIPD m3, m4, m5, m6
  98. %if %3
  99. CLIPD m7, m4, m5, m6
  100. CLIPD m8, m4, m5, m6
  101. CLIPD m9, m4, m5, m6
  102. CLIPD m10, m4, m5, m6
  103. %endif
  104. mova [dstq + mmsize * (0 + %%i)], m0
  105. mova [dstq + mmsize * (1 + %%i)], m1
  106. mova [dstq + mmsize * (2 + %%i)], m2
  107. mova [dstq + mmsize * (3 + %%i)], m3
  108. %if %3
  109. mova [dstq + mmsize * (4 + %%i)], m7
  110. mova [dstq + mmsize * (5 + %%i)], m8
  111. mova [dstq + mmsize * (6 + %%i)], m9
  112. mova [dstq + mmsize * (7 + %%i)], m10
  113. %endif
  114. %assign %%i (%%i + 4 * (1 + %3))
  115. %endrep
  116. add srcq, mmsize*4*(%2+%3)
  117. add dstq, mmsize*4*(%2+%3)
  118. sub lend, mmsize*(%2+%3)
  119. jg .loop
  120. RET
  121. %endmacro
  122. INIT_XMM sse2
  123. VECTOR_CLIP_INT32 6, 1, 0, 0, _int
  124. VECTOR_CLIP_INT32 6, 2, 0, 1
  125. INIT_XMM sse4
  126. %ifdef m8
  127. VECTOR_CLIP_INT32 11, 1, 1, 0
  128. %else
  129. VECTOR_CLIP_INT32 6, 1, 0, 0
  130. %endif
  131. ; void ff_vector_clipf_sse(float *dst, const float *src,
  132. ; int len, float min, float max)
  133. INIT_XMM sse
  134. cglobal vector_clipf, 3, 3, 6, dst, src, len, min, max
  135. %if ARCH_X86_32
  136. VBROADCASTSS m0, minm
  137. VBROADCASTSS m1, maxm
  138. %elif WIN64
  139. SWAP 0, 3
  140. VBROADCASTSS m0, m0
  141. VBROADCASTSS m1, maxm
  142. %else ; 64bit sysv
  143. VBROADCASTSS m0, m0
  144. VBROADCASTSS m1, m1
  145. %endif
  146. movsxdifnidn lenq, lend
  147. .loop:
  148. mova m2, [srcq + 4 * lenq - 4 * mmsize]
  149. mova m3, [srcq + 4 * lenq - 3 * mmsize]
  150. mova m4, [srcq + 4 * lenq - 2 * mmsize]
  151. mova m5, [srcq + 4 * lenq - 1 * mmsize]
  152. maxps m2, m0
  153. maxps m3, m0
  154. maxps m4, m0
  155. maxps m5, m0
  156. minps m2, m1
  157. minps m3, m1
  158. minps m4, m1
  159. minps m5, m1
  160. mova [dstq + 4 * lenq - 4 * mmsize], m2
  161. mova [dstq + 4 * lenq - 3 * mmsize], m3
  162. mova [dstq + 4 * lenq - 2 * mmsize], m4
  163. mova [dstq + 4 * lenq - 1 * mmsize], m5
  164. sub lenq, mmsize
  165. jg .loop
  166. RET