/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/float_dsp.h"
#include "cpu.h"
#include "asm.h"

void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
                        int len);
void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
                        int len);

void ff_vector_fmac_scalar_sse(float *dst, const float *src, float mul,
                               int len);
void ff_vector_fmac_scalar_avx(float *dst, const float *src, float mul,
                               int len);

void ff_vector_fmul_scalar_sse(float *dst, const float *src, float mul,
                               int len);

void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
                                double mul, int len);
void ff_vector_dmul_scalar_avx(double *dst, const double *src,
                               double mul, int len);

void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                            const float *src2, int len);
void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
                            const float *src2, int len);

void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
                                const float *src1, int len);
void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
                                const float *src1, int len);

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

#if HAVE_6REGS && HAVE_INLINE_ASM
static void vector_fmul_window_3dnowext(float *dst, const float *src0,
                                        const float *src1, const float *win,
                                        int len)
{
    x86_reg i = -len * 4;
    x86_reg j =  len * 4 - 8;
    __asm__ volatile (
        "1:                           \n"
        "pswapd (%5, %1), %%mm1       \n"
        "movq   (%5, %0), %%mm0       \n"
        "pswapd (%4, %1), %%mm5       \n"
        "movq   (%3, %0), %%mm4       \n"
        "movq   %%mm0, %%mm2          \n"
        "movq   %%mm1, %%mm3          \n"
        "pfmul  %%mm4, %%mm2          \n" // src0[len + i] * win[len + i]
        "pfmul  %%mm5, %%mm3          \n" // src1[j] * win[len + j]
        "pfmul  %%mm4, %%mm1          \n" // src0[len + i] * win[len + j]
        "pfmul  %%mm5, %%mm0          \n" // src1[j] * win[len + i]
        "pfadd  %%mm3, %%mm2          \n"
        "pfsub  %%mm0, %%mm1          \n"
        "pswapd %%mm2, %%mm2          \n"
        "movq   %%mm1, (%2, %0)       \n"
        "movq   %%mm2, (%2, %1)       \n"
        "sub    $8, %1                \n"
        "add    $8, %0                \n"
        "jl     1b                    \n"
        "femms                        \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}

static void vector_fmul_window_sse(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    x86_reg i = -len * 4;
    x86_reg j =  len * 4 - 16;
    __asm__ volatile (
        "1:                             \n"
        "movaps (%5, %1), %%xmm1        \n"
        "movaps (%5, %0), %%xmm0        \n"
        "movaps (%4, %1), %%xmm5        \n"
        "movaps (%3, %0), %%xmm4        \n"
        "shufps $0x1b, %%xmm1, %%xmm1   \n"
        "shufps $0x1b, %%xmm5, %%xmm5   \n"
        "movaps %%xmm0, %%xmm2          \n"
        "movaps %%xmm1, %%xmm3          \n"
        "mulps  %%xmm4, %%xmm2          \n" // src0[len + i] * win[len + i]
        "mulps  %%xmm5, %%xmm3          \n" // src1[j] * win[len + j]
        "mulps  %%xmm4, %%xmm1          \n" // src0[len + i] * win[len + j]
        "mulps  %%xmm5, %%xmm0          \n" // src1[j] * win[len + i]
        "addps  %%xmm3, %%xmm2          \n"
        "subps  %%xmm0, %%xmm1          \n"
        "shufps $0x1b, %%xmm2, %%xmm2   \n"
        "movaps %%xmm1, (%2, %0)        \n"
        "movaps %%xmm2, (%2, %1)        \n"
        "sub    $16, %1                 \n"
        "add    $16, %0                 \n"
        "jl     1b                      \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}
#endif /* HAVE_6REGS && HAVE_INLINE_ASM */
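
/*
 * Reference semantics for the overlap-add window routines above, written as
 * the scalar loop the SIMD code is equivalent to (this mirrors the generic C
 * implementation in libavutil/float_dsp.c; kept here only as a sketch of the
 * vector_fmul_window contract, see the product comments in the asm):
 *
 *     dst += len; win += len; src0 += len;
 *     for (i = -len, j = len - 1; i < 0; i++, j--) {
 *         float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
 *         dst[i] = s0 * wj - s1 * wi;
 *         dst[j] = s0 * wi + s1 * wj;
 *     }
 */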

void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_6REGS && HAVE_INLINE_ASM
    if (INLINE_AMD3DNOWEXT(mm_flags)) {
        fdsp->vector_fmul_window = vector_fmul_window_3dnowext;
    }
    if (INLINE_SSE(mm_flags)) {
        /* deliberately overrides the 3DNowExt version on CPUs that have both */
        fdsp->vector_fmul_window = vector_fmul_window_sse;
    }
#endif
    if (EXTERNAL_SSE(mm_flags)) {
        fdsp->vector_fmul = ff_vector_fmul_sse;
        fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse;
        fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_sse;
        fdsp->vector_fmul_add = ff_vector_fmul_add_sse;
        fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
        fdsp->scalarproduct_float = ff_scalarproduct_float_sse;
    }
    if (EXTERNAL_SSE2(mm_flags)) {
        fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_sse2;
    }
    if (EXTERNAL_AVX(mm_flags)) {
        /* AVX versions take precedence over the SSE/SSE2 ones set above */
        fdsp->vector_fmul = ff_vector_fmul_avx;
        fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_avx;
        fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_avx;
        fdsp->vector_fmul_add = ff_vector_fmul_add_avx;
        fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
    }
}
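
/*
 * Usage sketch: the generic initializer in libavutil/float_dsp.c installs the
 * C fallbacks and then hands the context to this per-arch hook. In FFmpeg
 * versions contemporary with this file that initializer is
 * avpriv_float_dsp_init(); the exact name and signature may differ across
 * releases, so treat the call below as illustrative only:
 *
 *     void avpriv_float_dsp_init(AVFloatDSPContext *fdsp, int bit_exact)
 *     {
 *         // ... assign the plain C implementations first ...
 *         if (ARCH_X86)
 *             ff_float_dsp_init_x86(fdsp);
 *     }
 */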