  1. /*
  2. * x86-optimized AC-3 DSP utils
  3. * Copyright (c) 2011 Justin Ruggles
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/mem.h"
  22. #include "libavutil/x86/asm.h"
  23. #include "libavutil/x86/cpu.h"
  24. #include "dsputil_mmx.h"
  25. #include "libavcodec/ac3.h"
  26. #include "libavcodec/ac3dsp.h"
/* Prototypes for the hand-written assembly implementations; one variant per
 * instruction-set level. The init function below picks the best available
 * variant at runtime based on the detected CPU flags. */

/* Minimum of exponents across reused blocks (in-place on exp). */
void ff_ac3_exponent_min_mmx (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2 (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

/* Returns OR of absolute values (MSB mask) over len int16 samples. */
int ff_ac3_max_msb_abs_int16_mmx (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);

/* In-place left/right shifts of sample buffers. */
void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

/* Float -> 24-bit fixed-point conversion. */
void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

/* Exponent extraction from mantissa coefficients. */
void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);
/* ICC on 32-bit x86 cannot satisfy the 7-register constraint of the inline
 * asm below, so force the HAVE_7REGS path off for that compiler. */
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
#   undef HAVE_7REGS
#   define HAVE_7REGS 0
#endif

#if HAVE_SSE_INLINE && HAVE_7REGS

/* Conditional-expansion helpers: IF1(x) emits x, IF0(x) emits nothing.
 * Used to instantiate MIX5/MIX_MISC in mono or stereo flavour from one
 * macro body. */
#define IF1(x) x
#define IF0(x)

/* 5-channel -> mono/stereo downmix over SSE registers.
 * %0 is a negative byte offset counting up to 0 (loop terminates when the
 * "add $16" makes it non-negative -> jl falls through); %2..%6 point one
 * past the end of each channel buffer, so (%0,%N) walks the samples.
 * xmm5/6/7 hold broadcast matrix coefficients (offsets 0, 8 and 24 into
 * matrix — assumes the symmetric coefficient layout checked by the caller).
 * NOTE(review): result is stored back into samples[0] (and samples[1] for
 * stereo), i.e. the downmix is in-place over the input channels. */
#define MIX5(mono, stereo) \
    __asm__ volatile ( \
        "movss 0(%1), %%xmm5 \n" \
        "movss 8(%1), %%xmm6 \n" \
        "movss 24(%1), %%xmm7 \n" \
        "shufps $0, %%xmm5, %%xmm5 \n" \
        "shufps $0, %%xmm6, %%xmm6 \n" \
        "shufps $0, %%xmm7, %%xmm7 \n" \
        "1: \n" \
        "movaps (%0, %2), %%xmm0 \n" \
        "movaps (%0, %3), %%xmm1 \n" \
        "movaps (%0, %4), %%xmm2 \n" \
        "movaps (%0, %5), %%xmm3 \n" \
        "movaps (%0, %6), %%xmm4 \n" \
        "mulps %%xmm5, %%xmm0 \n" \
        "mulps %%xmm6, %%xmm1 \n" \
        "mulps %%xmm5, %%xmm2 \n" \
        "mulps %%xmm7, %%xmm3 \n" \
        "mulps %%xmm7, %%xmm4 \n" \
        stereo("addps %%xmm1, %%xmm0 \n") \
        "addps %%xmm1, %%xmm2 \n" \
        "addps %%xmm3, %%xmm0 \n" \
        "addps %%xmm4, %%xmm2 \n" \
        mono("addps %%xmm2, %%xmm0 \n") \
        "movaps %%xmm0, (%0, %2) \n" \
        stereo("movaps %%xmm2, (%0, %3) \n") \
        "add $16, %0 \n" \
        "jl 1b \n" \
        : "+&r"(i) \
        : "r"(matrix), \
          "r"(samples[0] + len), \
          "r"(samples[1] + len), \
          "r"(samples[2] + len), \
          "r"(samples[3] + len), \
          "r"(samples[4] + len) \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
          "memory" \
    );

/* Generic N-channel downmix for matrices that don't fit the MIX5 fast path.
 * Outer loop (label 1) walks sample offsets; inner loop (label 2) walks
 * input channels via a negative channel index %2 (initialised from %5 =
 * -4*(in_ch-1), stepping by 4 until non-negative). %6 points one past the
 * end of the samp[] pointer table; %c8 (sizeof(float*)/4) scales the byte
 * index to pointer-table entries. xmm4/xmm5 hold the coefficients for the
 * first input channel, preloaded by the caller's matrix-broadcast asm.
 * NOTE(review): output overwrites the first (and, for stereo, last-indexed)
 * channel buffers in place — confirm against ac3_downmix_sse's contract. */
#define MIX_MISC(stereo) \
    __asm__ volatile ( \
        "mov %5, %2 \n" \
        "1: \n" \
        "mov -%c7(%6, %2, %c8), %3 \n" \
        "movaps (%3, %0), %%xmm0 \n" \
        stereo("movaps %%xmm0, %%xmm1 \n") \
        "mulps %%xmm4, %%xmm0 \n" \
        stereo("mulps %%xmm5, %%xmm1 \n") \
        "2: \n" \
        "mov (%6, %2, %c8), %1 \n" \
        "movaps (%1, %0), %%xmm2 \n" \
        stereo("movaps %%xmm2, %%xmm3 \n") \
        "mulps (%4, %2, 8), %%xmm2 \n" \
        stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
        "addps %%xmm2, %%xmm0 \n" \
        stereo("addps %%xmm3, %%xmm1 \n") \
        "add $4, %2 \n" \
        "jl 2b \n" \
        "mov %5, %2 \n" \
        stereo("mov (%6, %2, %c8), %1 \n") \
        "movaps %%xmm0, (%3, %0) \n" \
        stereo("movaps %%xmm1, (%1, %0) \n") \
        "add $16, %0 \n" \
        "jl 1b \n" \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
        : "r"(matrix_simd + in_ch), \
          "g"((intptr_t) - 4 * (in_ch - 1)), \
          "r"(samp + in_ch), \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4) \
        : "memory" \
    );
/**
 * In-place SSE downmix of in_ch channel buffers into out_ch channels using
 * the given per-channel [left,right] coefficient matrix.
 *
 * Two fast paths (MIX5) handle the common 5-channel -> stereo/mono cases
 * when the matrix has the symmetric zero/equal coefficient pattern those
 * kernels assume; everything else goes through the generic MIX_MISC path.
 */
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    /* Bit-exact coefficient comparison by viewing the floats as ints.
     * NOTE(review): this is a strict-aliasing violation in ISO C; it is
     * presumably tolerated by the compilers FFmpeg supports — a memcpy-based
     * compare would be the conforming alternative. */
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i, j, k, m;

    /* Negative byte offset; the asm loops count it up to zero. */
    i = -len * sizeof(float);

    if (in_ch == 5 && out_ch == 2 &&
        /* Stereo fast path: requires zero coefficients where MIX5 hardcodes
         * them and matching left/right values for the shared channels. */
        !(matrix_cmp[0][1] | matrix_cmp[2][0] |
          matrix_cmp[3][1] | matrix_cmp[4][0] |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
        MIX5(IF0, IF1);
    } else if (in_ch == 5 && out_ch == 1 &&
               /* Mono fast path: front pair and surround pair must share
                * their coefficients. */
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        MIX5(IF1, IF0);
    } else {
        /* Generic path: broadcast each matrix coefficient into a 4-wide
         * SIMD lane vector, then run MIX_MISC over a table of end pointers. */
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        /* j = byte size of the in_ch x 2 coefficient table; the asm walks
         * it backwards, splatting two coefficients per iteration. */
        j = 2 * in_ch * sizeof(float);
        __asm__ volatile (
            "1: \n"
            "sub $8, %0 \n"
            "movss (%2, %0), %%xmm4 \n"
            "movss 4(%2, %0), %%xmm5 \n"
            "shufps $0, %%xmm4, %%xmm4 \n"
            "shufps $0, %%xmm5, %%xmm5 \n"
            "movaps %%xmm4, (%1, %0, 4) \n"
            "movaps %%xmm5, 16(%1, %0, 4) \n"
            "jg 1b \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}
#endif /* HAVE_SSE_INLINE && HAVE_7REGS */
  167. av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
  168. {
  169. int mm_flags = av_get_cpu_flags();
  170. if (EXTERNAL_MMX(mm_flags)) {
  171. c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
  172. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
  173. c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
  174. c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
  175. }
  176. if (EXTERNAL_AMD3DNOW(mm_flags)) {
  177. c->extract_exponents = ff_ac3_extract_exponents_3dnow;
  178. if (!bit_exact) {
  179. c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
  180. }
  181. }
  182. if (EXTERNAL_MMXEXT(mm_flags)) {
  183. c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
  184. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
  185. }
  186. if (EXTERNAL_SSE(mm_flags)) {
  187. c->float_to_fixed24 = ff_float_to_fixed24_sse;
  188. }
  189. if (EXTERNAL_SSE2(mm_flags)) {
  190. c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
  191. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
  192. c->float_to_fixed24 = ff_float_to_fixed24_sse2;
  193. c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
  194. c->extract_exponents = ff_ac3_extract_exponents_sse2;
  195. if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
  196. c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
  197. c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
  198. }
  199. }
  200. if (EXTERNAL_SSSE3(mm_flags)) {
  201. c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
  202. if (!(mm_flags & AV_CPU_FLAG_ATOM)) {
  203. c->extract_exponents = ff_ac3_extract_exponents_ssse3;
  204. }
  205. }
  206. #if HAVE_SSE_INLINE && HAVE_7REGS
  207. if (INLINE_SSE(mm_flags)) {
  208. c->downmix = ac3_downmix_sse;
  209. }
  210. #endif
  211. }