resample_mmx.h

/*
 * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86/asm.h"
#include "libavutil/cpu.h"
#include "libswresample/swresample_internal.h"

DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
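
/*
 * Note (added for clarity, not part of the original header): the low quadword
 * of ff_resample_int16_rounder is 0x4000 = 1 << 14.  The INT16 common cores
 * below preload it into the accumulator, so the later "psrad $15" rounds the
 * summed products to nearest instead of simply truncating them.
 */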

#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
    "1:                         \n\t"\
    "movq    (%1, %0), %%mm1    \n\t"\
    "pmaddwd (%2, %0), %%mm1    \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "add $8, %0                 \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "psrad $15, %%mm0           \n\t"\
    "packssdw %%mm0, %%mm0      \n\t"\
    "movd %%mm0, (%3)           \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
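
/*
 * Usage sketch (added for illustration; not part of the original header).
 * Each COMMON_CORE_* macro computes one output sample as the dot product of
 * c->filter_length input samples with one filter phase, and expects the
 * expanding function to have c, src, sample_index, filter, dst and dst_index
 * in scope.  The function and helper names below are assumptions made for
 * the sketch, not the actual libswresample template:
 *
 *     static void resample_common_int16_sketch(ResampleContext *c,
 *                                              int16_t *dst, const int16_t *src,
 *                                              int dst_size)
 *     {
 *         const int16_t *filter_bank = get_filter_bank(c);          // hypothetical helper
 *         for (int dst_index = 0; dst_index < dst_size; dst_index++) {
 *             int sample_index      = source_index(c, dst_index);   // hypothetical helper
 *             int phase             = source_phase(c, dst_index);   // hypothetical helper
 *             const int16_t *filter = filter_bank + phase * c->filter_alloc;
 *             COMMON_CORE_INT16_MMX2
 *         }
 *     }
 */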

#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor %%mm0, %%mm0          \n\t"\
    "pxor %%mm2, %%mm2          \n\t"\
    "1:                         \n\t"\
    "movq    (%3, %0), %%mm1    \n\t"\
    "movq       %%mm1, %%mm3    \n\t"\
    "pmaddwd (%4, %0), %%mm1    \n\t"\
    "pmaddwd (%5, %0), %%mm3    \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "paddd %%mm3, %%mm2         \n\t"\
    "add $8, %0                 \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd %%mm1, %%mm0         \n\t"\
    "paddd %%mm3, %%mm2         \n\t"\
    "movd %%mm0, %1             \n\t"\
    "movd %%mm2, %2             \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
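
/*
 * Note (added for clarity, not part of the original header): unlike the
 * COMMON cores, the LINEAR_CORE_* macros do not store a sample themselves.
 * They accumulate two dot products in parallel, against the current filter
 * phase and against the next one (filter + c->filter_alloc), and return them
 * in val and v2 so the caller can interpolate between adjacent phases.  A
 * hedged sketch of that blend, with frac and phase_count as assumed names for
 * the fractional position and the number of phases (the exact formula used by
 * the template may differ):
 *
 *     int val, v2;
 *     LINEAR_CORE_INT16_MMX2
 *     // linear interpolation between the two phases, then the same
 *     // round-and-shift the INT16 common core performs via packssdw/psrad
 *     int64_t acc = val + (v2 - (int64_t)val) * frac / phase_count;  // illustrative only
 *     dst[dst_index] = av_clip_int16((acc + 0x4000) >> 15);
 */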

#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "add $16, %0                  \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "psrad $15, %%xmm0            \n\t"\
    "packssdw %%xmm0, %%xmm0      \n\t"\
    "movd %%xmm0, (%3)            \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
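
/*
 * Note (added for clarity, not part of the original header): the SSE2 cores
 * load the source with unaligned loads (movdqu/movupd) because
 * src + sample_index can sit at any offset, while the filter is read directly
 * as a memory operand of pmaddwd/mulpd, which faults on addresses that are
 * not 16-byte aligned.  That only works if each filter phase starts 16-byte
 * aligned and is zero-padded out to whole 16-byte blocks, which these macros
 * implicitly assume of their caller.
 */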

#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor %%xmm0, %%xmm0          \n\t"\
    "pxor %%xmm2, %%xmm2          \n\t"\
    "1:                           \n\t"\
    "movdqu  (%3, %0), %%xmm1     \n\t"\
    "movdqa     %%xmm1, %%xmm3    \n\t"\
    "pmaddwd (%4, %0), %%xmm1     \n\t"\
    "pmaddwd (%5, %0), %%xmm3     \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "add $16, %0                  \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd %%xmm1, %%xmm0         \n\t"\
    "paddd %%xmm3, %%xmm2         \n\t"\
    "movd %%xmm0, %1              \n\t"\
    "movd %%xmm2, %2              \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);

#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd %%xmm0, %%xmm0       \n\t"\
    "1:                         \n\t"\
    "movupd (%1, %0), %%xmm1    \n\t"\
    "mulpd  (%2, %0), %%xmm1    \n\t"\
    "addpd %%xmm1, %%xmm0       \n\t"\
    "add $16, %0                \n\t"\
    " js 1b                     \n\t"\
    "movhlps %%xmm0, %%xmm1     \n\t"\
    "addpd %%xmm1, %%xmm0       \n\t"\
    "movsd %%xmm0, (%3)         \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
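
/*
 * Note (added for clarity, not part of the original header): the DBL cores
 * run the same dot-product loops on double-precision samples, so len is
 * -8*c->filter_length (8 bytes per sample) and each iteration handles two
 * doubles.  The horizontal sum is done with movhlps + addpd and the scalar
 * result is stored with movsd; no rounding constant or shift is needed.
 */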

#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd %%xmm0, %%xmm0       \n\t"\
    "xorpd %%xmm2, %%xmm2       \n\t"\
    "1:                         \n\t"\
    "movupd (%3, %0), %%xmm1    \n\t"\
    "movapd %%xmm1, %%xmm3      \n\t"\
    "mulpd  (%4, %0), %%xmm1    \n\t"\
    "mulpd  (%5, %0), %%xmm3    \n\t"\
    "addpd %%xmm1, %%xmm0       \n\t"\
    "addpd %%xmm3, %%xmm2       \n\t"\
    "add $16, %0                \n\t"\
    " js 1b                     \n\t"\
    "movhlps %%xmm0, %%xmm1     \n\t"\
    "movhlps %%xmm2, %%xmm3     \n\t"\
    "addpd %%xmm1, %%xmm0       \n\t"\
    "addpd %%xmm3, %%xmm2       \n\t"\
    "movsd %%xmm0, %1           \n\t"\
    "movsd %%xmm2, %2           \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
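
/*
 * Note (added for clarity, not part of the original header): the
 * double-precision linear core returns val and v2 through "=m" memory
 * operands because movsd stores a 64-bit double and cannot target a
 * general-purpose register, whereas the int16 linear cores can movd their
 * 32-bit results straight into "=r" register outputs.
 */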