/*
 * libavfilter/x86/vf_spp.c — x86 (MMX inline assembly) acceleration for the
 * SPP (simple postprocessing) filter.
 */
  1. /*
  2. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along
  17. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  18. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  19. */
  20. #include "libavutil/attributes.h"
  21. #include "libavutil/cpu.h"
  22. #include "libavutil/mem.h"
  23. #include "libavutil/x86/asm.h"
  24. #include "libavfilter/vf_spp.h"
  25. #if HAVE_MMX_INLINE
/*
 * Hard-threshold requantization ("mode 0") of one 8x8 block of int16 DCT
 * coefficients, MMX inline-assembly version.
 *
 * Coefficients whose magnitude falls within the qp-derived threshold are
 * zeroed; the remaining ones receive a +4 rounding bias and are descaled by
 * an arithmetic right shift of 3.  The DC coefficient must never be
 * thresholded, so it is recomputed in plain C after the asm block.
 *
 * The src offsets passed to REQUANT_CORE are interleaved (0,8,64,72 / ...):
 * together with the punpck transpose inside the macro, the output block is
 * written in a permuted order relative to src.
 *
 * `permutation` is unused by this implementation; it exists only to match
 * the SPPContext.requantize callback signature (NOTE(review): presumably the
 * caller guarantees the permutation this code implies — confirm against the
 * C reference in libavfilter/vf_spp.c).
 */
static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
                           int qp, const uint8_t *permutation)
{
    int bias = 0; //FIXME
    unsigned int threshold1;

    /* magnitude bound below which a (non-DC) coefficient is discarded */
    threshold1 = qp * ((1<<4) - bias) - 1;

/*
 * Requantize 4 groups of 4 coefficients:
 *   mm4 = threshold1+1, mm5 = threshold1+5, mm6 = threshold1-4
 *   (each broadcast to all four 16-bit lanes by the packssdw pairs below).
 * The psubw / paddusw / paddw / psubusw chain relies on unsigned saturation
 * to map every value whose magnitude is within the threshold to 0 while
 * leaving larger values biased by +4; psraw $3 then performs the descale.
 * The punpcklwd/punpckhwd block is a 4x4 word transpose (lanes A/B/C/D).
 */
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
    "movq " #src0 ", %%mm0 \n" \
    "movq " #src1 ", %%mm1 \n" \
    "movq " #src2 ", %%mm2 \n" \
    "movq " #src3 ", %%mm3 \n" \
    "psubw %%mm4, %%mm0 \n" \
    "psubw %%mm4, %%mm1 \n" \
    "psubw %%mm4, %%mm2 \n" \
    "psubw %%mm4, %%mm3 \n" \
    "paddusw %%mm5, %%mm0 \n" \
    "paddusw %%mm5, %%mm1 \n" \
    "paddusw %%mm5, %%mm2 \n" \
    "paddusw %%mm5, %%mm3 \n" \
    "paddw %%mm6, %%mm0 \n" \
    "paddw %%mm6, %%mm1 \n" \
    "paddw %%mm6, %%mm2 \n" \
    "paddw %%mm6, %%mm3 \n" \
    "psubusw %%mm6, %%mm0 \n" \
    "psubusw %%mm6, %%mm1 \n" \
    "psubusw %%mm6, %%mm2 \n" \
    "psubusw %%mm6, %%mm3 \n" \
    "psraw $3, %%mm0 \n" \
    "psraw $3, %%mm1 \n" \
    "psraw $3, %%mm2 \n" \
    "psraw $3, %%mm3 \n" \
    \
    "movq %%mm0, %%mm7 \n" \
    "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
    "movq %%mm1, %%mm2 \n" \
    "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
    "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
    "movq %%mm0, %%mm3 \n" \
    "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
    "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
    "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
    \
    "movq %%mm0, " #dst0 " \n" \
    "movq %%mm7, " #dst1 " \n" \
    "movq %%mm3, " #dst2 " \n" \
    "movq %%mm1, " #dst3 " \n"

    __asm__ volatile(
        /* broadcast the three 32-bit threshold operands (%2/%3/%4) to all
           four word lanes of mm4/mm5/mm6 via two packssdw passes each */
        "movd %2, %%mm4 \n"
        "movd %3, %%mm5 \n"
        "movd %4, %%mm6 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm6, %%mm6 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm6, %%mm6 \n"
        REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate than needed?
    );
    /* DC coefficient is never thresholded: just round and descale it */
    dst[0] = (src[0] + 4) >> 3;
}
/*
 * Soft-threshold requantization ("mode 1") of one 8x8 block of int16 DCT
 * coefficients, MMX inline-assembly version.  Unlike the hard threshold,
 * each coefficient's magnitude is shrunk toward zero by the threshold
 * (clamping at 0) rather than being zeroed outright.
 *
 * Per 4-word group: pcmpgtw builds a sign mask in mm6/mm7, pxor with that
 * mask gives a one's-complement absolute value, psubusw subtracts the
 * threshold (mm4) with an unsigned clamp at 0, and a second pxor restores
 * the sign.  paddsw with mm5 (= 4) adds the rounding bias and psraw $3
 * descales.  NOTE(review): the one's-complement abs is off by one for
 * negative values — presumably an accepted approximation here; confirm
 * against the plain-C softthresh in libavfilter/vf_spp.c.
 *
 * The src offsets in the REQUANT_CORE calls are interleaved exactly as in
 * hardthresh_mmx, producing the same permuted/transposed output layout.
 * The DC coefficient is recomputed in plain C after the asm block.
 * `permutation` is unused; kept only for callback-signature compatibility.
 */
static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
                           int qp, const uint8_t *permutation)
{
    int bias = 0; //FIXME
    unsigned int threshold1;

    /* magnitude amount subtracted from every (non-DC) coefficient */
    threshold1 = qp*((1<<4) - bias) - 1;

#undef REQUANT_CORE
/*
 * Soft-requantize 4 groups of 4 coefficients, then 4x4-transpose them
 * (same punpck A/B/C/D shuffle as the hard-threshold variant).
 *   mm4 = threshold1 (broadcast), mm5 = 4 (rounding bias, broadcast)
 */
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
    "movq " #src0 ", %%mm0 \n" \
    "movq " #src1 ", %%mm1 \n" \
    "pxor %%mm6, %%mm6 \n" \
    "pxor %%mm7, %%mm7 \n" \
    "pcmpgtw %%mm0, %%mm6 \n" \
    "pcmpgtw %%mm1, %%mm7 \n" \
    "pxor %%mm6, %%mm0 \n" \
    "pxor %%mm7, %%mm1 \n" \
    "psubusw %%mm4, %%mm0 \n" \
    "psubusw %%mm4, %%mm1 \n" \
    "pxor %%mm6, %%mm0 \n" \
    "pxor %%mm7, %%mm1 \n" \
    "movq " #src2 ", %%mm2 \n" \
    "movq " #src3 ", %%mm3 \n" \
    "pxor %%mm6, %%mm6 \n" \
    "pxor %%mm7, %%mm7 \n" \
    "pcmpgtw %%mm2, %%mm6 \n" \
    "pcmpgtw %%mm3, %%mm7 \n" \
    "pxor %%mm6, %%mm2 \n" \
    "pxor %%mm7, %%mm3 \n" \
    "psubusw %%mm4, %%mm2 \n" \
    "psubusw %%mm4, %%mm3 \n" \
    "pxor %%mm6, %%mm2 \n" \
    "pxor %%mm7, %%mm3 \n" \
    \
    "paddsw %%mm5, %%mm0 \n" \
    "paddsw %%mm5, %%mm1 \n" \
    "paddsw %%mm5, %%mm2 \n" \
    "paddsw %%mm5, %%mm3 \n" \
    "psraw $3, %%mm0 \n" \
    "psraw $3, %%mm1 \n" \
    "psraw $3, %%mm2 \n" \
    "psraw $3, %%mm3 \n" \
    \
    "movq %%mm0, %%mm7 \n" \
    "punpcklwd %%mm2, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm2, %%mm7 \n" /*C*/ \
    "movq %%mm1, %%mm2 \n" \
    "punpcklwd %%mm3, %%mm1 \n" /*B*/ \
    "punpckhwd %%mm3, %%mm2 \n" /*D*/ \
    "movq %%mm0, %%mm3 \n" \
    "punpcklwd %%mm1, %%mm0 \n" /*A*/ \
    "punpckhwd %%mm7, %%mm3 \n" /*C*/ \
    "punpcklwd %%mm2, %%mm7 \n" /*B*/ \
    "punpckhwd %%mm2, %%mm1 \n" /*D*/ \
    \
    "movq %%mm0, " #dst0 " \n" \
    "movq %%mm7, " #dst1 " \n" \
    "movq %%mm3, " #dst2 " \n" \
    "movq %%mm1, " #dst3 " \n"

    __asm__ volatile(
        /* broadcast threshold1 (%2) and the rounding bias 4 (%3) to all
           four word lanes of mm4/mm5 */
        "movd %2, %%mm4 \n"
        "movd %3, %%mm5 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        "packssdw %%mm4, %%mm4 \n"
        "packssdw %%mm5, %%mm5 \n"
        REQUANT_CORE( (%1), 8(%1), 16(%1), 24(%1), (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate than needed?
    );
    /* DC coefficient is never thresholded: just round and descale it */
    dst[0] = (src[0] + 4) >> 3;
}
/*
 * Store one width x height slice of 16-bit accumulated pixel sums into the
 * 8-bit destination, applying ordered dither and descaling — MMX version.
 *
 * Per output row the asm preamble loads:
 *   mm3/mm4 — the row's 8 dither bytes widened to words (low/high half),
 *             pre-shifted right by log2_scale (%4)
 *   mm2     — the final right-shift amount, MAX_LEVEL - log2_scale (%5)
 * The inner loop then converts 8 pixels per iteration: read 8 words
 * (16 src bytes), add the dither, shift, and packuswb to 8 saturated
 * bytes, advancing src1 by 16 and dst1 by 8 until dst1 reaches
 * dst + width.  NOTE(review): this assumes width is a multiple of 8 —
 * confirm the caller guarantees that.
 */
static void store_slice_mmx(uint8_t *dst, const int16_t *src,
                            int dst_stride, int src_stride,
                            int width, int height, int log2_scale,
                            const uint8_t dither[8][8])
{
    int y;

    for (y = 0; y < height; y++) {
        uint8_t *dst1 = dst;
        const int16_t *src1 = src;
        __asm__ volatile(
            /* widen this row's dither bytes to words and pre-scale them */
            "movq (%3), %%mm3 \n"
            "movq (%3), %%mm4 \n"
            "movd %4, %%mm2 \n"
            "pxor %%mm0, %%mm0 \n"
            "punpcklbw %%mm0, %%mm3 \n"
            "punpckhbw %%mm0, %%mm4 \n"
            "psraw %%mm2, %%mm3 \n"
            "psraw %%mm2, %%mm4 \n"
            "movd %5, %%mm2 \n"
            /* 8 pixels per iteration: dither, descale, saturate to bytes */
            "1: \n"
            "movq (%0), %%mm0 \n"
            "movq 8(%0), %%mm1 \n"
            "paddw %%mm3, %%mm0 \n"
            "paddw %%mm4, %%mm1 \n"
            "psraw %%mm2, %%mm0 \n"
            "psraw %%mm2, %%mm1 \n"
            "packuswb %%mm1, %%mm0 \n"
            "movq %%mm0, (%1) \n"
            "add $16, %0 \n"
            "add $8, %1 \n"
            "cmp %2, %1 \n"
            " jb 1b \n"
            : "+r" (src1), "+r"(dst1)
            : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(MAX_LEVEL - log2_scale)
        );
        src += src_stride;
        dst += dst_stride;
    }
}
  204. #endif /* HAVE_MMX_INLINE */
  205. av_cold void ff_spp_init_x86(SPPContext *s)
  206. {
  207. #if HAVE_MMX_INLINE
  208. int cpu_flags = av_get_cpu_flags();
  209. if (cpu_flags & AV_CPU_FLAG_MMX) {
  210. s->store_slice = store_slice_mmx;
  211. switch (s->mode) {
  212. case 0: s->requantize = hardthresh_mmx; break;
  213. case 1: s->requantize = softthresh_mmx; break;
  214. }
  215. }
  216. #endif
  217. }