/* gradfun.c — x86 SIMD (MMX2/SSSE3/SSE2) implementation of the gradfun filter */
  1. /*
* Copyright (C) 2009 Loren Merritt <lorenm@u.washington.edu>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/cpu.h"
  21. #include "libavutil/x86_cpu.h"
  22. #include "libavfilter/gradfun.h"
/* 16-byte-aligned constant vectors used by the asm kernels below:
 * pw_7f: eight words of 0x007F (the "127" used in m = -max(0, 127 - m))
 * pw_ff: eight words of 0x00FF (mask to extract the low byte of each word) */
DECLARE_ALIGNED(16, static const uint16_t, pw_7f)[8] = {0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F};
DECLARE_ALIGNED(16, static const uint16_t, pw_ff)[8] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
/**
 * MMX2 version of the gradfun per-line filter: for each pixel, pull it
 * toward the blurred DC value and add ordered dither, clipping to 8 bits.
 *
 * @param dst     output line (width bytes written)
 * @param src     input line (width bytes read)
 * @param dc      blurred DC values, one 16-bit entry per 2 pixels
 * @param width   number of pixels; the tail not divisible by 4 is handled
 *                by the C fallback before the asm loop runs
 * @param thresh  filter strength scale (used as abs(delta)*thresh >> 16)
 * @param dithers 4x8 ordered-dither table for this line
 */
void ff_gradfun_filter_line_mmx2(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
#if HAVE_MMX
    intptr_t x;
    if (width & 3) {
        // Process the last (width % 4) pixels in C, then shrink width so the
        // asm loop below only ever handles whole groups of 4.
        x = width & ~3;
        ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
        width = x;
    }
    // Index from -width up to 0 so the loop needs only "add/jl", no compare.
    // Pointer operands are pre-advanced by width to compensate.
    x = -width;
    // NOTE(review): mm0-mm7 are clobbered but GCC cannot name MMX registers
    // in the clobber list portably; the "memory" clobber + emms is the
    // original code's contract — confirm against the project's asm policy.
    __asm__ volatile(
        "movd %4, %%mm5 \n"            // mm5 = thresh, broadcast below
        "pxor %%mm7, %%mm7 \n"         // mm7 = 0 (also used as unpack zero)
        "pshufw $0, %%mm5, %%mm5 \n"   // splat thresh to all 4 words
        "movq %6, %%mm6 \n"            // mm6 = pw_7f (constant 127)
        "movq %5, %%mm4 \n"            // mm4 = dither row
        "1: \n"
        "movd (%2,%0), %%mm0 \n"       // load 4 src pixels
        "movd (%3,%0), %%mm1 \n"       // load 2 dc words (each covers 2 px)
        "punpcklbw %%mm7, %%mm0 \n"    // widen pixels to 16 bit
        "punpcklwd %%mm1, %%mm1 \n"    // duplicate each dc word per pixel
        "psllw $7, %%mm0 \n"           // pix in 9.7 fixed point
        "pxor %%mm2, %%mm2 \n"
        "psubw %%mm0, %%mm1 \n" // delta = dc - pix
        "psubw %%mm1, %%mm2 \n"        // mm2 = -delta
        "pmaxsw %%mm1, %%mm2 \n"       // mm2 = abs(delta) (no pabsw on MMX2)
        "pmulhuw %%mm5, %%mm2 \n" // m = abs(delta) * thresh >> 16
        "psubw %%mm6, %%mm2 \n"        // m - 127
        "pminsw %%mm7, %%mm2 \n" // m = -max(0, 127-m)
        "pmullw %%mm2, %%mm2 \n"       // m^2 (negatives square away)
        "paddw %%mm4, %%mm0 \n" // pix += dither
        "pmulhw %%mm2, %%mm1 \n"
        "psllw $2, %%mm1 \n" // m = m*m*delta >> 14
        "paddw %%mm1, %%mm0 \n" // pix += m
        "psraw $7, %%mm0 \n"           // back from 9.7 fixed point
        "packuswb %%mm0, %%mm0 \n"     // clip to [0,255]
        "movd %%mm0, (%1,%0) \n" // dst = clip(pix>>7)
        "add $4, %0 \n"
        "jl 1b \n"
        "emms \n"                      // leave MMX state clean for FPU code
        :"+r"(x)
        :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
         "rm"(thresh), "m"(*dithers), "m"(*pw_7f)
        :"memory"
    );
#endif
}
/**
 * SSSE3 version of the gradfun per-line filter; same algorithm as the MMX2
 * variant but 8 pixels per iteration, using pabsw and pmulhrsw.
 *
 * @param dst     output line (width bytes written)
 * @param src     input line (width bytes read)
 * @param dc      blurred DC values, one 16-bit entry per 2 pixels
 * @param width   number of pixels; the tail not divisible by 8 is handled
 *                by the C fallback before the asm loop runs
 * @param thresh  filter strength scale (used as abs(delta)*thresh >> 16)
 * @param dithers 4x8 ordered-dither table for this line
 */
void ff_gradfun_filter_line_ssse3(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
#if HAVE_SSSE3
    intptr_t x;
    if (width & 7) {
        // could be 10% faster if I somehow eliminated this
        x = width & ~7;
        ff_gradfun_filter_line_c(dst + x, src + x, dc + x / 2, width - x, thresh, dithers);
        width = x;
    }
    // Negative index counting up to 0; pointer operands pre-advanced by width.
    x = -width;
    __asm__ volatile(
        "movd %4, %%xmm5 \n"             // xmm5 = thresh
        "pxor %%xmm7, %%xmm7 \n"         // xmm7 = 0
        "pshuflw $0,%%xmm5, %%xmm5 \n"   // splat thresh into low 4 words...
        "movdqa %6, %%xmm6 \n"           // xmm6 = pw_7f (constant 127)
        "punpcklqdq %%xmm5, %%xmm5 \n"   // ...then into all 8 words
        "movdqa %5, %%xmm4 \n"           // xmm4 = dither row
        "1: \n"
        "movq (%2,%0), %%xmm0 \n"        // load 8 src pixels
        "movq (%3,%0), %%xmm1 \n"        // load 4 dc words
        "punpcklbw %%xmm7, %%xmm0 \n"    // widen pixels to 16 bit
        "punpcklwd %%xmm1, %%xmm1 \n"    // duplicate each dc word per pixel
        "psllw $7, %%xmm0 \n"            // pix in 9.7 fixed point
        "psubw %%xmm0, %%xmm1 \n" // delta = dc - pix
        "pabsw %%xmm1, %%xmm2 \n"        // SSSE3 abs replaces pxor/psub/pmax
        "pmulhuw %%xmm5, %%xmm2 \n" // m = abs(delta) * thresh >> 16
        "psubw %%xmm6, %%xmm2 \n"        // m - 127
        "pminsw %%xmm7, %%xmm2 \n" // m = -max(0, 127-m)
        "pmullw %%xmm2, %%xmm2 \n"       // m^2
        "psllw $1, %%xmm2 \n"            // pre-shift so pmulhrsw's >>15 nets >>14
        "paddw %%xmm4, %%xmm0 \n" // pix += dither
        "pmulhrsw %%xmm2, %%xmm1 \n" // m = m*m*delta >> 14
        "paddw %%xmm1, %%xmm0 \n" // pix += m
        "psraw $7, %%xmm0 \n"            // back from 9.7 fixed point
        "packuswb %%xmm0, %%xmm0 \n"     // clip to [0,255]
        "movq %%xmm0, (%1,%0) \n" // dst = clip(pix>>7)
        "add $8, %0 \n"
        "jl 1b \n"
        :"+&r"(x)
        :"r"(dst+width), "r"(src+width), "r"(dc+width/2),
         "rm"(thresh), "m"(*dithers), "m"(*pw_7f)
        :"memory"
    );
#endif // HAVE_SSSE3
}
/**
 * SSE2 vertical blur/boxsum step: sums pixel pairs from two source rows,
 * adds them into the running column accumulator buf, and writes the rolling
 * difference (new sum - value displaced from buf) to dc.
 *
 * @param dc           output: per-column running DC values (width words)
 * @param buf          ring buffer of column sums; read and overwritten in place
 * @param buf1         previous row-pair sums to add in
 * @param src          source pixels; two rows are read, src and src+src_linesize
 * @param src_linesize byte stride between the two source rows
 * @param width        number of 16-bit output elements
 *
 * NOTE(review): the guard is HAVE_SSE but every instruction used (movdqa,
 * psrlw, paddw on xmm) is SSE2, matching the _sse2 suffix — confirm whether
 * HAVE_SSE2 was intended here.
 */
void ff_gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
#if HAVE_SSE
// One asm body, parameterized on the load instruction so the aligned path
// can use movdqa and the unaligned path movdqu. x counts 16-bit element
// bytes from -2*width up to 0 (add/jl loop, pointers pre-advanced).
#define BLURV(load)\
    intptr_t x = -2*width;\
    __asm__ volatile(\
        "movdqa %6, %%xmm7 \n"\
        "1: \n"\
        load" (%4,%0), %%xmm0 \n"\
        load" (%5,%0), %%xmm1 \n"\
        "movdqa %%xmm0, %%xmm2 \n"\
        "movdqa %%xmm1, %%xmm3 \n"\
        "psrlw $8, %%xmm0 \n"\
        "psrlw $8, %%xmm1 \n"\
        "pand %%xmm7, %%xmm2 \n"\
        "pand %%xmm7, %%xmm3 \n"\
        "paddw %%xmm1, %%xmm0 \n"\
        "paddw %%xmm3, %%xmm2 \n"\
        "paddw %%xmm2, %%xmm0 \n"\
        "paddw (%2,%0), %%xmm0 \n"\
        "movdqa (%1,%0), %%xmm1 \n"\
        "movdqa %%xmm0, (%1,%0) \n"\
        "psubw %%xmm1, %%xmm0 \n"\
        "movdqa %%xmm0, (%3,%0) \n"\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(x)\
        :"r"(buf+width),\
         "r"(buf1+width),\
         "r"(dc+width),\
         "r"(src+width*2),\
         "r"(src+width*2+src_linesize),\
         "m"(*pw_ff)\
        :"memory"\
    );
    // Pick the load form: aligned loads are only safe when both the base
    // pointer and the stride are 16-byte aligned.
    if (((intptr_t) src | src_linesize) & 15) {
        BLURV("movdqu");
    } else {
        BLURV("movdqa");
    }
#endif // HAVE_SSE
}