vf_ssim.asm

;*****************************************************************************
;* x86-optimized functions for ssim filter
;*
;* Copyright (C) 2015 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_1:    times 8 dw 1
ssim_c1: times 4 dd 416                     ;(.01*.01*255*255*64 + .5)
ssim_c2: times 4 dd 235963                  ;(.03*.03*255*255*64*63 + .5)
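
; ssim_c1/ssim_c2 are the usual SSIM stabilizing constants c1 = (K1*L)^2 and
; c2 = (K2*L)^2 with K1 = 0.01, K2 = 0.03, L = 255, pre-scaled as noted above
; for use with the integer sums over an 8x8 window (64 pixels) that
; ssim_end_line works on, and replicated so one load fills all four SIMD lanes.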

SECTION .text
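
;-----------------------------------------------------------------------------
; ssim_4x4_line: accumulate SSIM partial sums for one line of 4x4 blocks.
;
; A rough C equivalent of the argument list below (exact types are an
; assumption, not part of this file):
;
;   void ssim_4x4_line(const uint8_t *buf, ptrdiff_t buf_stride,
;                      const uint8_t *ref, ptrdiff_t ref_stride,
;                      int (*sums)[4], int w);
;
; For each of the w 4x4 blocks along the line, four dwords are stored to
; sums[block]:
;   [0] s1  = sum of buf pixels        [1] s2  = sum of ref pixels
;   [2] ss  = sum of buf^2 + ref^2     [3] s12 = sum of buf*ref
; Each loop iteration handles two adjacent blocks (8 pixels with XMM regs).
;-----------------------------------------------------------------------------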
%macro SSIM_4X4_LINE 1
%if ARCH_X86_64
cglobal ssim_4x4_line, 6, 8, %1, buf, buf_stride, ref, ref_stride, sums, w, buf_stride3, ref_stride3
%else
cglobal ssim_4x4_line, 5, 7, %1, buf, buf_stride, ref, ref_stride, sums, buf_stride3, ref_stride3
%define wd r5mp
%endif
    lea          ref_stride3q, [ref_strideq*3]
    lea          buf_stride3q, [buf_strideq*3]
%if notcpuflag(xop)
    pxor         m7, m7                     ; zero, for byte -> word unpacking
    mova        m15, [pw_1]                 ; for summing word pairs to dwords (pmaddwd)
%endif
.loop:
%if cpuflag(xop)
    pmovzxbw     m0, [bufq+buf_strideq*0]   ; a1
    pmovzxbw     m1, [refq+ref_strideq*0]   ; b1
    pmaddwd      m4, m0, m0                 ; ss  = a1 * a1
    pmaddwd      m6, m0, m1                 ; s12 = a1 * b1
    pmovzxbw     m2, [bufq+buf_strideq*1]   ; a2
    vpmadcswd    m4, m1, m1, m4             ; ss  += b1 * b1
    pmovzxbw     m3, [refq+ref_strideq*1]   ; b2
    paddw        m0, m2                     ; s1  = a1 + a2
    vpmadcswd    m4, m2, m2, m4             ; ss  += a2 * a2
    vpmadcswd    m6, m2, m3, m6             ; s12 += a2 * b2
    paddw        m1, m3                     ; s2  = b1 + b2
    vpmadcswd    m4, m3, m3, m4             ; ss  += b2 * b2
    pmovzxbw     m2, [bufq+buf_strideq*2]   ; a3
    pmovzxbw     m3, [refq+ref_strideq*2]   ; b3
    vpmadcswd    m4, m2, m2, m4             ; ss  += a3 * a3
    vpmadcswd    m6, m2, m3, m6             ; s12 += a3 * b3
    pmovzxbw     m5, [bufq+buf_stride3q]    ; a4
    pmovzxbw     m7, [refq+ref_stride3q]    ; b4
    vpmadcswd    m4, m3, m3, m4             ; ss  += b3 * b3
    vpmadcswd    m6, m5, m7, m6             ; s12 += a4 * b4
    paddw        m0, m2                     ; s1  += a3
    paddw        m1, m3                     ; s2  += b3
    vpmadcswd    m4, m5, m5, m4             ; ss  += a4 * a4
    paddw        m0, m5                     ; s1  += a4
    paddw        m1, m7                     ; s2  += b4
    vpmadcswd    m4, m7, m7, m4             ; ss  += b4 * b4
%else
    movh         m0, [bufq+buf_strideq*0]   ; a1
    movh         m1, [refq+ref_strideq*0]   ; b1
    movh         m2, [bufq+buf_strideq*1]   ; a2
    movh         m3, [refq+ref_strideq*1]   ; b2
    punpcklbw    m0, m7                     ; s1 [word]
    punpcklbw    m1, m7                     ; s2 [word]
    punpcklbw    m2, m7                     ; s1 [word]
    punpcklbw    m3, m7                     ; s2 [word]
    pmaddwd      m4, m0, m0                 ; a1 * a1
    pmaddwd      m5, m1, m1                 ; b1 * b1
    pmaddwd      m8, m2, m2                 ; a2 * a2
    pmaddwd      m9, m3, m3                 ; b2 * b2
    paddd        m4, m5                     ; ss
    paddd        m8, m9                     ; ss
    pmaddwd      m6, m0, m1                 ; a1 * b1 = s12
    pmaddwd      m5, m2, m3                 ; a2 * b2 = s12
    paddw        m0, m2                     ; s1 = a1 + a2
    paddw        m1, m3                     ; s2 = b1 + b2
    paddd        m6, m5                     ; s12
    paddd        m4, m8                     ; ss
    movh         m2, [bufq+buf_strideq*2]   ; a3
    movh         m3, [refq+ref_strideq*2]   ; b3
    movh         m5, [bufq+buf_stride3q]    ; a4
    movh         m8, [refq+ref_stride3q]    ; b4
    punpcklbw    m2, m7                     ; s1 [word]
    punpcklbw    m3, m7                     ; s2 [word]
    punpcklbw    m5, m7                     ; s1 [word]
    punpcklbw    m8, m7                     ; s2 [word]
    pmaddwd      m9, m2, m2                 ; a3 * a3
    pmaddwd     m10, m3, m3                 ; b3 * b3
    pmaddwd     m12, m5, m5                 ; a4 * a4
    pmaddwd     m13, m8, m8                 ; b4 * b4
    pmaddwd     m11, m2, m3                 ; a3 * b3 = s12
    pmaddwd     m14, m5, m8                 ; a4 * b4 = s12
    paddd        m9, m10                    ; ss
    paddd       m12, m13                    ; ss
    paddw        m0, m2                     ; s1 += a3
    paddw        m1, m3                     ; s2 += b3
    paddw        m0, m5                     ; s1 += a4
    paddw        m1, m8                     ; s2 += b4
    paddd        m6, m11                    ; s12
    paddd        m4, m9                     ; ss
    paddd        m6, m14                    ; s12
    paddd        m4, m12                    ; ss
%endif
    ; m0 = [word]  s1  a,a,a,a,b,b,b,b
    ; m1 = [word]  s2  a,a,a,a,b,b,b,b
    ; m4 = [dword] ss  a,a,b,b
    ; m6 = [dword] s12 a,a,b,b
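    ; fold each accumulator down to one {s1, s2, ss, s12} dword quadruple per
    ; 4x4 block (block a and block b) and store both quadruples to sums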
%if cpuflag(xop)
    vphaddwq     m0, m0                     ; [dword] s1  a, 0, b, 0
    vphaddwq     m1, m1                     ; [dword] s2  a, 0, b, 0
    vphadddq     m4, m4                     ; [dword] ss  a, 0, b, 0
    vphadddq     m6, m6                     ; [dword] s12 a, 0, b, 0
    punpckhdq    m2, m0, m1                 ; [dword] s1 b, s2 b, 0, 0
    punpckldq    m0, m1                     ; [dword] s1 a, s2 a, 0, 0
    punpckhdq    m3, m4, m6                 ; [dword] ss b, s12 b, 0, 0
    punpckldq    m4, m6                     ; [dword] ss a, s12 a, 0, 0
    punpcklqdq   m1, m2, m3                 ; [dword] b: s1, s2, ss, s12
    punpcklqdq   m0, m4                     ; [dword] a: s1, s2, ss, s12
%else
    pmaddwd      m0, m15                    ; [dword] s1  a,a,b,b
    pmaddwd      m1, m15                    ; [dword] s2  a,a,b,b
    phaddd       m0, m4                     ; [dword] s1 a, b, ss  a, b
    phaddd       m1, m6                     ; [dword] s2 a, b, s12 a, b
    punpckhdq    m2, m0, m1                 ; [dword] ss a, s12 a, ss b, s12 b
    punpckldq    m0, m1                     ; [dword] s1 a, s2 a, s1 b, s2 b
    punpckhqdq   m1, m0, m2                 ; [dword] b: s1, s2, ss, s12
    punpcklqdq   m0, m2                     ; [dword] a: s1, s2, ss, s12
%endif
    mova  [sumsq+     0], m0
    mova  [sumsq+mmsize], m1
    add        bufq, mmsize/2               ; advance 8 pixels = two 4x4 blocks
    add        refq, mmsize/2
    add       sumsq, mmsize*2               ; two blocks of 4 dwords each
    sub          wd, mmsize/8               ; w is counted in 4x4 blocks
    jg .loop
    RET
%endmacro

%if ARCH_X86_64
INIT_XMM ssse3
SSIM_4X4_LINE 16
%endif
%if HAVE_XOP_EXTERNAL
INIT_XMM xop
SSIM_4X4_LINE 8
%endif
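
;-----------------------------------------------------------------------------
; ssim_end_line: combine the 4x4-block sums of two vertically adjacent block
; lines (sum0 and sum1) into 8x8-window statistics, evaluate per window
;
;   ssim = ((2*fs1*fs2 + ssim_c1) * (2*covar + ssim_c2))
;        / ((fs1*fs1 + fs2*fs2 + ssim_c1) * (var + ssim_c2))
;
; with var = 64*fss - fs1*fs1 - fs2*fs2 and covar = 64*fs12 - fs1*fs2, and
; return the sum over all w windows as a float. A rough C equivalent of the
; argument list (the exact prototype is an assumption, not part of this file):
;
;   float ssim_end_line(const int (*sum0)[4], const int (*sum1)[4], int w);
;-----------------------------------------------------------------------------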
INIT_XMM sse4
cglobal ssim_end_line, 3, 3, 6, sum0, sum1, w
    pxor         m0, m0                     ; running ssim sum, 4 floats
.loop:
    mova         m1, [sum0q+mmsize*0]
    mova         m2, [sum0q+mmsize*1]
    mova         m3, [sum0q+mmsize*2]
    mova         m4, [sum0q+mmsize*3]
    paddd        m1, [sum1q+mmsize*0]
    paddd        m2, [sum1q+mmsize*1]
    paddd        m3, [sum1q+mmsize*2]
    paddd        m4, [sum1q+mmsize*3]       ; m1-m4 = block 0-3 sums over both lines
    paddd        m1, m2                     ; window 0 = blocks 0+1
    paddd        m2, m3                     ; window 1 = blocks 1+2
    paddd        m3, m4                     ; window 2 = blocks 2+3
    paddd        m4, [sum0q+mmsize*4]
    paddd        m4, [sum1q+mmsize*4]       ; window 3 = blocks 3+4
    TRANSPOSE4x4D 1, 2, 3, 4, 5
    ; m1 = fs1, m2 = fs2, m3 = fss, m4 = fs12 (one 8x8 window per lane)
    pslld        m3, 6                      ; fss * 64
    pslld        m4, 6                      ; fs12 * 64
    pmulld       m5, m1, m2                 ; fs1 * fs2
    pmulld       m1, m1                     ; fs1 * fs1
    pmulld       m2, m2                     ; fs2 * fs2
    psubd        m3, m1
    psubd        m4, m5                     ; covariance
    psubd        m3, m2                     ; variance
    ; m1 = fs1 * fs1, m2 = fs2 * fs2, m3 = variance, m4 = covariance, m5 = fs1 * fs2
    paddd        m4, m4                     ; 2 * covariance
    paddd        m5, m5                     ; 2 * fs1 * fs2
    paddd        m1, m2                     ; fs1 * fs1 + fs2 * fs2
    paddd        m3, [ssim_c2]              ; variance + ssim_c2
    paddd        m4, [ssim_c2]              ; 2 * covariance + ssim_c2
    paddd        m5, [ssim_c1]              ; 2 * fs1 * fs2 + ssim_c1
    paddd        m1, [ssim_c1]              ; fs1 * fs1 + fs2 * fs2 + ssim_c1
    ; convert to float
    cvtdq2ps     m3, m3
    cvtdq2ps     m4, m4
    cvtdq2ps     m5, m5
    cvtdq2ps     m1, m1
    mulps        m4, m5                     ; numerator
    mulps        m3, m1                     ; denominator
    divps        m4, m3                     ; ssim_endl
    addps        m0, m4                     ; ssim
    add       sum0q, mmsize*4
    add       sum1q, mmsize*4
    sub          wd, 4                      ; four windows per iteration
    jg .loop
    ; if w was not a multiple of 4, the last iteration also accumulated ssim
    ; values for 1-3 windows past the requested width; shift those surplus
    ; lanes of m4 down and subtract them from the accumulator again
    test         wd, wd
    jz .end
    add          wd, 4                      ; wd = valid windows in the last iteration
    test         wd, 2
    jz .skip2
    psrldq       m4, 8
.skip2:
    test         wd, 1
    jz .skip1
    psrldq       m4, 4
.skip1:
    subps        m0, m4
.end:
    ; horizontal sum of the four per-lane ssim accumulators in m0
    movhlps      m4, m0
    addps        m0, m4
    movss        m4, m0
    shufps       m0, m0, 1
    addss        m0, m4
%if ARCH_X86_32
    movss       r0m, m0
    fld         r0mp                        ; x86-32 returns the float in st0
%endif
    RET