me_cmp_neon.S

/*
 * Copyright (c) 2022 Jonathan Swinney <jswinney@amazon.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

function ff_pix_abs16_neon, export=1
        // x0           unused
        // x1           uint8_t *pix1
        // x2           uint8_t *pix2
        // x3           ptrdiff_t stride
        // w4           int h
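        // Scalar reference for orientation: a sketch of the C this routine
        // vectorizes (paraphrased, not verbatim from libavcodec/me_cmp.c):
        //     for (i = 0; i < h; i++) {
        //         for (j = 0; j < 16; j++)
        //             s += abs(pix1[j] - pix2[j]);
        //         pix1 += stride;
        //         pix2 += stride;
        //     }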
        cmp             w4, #4                      // if h < 4, jump to completion section
        movi            v18.4S, #0                  // clear result accumulator
        b.lt            2f
1:
        ld1             {v0.16b}, [x1], x3          // load pix1
        ld1             {v4.16b}, [x2], x3          // load pix2
        ld1             {v1.16b}, [x1], x3          // load pix1
        ld1             {v5.16b}, [x2], x3          // load pix2
        uabdl           v16.8h, v0.8b, v4.8b        // absolute difference 0..7
        uabdl2          v17.8h, v0.16b, v4.16b      // absolute difference 8..15
        ld1             {v2.16b}, [x1], x3          // load pix1
        ld1             {v6.16b}, [x2], x3          // load pix2
        uabal           v16.8h, v1.8b, v5.8b        // absolute difference accumulate
        uabal2          v17.8h, v1.16b, v5.16b
        ld1             {v3.16b}, [x1], x3
        ld1             {v7.16b}, [x2], x3
        uabal           v16.8h, v2.8b, v6.8b
        uabal2          v17.8h, v2.16b, v6.16b
        sub             w4, w4, #4                  // h -= 4
        uabal           v16.8h, v3.8b, v7.8b
        uabal2          v17.8h, v3.16b, v7.16b
        cmp             w4, #4                      // if h >= 4, loop
        add             v16.8h, v16.8h, v17.8h
        uaddlv          s16, v16.8h                 // add up everything in v16 accumulator
        add             d18, d16, d18               // add to the end result register
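        // Note: flushing into the 64-bit d18 accumulator every 4 rows keeps each
        // 16-bit lane of v16/v17 at no more than 4 * 255 between flushes, so the
        // partial sums can never overflow regardless of h.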
        b.ge            1b
        cbnz            w4, 2f                      // if iterations remain, jump to completion section
        fmov            w0, s18                     // copy result to general purpose register
        ret
2:
        ld1             {v0.16b}, [x1], x3          // load pix1
        ld1             {v4.16b}, [x2], x3          // load pix2
        uabdl           v16.8h, v0.8b, v4.8b        // absolute difference 0..7
        uabal2          v16.8h, v0.16b, v4.16b      // absolute difference accumulate 8..15
        subs            w4, w4, #1                  // h -= 1
        addv            h16, v16.8h                 // add up v16
        add             d18, d16, d18               // add to result
        b.ne            2b
        fmov            w0, s18                     // copy result to general purpose register
        ret
endfunc

function ff_pix_abs16_xy2_neon, export=1
        // x0           unused
        // x1           uint8_t *pix1
        // x2           uint8_t *pix2
        // x3           ptrdiff_t stride
        // w4           int h
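        // Scalar reference for orientation: each output pixel compares pix1
        // against a 4-sample rounded average (a sketch, not verbatim C):
        //     avg4(a, b, c, d) = (a + b + c + d + 2) >> 2
        //     s += abs(pix1[j] - avg4(pix2[j], pix2[j+1], pix3[j], pix3[j+1]))
        // where pix3 = pix2 + stride.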
        add             x5, x2, x3                  // use x5 to hold uint8_t *pix3
        movi            v0.2d, #0                   // initialize the result register
        // Load initial pix2 values for either the unrolled version or completion version.
        ldur            q4, [x2, #1]                // load pix2+1
        ldr             q3, [x2]                    // load pix2
        uaddl           v2.8h, v4.8b, v3.8b         // pix2 + pix2+1 0..7
        uaddl2          v3.8h, v4.16b, v3.16b       // pix2 + pix2+1 8..15
        cmp             w4, #4                      // if h < 4 jump to the completion version
        b.lt            2f
1:
        // This is an unrolled implementation. It completes 4 rows of the C loop per branch.
        // Across rows, the pix2 of row i+1 is the pix3 of row i, so each row needs only
        // three new loads (pix1, pix3, pix3+1), plus two loads before the loop to prime
        // the first row.
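        // Example with h == 4: rows r..r+4 of pix2 contribute. Row r was summed
        // into v2/v3 before the loop; each unrolled step then loads one new row
        // as pix3 and reuses the previous row's (pix2 + pix2+1) sums.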
        ldur            q5, [x5, #1]                // load pix3+1
        ld1             {v4.16b}, [x5], x3          // load pix3
        ld1             {v1.16b}, [x1], x3          // load pix1
        ldur            q7, [x5, #1]                // load pix3+1
        ld1             {v6.16b}, [x5], x3          // load pix3
        ld1             {v16.16b}, [x1], x3         // load pix1
        // These blocks compute the average: avg(pix2[n], pix2[n+1], pix3[n], pix3[n+1])
        uaddl           v30.8h, v4.8b, v5.8b        // pix3 + pix3+1 0..7
        uaddl2          v31.8h, v4.16b, v5.16b      // pix3 + pix3+1 8..15
        ldur            q19, [x5, #1]               // load pix3+1
        add             v23.8h, v2.8h, v30.8h       // add up 0..7, using pix2 + pix2+1 values from previous iteration
        add             v24.8h, v3.8h, v31.8h       // add up 8..15, using pix2 + pix2+1 values from previous iteration
        ld1             {v18.16b}, [x5], x3         // load pix3
        ld1             {v17.16b}, [x1], x3         // load pix1
        rshrn           v23.8b, v23.8h, #2          // shift right 2 0..7 (rounding shift right)
        rshrn2          v23.16b, v24.8h, #2         // shift right 2 8..15
        uaddl           v2.8h, v6.8b, v7.8b         // pix3 + pix3+1 0..7
        uaddl2          v3.8h, v6.16b, v7.16b       // pix3 + pix3+1 8..15
        ldur            q22, [x5, #1]               // load pix3+1
        add             v26.8h, v30.8h, v2.8h       // add up 0..7, using pix2 + pix2+1 values from pix3 above
        add             v27.8h, v31.8h, v3.8h       // add up 8..15, using pix2 + pix2+1 values from pix3 above
        uabdl           v24.8h, v1.8b, v23.8b       // absolute difference 0..7, i=0
        uabdl2          v23.8h, v1.16b, v23.16b     // absolute difference 8..15, i=0
        ld1             {v21.16b}, [x5], x3         // load pix3
        ld1             {v20.16b}, [x1], x3         // load pix1
        rshrn           v26.8b, v26.8h, #2          // shift right 2 0..7 (rounding shift right)
        rshrn2          v26.16b, v27.8h, #2         // shift right 2 8..15
        uaddl           v4.8h, v18.8b, v19.8b       // pix3 + pix3+1 0..7
        uaddl2          v5.8h, v18.16b, v19.16b     // pix3 + pix3+1 8..15
        add             v28.8h, v2.8h, v4.8h        // add up 0..7, using pix2 + pix2+1 values from pix3 above
        add             v29.8h, v3.8h, v5.8h        // add up 8..15, using pix2 + pix2+1 values from pix3 above
        rshrn           v28.8b, v28.8h, #2          // shift right 2 0..7 (rounding shift right)
        rshrn2          v28.16b, v29.8h, #2         // shift right 2 8..15
        uabal           v24.8h, v16.8b, v26.8b      // absolute difference 0..7, i=1
        uabal2          v23.8h, v16.16b, v26.16b    // absolute difference 8..15, i=1
        uaddl           v2.8h, v21.8b, v22.8b       // pix3 + pix3+1 0..7
        uaddl2          v3.8h, v21.16b, v22.16b     // pix3 + pix3+1 8..15
        add             v30.8h, v4.8h, v2.8h        // add up 0..7, using pix2 + pix2+1 values from pix3 above
        add             v31.8h, v5.8h, v3.8h        // add up 8..15, using pix2 + pix2+1 values from pix3 above
        rshrn           v30.8b, v30.8h, #2          // shift right 2 0..7 (rounding shift right)
        rshrn2          v30.16b, v31.8h, #2         // shift right 2 8..15
        uabal           v24.8h, v17.8b, v28.8b      // absolute difference 0..7, i=2
        uabal2          v23.8h, v17.16b, v28.16b    // absolute difference 8..15, i=2
        sub             w4, w4, #4                  // h -= 4
        uabal           v24.8h, v20.8b, v30.8b      // absolute difference 0..7, i=3
        uabal2          v23.8h, v20.16b, v30.16b    // absolute difference 8..15, i=3
        cmp             w4, #4                      // loop if h >= 4
        add             v4.8h, v23.8h, v24.8h
        uaddlv          s4, v4.8h                   // finish adding up accumulated values
        add             d0, d0, d4                  // add the value to the top level accumulator
        b.ge            1b
        cbnz            w4, 2f                      // if iterations remain, jump to completion section
        fmov            w0, s0                      // copy result to general purpose register
        ret
2:
        // v2 and v3 are set either at the end of this loop or by the unrolled version
        // above, which branches here to complete the remaining iterations when h % 4 != 0.
        ldur            q5, [x5, #1]                // load pix3+1
        ld1             {v4.16b}, [x5], x3          // load pix3
        ld1             {v1.16b}, [x1], x3          // load pix1
        subs            w4, w4, #1                  // decrement h
        uaddl           v18.8h, v4.8b, v5.8b        // pix3 + pix3+1 0..7
        uaddl2          v19.8h, v4.16b, v5.16b      // pix3 + pix3+1 8..15
        add             v16.8h, v2.8h, v18.8h       // add up 0..7, using pix2 + pix2+1 values from previous iteration
        add             v17.8h, v3.8h, v19.8h       // add up 8..15, using pix2 + pix2+1 values from previous iteration
        // divide by 4 to compute the average of values summed above
        urshr           v16.8h, v16.8h, #2          // shift right by 2 0..7 (rounding shift right)
        urshr           v17.8h, v17.8h, #2          // shift right by 2 8..15
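        // urshr #2 rounds before shifting: it computes (x + 2) >> 2, which is
        // exactly the avg4 rounding divide and matches the rshrn #2 used in the
        // unrolled loop above.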
        uxtl2           v7.8h, v1.16b               // 8->16 bits pix1 8..15
        uxtl            v1.8h, v1.8b                // 8->16 bits pix1 0..7
        uabd            v6.8h, v1.8h, v16.8h        // absolute difference 0..7
        uaba            v6.8h, v7.8h, v17.8h        // absolute difference accumulate 8..15
        mov             v2.16b, v18.16b             // this row's pix3 sums become next row's pix2 sums 0..7
        mov             v3.16b, v19.16b             // this row's pix3 sums become next row's pix2 sums 8..15
        uaddlv          s6, v6.8h                   // add up accumulator in v6
        add             d0, d0, d6                  // add to the final result
        b.ne            2b                          // loop if h > 0
        fmov            w0, s0                      // copy result to general purpose register
        ret
endfunc

function ff_pix_abs16_x2_neon, export=1
        // x0           unused
        // x1           uint8_t *pix1
        // x2           uint8_t *pix2
        // x3           ptrdiff_t stride
        // w4           int h
        cmp             w4, #4
        // initialize buffers
        movi            d20, #0
        add             x5, x2, #1                  // pix2 + 1
        b.lt            2f
        // process 4 rows per loop iteration
1:
        // abs(pix1[0] - avg2(pix2[0], pix2[1]))
        // avg2(a, b) = (((a) + (b) + 1) >> 1)
        // abs(x) = (x < 0 ? -x : x)
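        // urhadd implements avg2 directly: an unsigned rounding halving add,
        // (a + b + 1) >> 1, computed per byte lane without intermediate overflow.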
        ld1             {v1.16b}, [x2], x3
        ld1             {v2.16b}, [x5], x3
        urhadd          v30.16b, v1.16b, v2.16b
        ld1             {v0.16b}, [x1], x3
        uabdl           v16.8h, v0.8b, v30.8b
        ld1             {v4.16b}, [x2], x3
        uabdl2          v17.8h, v0.16b, v30.16b
        ld1             {v5.16b}, [x5], x3
        urhadd          v29.16b, v4.16b, v5.16b
        ld1             {v3.16b}, [x1], x3
        uabal           v16.8h, v3.8b, v29.8b
        ld1             {v7.16b}, [x2], x3
        uabal2          v17.8h, v3.16b, v29.16b
        ld1             {v22.16b}, [x5], x3
        urhadd          v28.16b, v7.16b, v22.16b
        ld1             {v6.16b}, [x1], x3
        uabal           v16.8h, v6.8b, v28.8b
        ld1             {v24.16b}, [x2], x3
        uabal2          v17.8h, v6.16b, v28.16b
        ld1             {v25.16b}, [x5], x3
        urhadd          v27.16b, v24.16b, v25.16b
        ld1             {v23.16b}, [x1], x3
        uabal           v16.8h, v23.8b, v27.8b
        uabal2          v17.8h, v23.16b, v27.16b
        sub             w4, w4, #4
        add             v16.8h, v16.8h, v17.8h
        uaddlv          s16, v16.8h
        cmp             w4, #4
        add             d20, d20, d16
        b.ge            1b
        cbz             w4, 3f
        // iterate by one
2:
        ld1             {v1.16b}, [x2], x3
        ld1             {v2.16b}, [x5], x3
        urhadd          v29.16b, v1.16b, v2.16b
        ld1             {v0.16b}, [x1], x3
        uabd            v28.16b, v0.16b, v29.16b
        uaddlv          h28, v28.16b
        subs            w4, w4, #1
        add             d20, d20, d28
        b.ne            2b
3:
        fmov            w0, s20
        ret
endfunc
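
// How these entry points are typically wired into FFmpeg's motion-estimation
// compare table. A hedged sketch of the init code (after
// libavcodec/aarch64/me_cmp_init_aarch64.c, from memory and not verbatim;
// consult the real file for the authoritative assignments):
//
//     av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
//     {
//         int cpu_flags = av_get_cpu_flags();
//
//         if (have_neon(cpu_flags)) {
//             c->pix_abs[0][0] = ff_pix_abs16_neon;     // full-pel SAD
//             c->pix_abs[0][1] = ff_pix_abs16_x2_neon;  // half-pel in x
//             c->pix_abs[0][3] = ff_pix_abs16_xy2_neon; // half-pel in x and y
//         }
//     }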