/*
 * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
 * Copyright (c) 2019-2021 Sebastian Pop <spop@amazon.com>
 * Copyright (c) 2022 Jonathan Swinney <jswinney@amazon.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

/*
;-----------------------------------------------------------------------------
; horizontal line scaling
;
; void hscale<source_width>to<intermediate_nbits>_<filterSize>_<opt>
; (SwsContext *c, int{16,32}_t *dst,
; int dstW, const uint{8,16}_t *src,
; const int16_t *filter,
; const int32_t *filterPos, int filterSize);
;
; Scale one horizontal line. Input is either 8-bit width or 16-bit width
; ($source_width can be either 8, 9, 10 or 16, difference is whether we have to
; downscale before multiplying). Filter is 14 bits. Output is either 15 bits
; (in int16_t) or 19 bits (in int32_t), as given in $intermediate_nbits. Each
; output pixel is generated from $filterSize input pixels; the position of
; the first pixel is given in filterPos[nOutputPixel].
;-----------------------------------------------------------------------------
*/
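/* For reference, the scalar operation that the NEON kernels below implement
 * is roughly the following C. This is a sketch following the prototype above,
 * not FFmpeg's actual C fallback, which may differ in details such as how the
 * result is clipped:
 *
 *     for (int i = 0; i < dstW; i++) {
 *         int val = 0;
 *         int srcPos = filterPos[i];
 *         for (int j = 0; j < filterSize; j++)
 *             val += src[srcPos + j] * filter[filterSize * i + j];
 *         dst[i] = av_clip(val >> 7, INT16_MIN, INT16_MAX); // 15-bit output
 *     }
 *
 * The kernels vectorize this loop nest: the X8 variant assumes filterSize is
 * a multiple of 8 and computes 4 output pixels per outer iteration, while the
 * _4 variant assumes filterSize == 4 and computes 8 output pixels at a time.
 */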
function ff_hscale8to15_X8_neon, export=1
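// Register usage (notes added for clarity, deduced from the code below; they
// mirror the comments on ff_hscale8to15_4_neon further down):
// x0  SwsContext *c (unused; w0 is reclaimed as scratch)
// x1  int16_t *dst
// x2  int dstW
// x3  const uint8_t *src
// x4  const int16_t *filter, advanced past the 4 consumed filters per outer iteration
// x5  const int32_t *filterPos
// x6  int filterSize, assumed to be a multiple of 8
// x7  filterSize*2, byte stride between consecutive per-pixel filters
// x17, x8, x0, x11  src pointers for the 4 output pixels in flight
// x16, x12, x13     filter pointers; w15 inner-loop (filterSize) counter
// v0-v3 per-pixel accumulators; v4-v7, v16-v19 src and filter data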
        sbfiz           x7, x6, #1, #32             // filterSize*2 (*2 because int16)
1:      ldr             w8, [x5], #4                // filterPos[idx]
        ldr             w0, [x5], #4                // filterPos[idx + 1]
        ldr             w11, [x5], #4               // filterPos[idx + 2]
        ldr             w9, [x5], #4                // filterPos[idx + 3]
        mov             x16, x4                     // filter0 = filter
        add             x12, x16, x7                // filter1 = filter0 + filterSize*2
        add             x13, x12, x7                // filter2 = filter1 + filterSize*2
        add             x4, x13, x7                 // filter3 = filter2 + filterSize*2
        movi            v0.2D, #0                   // val sum part 1 (for dst[0])
        movi            v1.2D, #0                   // val sum part 2 (for dst[1])
        movi            v2.2D, #0                   // val sum part 3 (for dst[2])
        movi            v3.2D, #0                   // val sum part 4 (for dst[3])
        add             x17, x3, w8, UXTW           // srcp + filterPos[0]
        add             x8, x3, w0, UXTW            // srcp + filterPos[1]
        add             x0, x3, w11, UXTW           // srcp + filterPos[2]
        add             x11, x3, w9, UXTW           // srcp + filterPos[3]
        mov             w15, w6                     // filterSize counter
2:      ld1             {v4.8B}, [x17], #8          // srcp[filterPos[0] + {0..7}]
        ld1             {v5.8H}, [x16], #16         // load 8x16-bit filter values, part 1
        ld1             {v6.8B}, [x8], #8           // srcp[filterPos[1] + {0..7}]
        ld1             {v7.8H}, [x12], #16         // load 8x16-bit at filter+filterSize
        uxtl            v4.8H, v4.8B                // unpack part 1 to 16-bit
        smlal           v0.4S, v4.4H, v5.4H         // v0 accumulates srcp[filterPos[0] + {0..3}] * filter[{0..3}]
        smlal2          v0.4S, v4.8H, v5.8H         // v0 accumulates srcp[filterPos[0] + {4..7}] * filter[{4..7}]
        ld1             {v16.8B}, [x0], #8          // srcp[filterPos[2] + {0..7}]
        ld1             {v17.8H}, [x13], #16        // load 8x16-bit at filter+2*filterSize
        uxtl            v6.8H, v6.8B                // unpack part 2 to 16-bit
        smlal           v1.4S, v6.4H, v7.4H         // v1 accumulates srcp[filterPos[1] + {0..3}] * filter[{0..3}]
        uxtl            v16.8H, v16.8B              // unpack part 3 to 16-bit
        smlal           v2.4S, v16.4H, v17.4H       // v2 accumulates srcp[filterPos[2] + {0..3}] * filter[{0..3}]
        smlal2          v2.4S, v16.8H, v17.8H       // v2 accumulates srcp[filterPos[2] + {4..7}] * filter[{4..7}]
        ld1             {v18.8B}, [x11], #8         // srcp[filterPos[3] + {0..7}]
        smlal2          v1.4S, v6.8H, v7.8H         // v1 accumulates srcp[filterPos[1] + {4..7}] * filter[{4..7}]
        ld1             {v19.8H}, [x4], #16         // load 8x16-bit at filter+3*filterSize
        subs            w15, w15, #8                // j -= 8: processed 8/filterSize
        uxtl            v18.8H, v18.8B              // unpack part 4 to 16-bit
        smlal           v3.4S, v18.4H, v19.4H       // v3 accumulates srcp[filterPos[3] + {0..3}] * filter[{0..3}]
        smlal2          v3.4S, v18.8H, v19.8H       // v3 accumulates srcp[filterPos[3] + {4..7}] * filter[{4..7}]
        b.gt            2b                          // inner loop if filterSize not consumed completely
        addp            v0.4S, v0.4S, v1.4S         // part01 horizontal pair adding
        addp            v2.4S, v2.4S, v3.4S         // part23 horizontal pair adding
        addp            v0.4S, v0.4S, v2.4S         // part0123 horizontal pair adding
        subs            w2, w2, #4                  // dstW -= 4
        sqshrn          v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        st1             {v0.4H}, [x1], #8           // write to destination part0123
        b.gt            1b                          // loop until end of line
        ret
endfunc
function ff_hscale8to15_4_neon, export=1
// x0  SwsContext *c (not used)
// x1  int16_t *dst
// x2  int dstW
// x3  const uint8_t *src
// x4  const int16_t *filter
// x5  const int32_t *filterPos
// x6  int filterSize
// x8-x15 registers for gathering src data
// v0      madd accumulator 4S
// v1-v4   filter values (16 bit) 8H
// v5      madd accumulator 4S
// v16-v19 src values (8 bit) 8B
// This implementation has 4 sections:
// 1. Prefetch src data
// 2. Interleaved prefetching src data and madd
// 3. Complete madd
// 4. Complete remaining iterations when dstW % 8 != 0
        sub             sp, sp, #32                 // allocate 32 bytes on the stack
        cmp             w2, #16                     // if dstW < 16, skip to the last block used for wrapping up
        b.lt            2f
        // load 8 values from filterPos to be used as offsets into src
        ldp             w8, w9, [x5]                // filterPos[idx + 0], [idx + 1]
        ldp             w10, w11, [x5, #8]          // filterPos[idx + 2], [idx + 3]
        ldp             w12, w13, [x5, #16]         // filterPos[idx + 4], [idx + 5]
        ldp             w14, w15, [x5, #24]         // filterPos[idx + 6], [idx + 7]
        add             x5, x5, #32                 // advance filterPos
        // gather random access data from src into contiguous memory
        ldr             w8, [x3, w8, UXTW]          // src[filterPos[idx + 0]][0..3]
        ldr             w9, [x3, w9, UXTW]          // src[filterPos[idx + 1]][0..3]
        ldr             w10, [x3, w10, UXTW]        // src[filterPos[idx + 2]][0..3]
        ldr             w11, [x3, w11, UXTW]        // src[filterPos[idx + 3]][0..3]
        ldr             w12, [x3, w12, UXTW]        // src[filterPos[idx + 4]][0..3]
        ldr             w13, [x3, w13, UXTW]        // src[filterPos[idx + 5]][0..3]
        ldr             w14, [x3, w14, UXTW]        // src[filterPos[idx + 6]][0..3]
        ldr             w15, [x3, w15, UXTW]        // src[filterPos[idx + 7]][0..3]
        stp             w8, w9, [sp]                // *scratch_mem = { src[filterPos[idx + 0]][0..3], src[filterPos[idx + 1]][0..3] }
        stp             w10, w11, [sp, #8]          // *scratch_mem = { src[filterPos[idx + 2]][0..3], src[filterPos[idx + 3]][0..3] }
        stp             w12, w13, [sp, #16]         // *scratch_mem = { src[filterPos[idx + 4]][0..3], src[filterPos[idx + 5]][0..3] }
        stp             w14, w15, [sp, #24]         // *scratch_mem = { src[filterPos[idx + 6]][0..3], src[filterPos[idx + 7]][0..3] }
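// Note (added for clarity): the 32-byte scratch block now holds the 8 gathered
// pixels back to back, 4 bytes each:
//   sp + 0: p0[0] p0[1] p0[2] p0[3] p1[0] ... p7[3]
// The ld4 below de-interleaves this so that v16 holds byte 0 of all 8 pixels,
// v17 byte 1, v18 byte 2 and v19 byte 3, lining up each filter tap across
// lanes for the smlal/smlal2 accumulation.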
1:
        ld4             {v16.8B, v17.8B, v18.8B, v19.8B}, [sp] // transpose 8 bytes each from src into 4 registers
        // load 8 values from filterPos to be used as offsets into src
        ldp             w8, w9, [x5]                // filterPos[idx + 0], [idx + 1], next iteration
        ldp             w10, w11, [x5, #8]          // filterPos[idx + 2], [idx + 3], next iteration
        ldp             w12, w13, [x5, #16]         // filterPos[idx + 4], [idx + 5], next iteration
        ldp             w14, w15, [x5, #24]         // filterPos[idx + 6], [idx + 7], next iteration
        movi            v0.2D, #0                   // clear madd accumulator for idx 0..3
        movi            v5.2D, #0                   // clear madd accumulator for idx 4..7
        ld4             {v1.8H, v2.8H, v3.8H, v4.8H}, [x4], #64 // load filter idx + 0..7
        add             x5, x5, #32                 // advance filterPos
        // interleaved SIMD and prefetching intended to keep ld/st and vector pipelines busy
        uxtl            v16.8H, v16.8B              // unsigned extend long, convert src data to 16-bit
        uxtl            v17.8H, v17.8B              // unsigned extend long, convert src data to 16-bit
        ldr             w8, [x3, w8, UXTW]          // src[filterPos[idx + 0]], next iteration
        ldr             w9, [x3, w9, UXTW]          // src[filterPos[idx + 1]], next iteration
        uxtl            v18.8H, v18.8B              // unsigned extend long, convert src data to 16-bit
        uxtl            v19.8H, v19.8B              // unsigned extend long, convert src data to 16-bit
        ldr             w10, [x3, w10, UXTW]        // src[filterPos[idx + 2]], next iteration
        ldr             w11, [x3, w11, UXTW]        // src[filterPos[idx + 3]], next iteration
        smlal           v0.4S, v1.4H, v16.4H        // multiply accumulate inner loop j = 0, idx = 0..3
        smlal           v0.4S, v2.4H, v17.4H        // multiply accumulate inner loop j = 1, idx = 0..3
        ldr             w12, [x3, w12, UXTW]        // src[filterPos[idx + 4]], next iteration
        ldr             w13, [x3, w13, UXTW]        // src[filterPos[idx + 5]], next iteration
        smlal           v0.4S, v3.4H, v18.4H        // multiply accumulate inner loop j = 2, idx = 0..3
        smlal           v0.4S, v4.4H, v19.4H        // multiply accumulate inner loop j = 3, idx = 0..3
        ldr             w14, [x3, w14, UXTW]        // src[filterPos[idx + 6]], next iteration
        ldr             w15, [x3, w15, UXTW]        // src[filterPos[idx + 7]], next iteration
        smlal2          v5.4S, v1.8H, v16.8H        // multiply accumulate inner loop j = 0, idx = 4..7
        smlal2          v5.4S, v2.8H, v17.8H        // multiply accumulate inner loop j = 1, idx = 4..7
        stp             w8, w9, [sp]                // *scratch_mem = { src[filterPos[idx + 0]][0..3], src[filterPos[idx + 1]][0..3] }
        stp             w10, w11, [sp, #8]          // *scratch_mem = { src[filterPos[idx + 2]][0..3], src[filterPos[idx + 3]][0..3] }
        smlal2          v5.4S, v3.8H, v18.8H        // multiply accumulate inner loop j = 2, idx = 4..7
        smlal2          v5.4S, v4.8H, v19.8H        // multiply accumulate inner loop j = 3, idx = 4..7
        stp             w12, w13, [sp, #16]         // *scratch_mem = { src[filterPos[idx + 4]][0..3], src[filterPos[idx + 5]][0..3] }
        stp             w14, w15, [sp, #24]         // *scratch_mem = { src[filterPos[idx + 6]][0..3], src[filterPos[idx + 7]][0..3] }
        sub             w2, w2, #8                  // dstW -= 8
        sqshrn          v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        sqshrn          v1.4H, v5.4S, #7            // shift and clip the 4x16-bit final values
        st1             {v0.4H, v1.4H}, [x1], #16   // write to dst[idx + 0..7]
        cmp             w2, #16                     // stay in the main loop while at least 16 output pixels remain
        b.ge            1b
        // last full iteration
        ld4             {v16.8B, v17.8B, v18.8B, v19.8B}, [sp]
        ld4             {v1.8H, v2.8H, v3.8H, v4.8H}, [x4], #64 // load filter idx + 0..7
        movi            v0.2D, #0                   // clear madd accumulator for idx 0..3
        movi            v5.2D, #0                   // clear madd accumulator for idx 4..7
        uxtl            v16.8H, v16.8B              // unsigned extend long, convert src data to 16-bit
        uxtl            v17.8H, v17.8B              // unsigned extend long, convert src data to 16-bit
        uxtl            v18.8H, v18.8B              // unsigned extend long, convert src data to 16-bit
        uxtl            v19.8H, v19.8B              // unsigned extend long, convert src data to 16-bit
        smlal           v0.4S, v1.4H, v16.4H        // multiply accumulate inner loop j = 0, idx = 0..3
        smlal           v0.4S, v2.4H, v17.4H        // multiply accumulate inner loop j = 1, idx = 0..3
        smlal           v0.4S, v3.4H, v18.4H        // multiply accumulate inner loop j = 2, idx = 0..3
        smlal           v0.4S, v4.4H, v19.4H        // multiply accumulate inner loop j = 3, idx = 0..3
        smlal2          v5.4S, v1.8H, v16.8H        // multiply accumulate inner loop j = 0, idx = 4..7
        smlal2          v5.4S, v2.8H, v17.8H        // multiply accumulate inner loop j = 1, idx = 4..7
        smlal2          v5.4S, v3.8H, v18.8H        // multiply accumulate inner loop j = 2, idx = 4..7
        smlal2          v5.4S, v4.8H, v19.8H        // multiply accumulate inner loop j = 3, idx = 4..7
        subs            w2, w2, #8                  // dstW -= 8
        sqshrn          v0.4H, v0.4S, #7            // shift and clip the 4x16-bit final values
        sqshrn          v1.4H, v5.4S, #7            // shift and clip the 4x16-bit final values
        st1             {v0.4H, v1.4H}, [x1], #16   // write to dst[idx + 0..7]
        cbnz            w2, 2f                      // if >0 iterations remain, jump to the wrap up section
        add             sp, sp, #32                 // clean up stack
        ret
        // finish up when dstW % 8 != 0 or dstW < 16
2:
        // load src
        ldr             w8, [x5], #4                // filterPos[i]
        add             x9, x3, w8, UXTW            // calculate the address for src load
        ld1             {v5.S}[0], [x9]             // src[filterPos[i] + 0..3]
        // load filter
        ld1             {v6.4H}, [x4], #8           // filter[filterSize * i + 0..3]
        uxtl            v5.8H, v5.8B                // unsigned extend long, convert src data to 16-bit
        smull           v0.4S, v5.4H, v6.4H         // 4 iterations of src[...] * filter[...]
        addv            s0, v0.4S                   // add up products of src and filter values
        sqshrn          h0, s0, #7                  // shift and clip the 16-bit final value
        st1             {v0.H}[0], [x1], #2         // dst[i] = ...
        sub             w2, w2, #1                  // dstW--
        cbnz            w2, 2b
        add             sp, sp, #32                 // clean up stack
        ret
endfunc
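/* Added for clarity: a sketch of how a caller might dispatch to these kernels
 * based on filterSize. The function pointer and dispatch site are illustrative
 * assumptions, not FFmpeg's actual init code; only the two exported symbols
 * above are real:
 *
 *     void (*hscale)(void *c, int16_t *dst, int dstW, const uint8_t *src,
 *                    const int16_t *filter, const int32_t *filterPos,
 *                    int filterSize);
 *     if (filterSize == 4)
 *         hscale = ff_hscale8to15_4_neon;
 *     else if (filterSize % 8 == 0)
 *         hscale = ff_hscale8to15_X8_neon;
 *     // otherwise, fall back to a scalar loop like the one sketched
 *     // near the top of this file
 */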