input_rvv.S

/*
 * Copyright © 2024 Rémi Denis-Courmont.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/riscv/asm.S"
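
/*
 * Packed RGB to YUV input conversions for swscale, vectorised with RVV.
 * Register assignments below are inferred from the swscale input function
 * prototypes (not restated in this file):
 *   ToY:          a0 = Y dst, a1 = src, a4 = width, a5 = rgb2yuv table
 *   ToUV(_half):  a0 = U dst, a1 = V dst, a3 = src, a5 = width, a6 = table
 * The table holds int32_t coefficients (RY, GY, BY, RU, GU, BU, RV, GV, BV)
 * in 15-bit fixed point; the literal 15 in the shift expressions below is
 * RGB2YUV_SHIFT written out.
 */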

func ff_bgr24ToY_rvv, zve32x
        lw      t1, 8(a5) # BY
        lw      t3, 0(a5) # RY
        j       1f
endfunc

func ff_rgb24ToY_rvv, zve32x
        lw      t1, 0(a5) # RY
        lw      t3, 8(a5) # BY
1:
        lw      t2, 4(a5) # GY
        li      t4, (32 << (15 - 1)) + (1 << (15 - 7)) # luma offset + rounding
2:
        vsetvli t0, a4, e32, m8, ta, ma
        vlseg3e8.v v0, (a1) # colour components land in v0, v2, v4
        sub     a4, a4, t0
        vzext.vf4 v8, v0
        sh1add  t5, t0, t0 # t5 = 3 * t0
        vzext.vf4 v16, v2
        vzext.vf4 v24, v4
        add     a1, t5, a1
        vmul.vx v8, v8, t1
        vmacc.vx v8, t2, v16
        vmacc.vx v8, t3, v24
        vadd.vx v8, v8, t4
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v8, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        bnez    a4, 2b
        ret
endfunc
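
/*
 * Full-resolution chroma: each pass computes U and V together from the same
 * widened pixels. As with luma, the BGR entry point merely swaps the R and B
 * coefficients before jumping into the shared body.
 */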

func ff_bgr24ToUV_rvv, zve32x
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_rgb24ToUV_rvv, zve32x
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a7, (256 << (15 - 1)) + (1 << (15 - 7)) # chroma offset + rounding
2:
        vsetvli t0, a5, e32, m8, ta, ma
        vlseg3e8.v v0, (a3)
        sub     a5, a5, t0
        vzext.vf4 v16, v0
        sh1add  a6, t0, t0 # a6 = 3 * t0
        vzext.vf4 v24, v2
        vmul.vx v8, v16, t1
        add     a3, a6, a3
        vmul.vx v16, v16, t4
        vmacc.vx v8, t2, v24
        vmacc.vx v16, t5, v24
        vzext.vf4 v24, v4
        vadd.vx v8, v8, a7
        vadd.vx v16, v16, a7
        vmacc.vx v8, t3, v24
        vmacc.vx v16, t6, v24
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v8, 15 - 6
        vnsra.wi v4, v16, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        vse16.v v4, (a1)
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
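
/*
 * Half-resolution chroma: vlseg6e8 deinterleaves two adjacent 3-byte pixels
 * per element, and like components are summed (vwaddu) before the dot
 * product. The sums are twice as large, so the offset is doubled and the
 * final narrowing shift is one bit larger (15 - 5 instead of 15 - 6) to
 * compensate.
 */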

func ff_bgr24ToUV_half_rvv, zve32x
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_rgb24ToUV_half_rvv, zve32x
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a7, (256 << 15) + (1 << (15 - 6)) # doubled offset + rounding
2:
        vsetvli t0, a5, e8, m1, ta, ma
        vlseg6e8.v v0, (a3) # two pixels per segment: v0-v2 even, v3-v5 odd
        sh1add  a6, t0, t0
        vwaddu.vv v8, v0, v3 # sum the two pixels' like components
        sub     a5, a5, t0
        vwaddu.vv v10, v1, v4
        sh1add  a3, a6, a3 # a3 += 6 * t0
        vwaddu.vv v12, v2, v5
        vsetvli zero, zero, e32, m4, ta, ma
        vzext.vf2 v20, v8
        vzext.vf2 v24, v10
        vzext.vf2 v28, v12
        vmul.vx v0, v20, t1
        vmul.vx v4, v20, t4
        vmacc.vx v0, t2, v24
        vmacc.vx v4, t5, v24
        vmacc.vx v0, t3, v28
        vmacc.vx v4, t6, v28
        vadd.vx v0, v0, a7
        vadd.vx v4, v4, a7
        vsetvli zero, zero, e16, m2, ta, ma
        vnsra.wi v0, v0, 15 - 5
        vnsra.wi v2, v4, 15 - 5
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        vse16.v v2, (a1)
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
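
/*
 * 32-bit packed formats, generated from one macro body: \chr0 places R in
 * the low byte of each 32-bit pixel and \chr1 swaps R and B; \high marks
 * layouts whose alpha byte sits in the low byte, so the colour components
 * are pulled from the top of the word with shifts instead.
 */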

.macro rgba_input chr0, chr1, high
func ff_\chr1\()ToY_rvv, zve32x
        lw      t1, 8(a5) # BY
        lw      t3, 0(a5) # RY
        j       1f
endfunc

func ff_\chr0\()ToY_rvv, zve32x
        lw      t1, 0(a5) # RY
        lw      t3, 8(a5) # BY
1:
        lw      t2, 4(a5) # GY
        li      t4, (32 << (15 - 1)) + (1 << (15 - 7))
        li      t5, 0xff
2:
        vsetvli t0, a4, e32, m8, ta, ma
        vle32.v v0, (a1) # whole 32-bit pixels
        sub     a4, a4, t0
.if \high
        vsrl.vi v8, v0, 24
.else
        vand.vx v8, v0, t5
.endif
        sh2add  a1, t0, a1
        vsrl.vi v16, v0, 8 * (1 + \high)
        vmul.vx v24, v8, t1
        vand.vx v16, v16, t5
        vsrl.vi v8, v0, 8 * (2 - \high)
        vmacc.vx v24, t2, v16
        vand.vx v8, v8, t5
        vadd.vx v24, v24, t4
        vmacc.vx v24, t3, v8
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v24, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        bnez    a4, 2b
        ret
endfunc
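
/*
 * Packed-pixel ToUV: the same dot product as the 24-bit variants above, but
 * with components unpacked by shift-and-mask rather than a segmented load.
 */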

func ff_\chr1\()ToUV_rvv, zve32x
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_\chr0\()ToUV_rvv, zve32x
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a6, 0xff
        li      a7, (256 << (15 - 1)) + (1 << (15 - 7))
2:
        vsetvli t0, a5, e32, m8, ta, ma
        vle32.v v0, (a3)
        sub     a5, a5, t0
.if \high
        vsrl.vi v24, v0, 24
.else
        vand.vx v24, v0, a6
.endif
        sh2add  a3, t0, a3
        vsrl.vi v8, v0, 8 * (1 + \high)
        vmul.vx v16, v24, t1
        vand.vx v8, v8, a6
        vmul.vx v24, v24, t4
        vmacc.vx v16, t2, v8
        vsrl.vi v0, v0, 8 * (2 - \high)
        vmacc.vx v24, t5, v8
        vand.vx v0, v0, a6
        vadd.vx v16, v16, a7
        vadd.vx v24, v24, a7
        vmacc.vx v16, t3, v0
        vmacc.vx v24, t6, v0
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v16, 15 - 6
        vnsra.wi v4, v24, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        vse16.v v4, (a1)
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
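
/*
 * Half-resolution: vlseg2e32 splits even- and odd-indexed pixels into
 * separate register groups so horizontal pairs can be summed before the
 * multiply-accumulate.
 */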

func ff_\chr1\()ToUV_half_rvv, zve32x
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_\chr0\()ToUV_half_rvv, zve32x
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a6, 0xff
        li      a7, (256 << 15) + (1 << (15 - 6))
2:
        vsetvli t0, a5, e32, m4, ta, ma
        vlseg2e32.v v0, (a3) # even pixels -> v0, odd pixels -> v4
        sub     a5, a5, t0
.if \high
        vsrl.vi v8, v0, 24
        vsrl.vi v12, v4, 24
.else
        vand.vx v8, v0, a6
        vand.vx v12, v4, a6
.endif
        sh3add  a3, t0, a3 # a3 += 8 * t0
        vsrl.vi v16, v0, 8 * (1 + \high)
        vsrl.vi v20, v4, 8 * (1 + \high)
        vsrl.vi v24, v0, 8 * (2 - \high)
        vsrl.vi v28, v4, 8 * (2 - \high)
        vand.vx v16, v16, a6
        vand.vx v20, v20, a6
        vand.vx v24, v24, a6
        vand.vx v28, v28, a6
        vadd.vv v8, v8, v12
        vadd.vv v16, v16, v20
        vadd.vv v24, v24, v28
        vmul.vx v0, v8, t1
        vmul.vx v4, v8, t4
        vmacc.vx v0, t2, v16
        vmacc.vx v4, t5, v16
        vmacc.vx v0, t3, v24
        vmacc.vx v4, t6, v24
        vadd.vx v0, v0, a7
        vadd.vx v4, v4, a7
        vsetvli zero, zero, e16, m2, ta, ma
        vnsra.wi v0, v0, 15 - 5
        vnsra.wi v2, v4, 15 - 5
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        vse16.v v2, (a1)
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
.endm
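
/* Instantiate the 32-bit entry points for each byte order. */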
rgba_input rgba32, bgra32, 0
rgba_input abgr32, argb32, 1