/*
 * Copyright © 2024 Rémi Denis-Courmont.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/riscv/asm.S"
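
/*
 * RISC-V Vector (RVV) input readers for packed RGB pixel formats.
 *
 * The coefficient table (a5 for the Y readers, a6 for the U/V readers)
 * is laid out as nine 32-bit words, RY, GY, BY, RU, GU, BU, RV, GV, BV,
 * matching the offsets loaded below.  All arithmetic follows swscale's
 * 15-bit fixed-point convention; assuming that convention, a scalar
 * sketch of the luma computation is:
 *
 *   Y[i] = (RY * r + GY * g + BY * b
 *           + (32 << (15 - 1)) + (1 << (15 - 7))) >> (15 - 6)
 *
 * i.e. 16-bit output with 6 extra fractional bits.  Each BGR entry point
 * merely swaps the R/B coefficient loads, then branches into the body of
 * its RGB counterpart.
 */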
func ff_bgr24ToY_rvv, zve32x
        lpad    0
        lw      t1, 8(a5) # BY
        lw      t3, 0(a5) # RY
        j       1f
endfunc

func ff_rgb24ToY_rvv, zve32x, zba
        lpad    0
        lw      t1, 0(a5) # RY
        lw      t3, 8(a5) # BY
1:
        lw      t2, 4(a5) # GY
        li      t4, (32 << (15 - 1)) + (1 << (15 - 7)) # rounding bias
2:
        vsetvli t0, a4, e32, m8, ta, ma
        vlseg3e8.v v0, (a1) # deinterleave the three colour channels
        sub     a4, a4, t0
        vzext.vf4 v8, v0
        sh1add  t5, t0, t0 # t5 = 3 * t0
        vzext.vf4 v16, v2
        vzext.vf4 v24, v4
        add     a1, t5, a1
        vmul.vx v8, v8, t1
        vmacc.vx v8, t2, v16
        vmacc.vx v8, t3, v24
        vadd.vx v8, v8, t4
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v8, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        bnez    a4, 2b
        ret
endfunc
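
/*
 * The U/V readers compute both chroma planes in a single pass.  The bias
 * (256 << (15 - 1)) >> (15 - 6) = 128 << 6 centres the chroma output on
 * 128, again leaving 6 extra fractional bits.
 */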

func ff_bgr24ToUV_rvv, zve32x
        lpad    0
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_rgb24ToUV_rvv, zve32x, zba
        lpad    0
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a7, (256 << (15 - 1)) + (1 << (15 - 7)) # chroma bias + rounding
2:
        vsetvli t0, a5, e32, m8, ta, ma
        vlseg3e8.v v0, (a3) # deinterleave the three colour channels
        sub     a5, a5, t0
        vzext.vf4 v16, v0
        sh1add  a6, t0, t0 # a6 = 3 * t0
        vzext.vf4 v24, v2
        vmul.vx v8, v16, t1
        add     a3, a6, a3
        vmul.vx v16, v16, t4
        vmacc.vx v8, t2, v24
        vmacc.vx v16, t5, v24
        vzext.vf4 v24, v4
        vadd.vx v8, v8, a7
        vadd.vx v16, v16, a7
        vmacc.vx v8, t3, v24
        vmacc.vx v16, t6, v24
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v8, 15 - 6
        vnsra.wi v4, v16, 15 - 6
        vse16.v v0, (a0) # store U
        sh1add  a0, t0, a0
        vse16.v v4, (a1) # store V
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
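
/*
 * The _half variants produce horizontally subsampled chroma: each output
 * sample covers two adjacent input pixels.  The pixel pairs are summed
 * rather than averaged, so the bias is doubled (256 << 15) and the final
 * narrowing shift is one bit shorter (15 - 5).
 */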

func ff_bgr24ToUV_half_rvv, zve32x
        lpad    0
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_rgb24ToUV_half_rvv, zve32x, zba
        lpad    0
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a7, (256 << 15) + (1 << (15 - 6)) # chroma bias + rounding
2:
        vsetvli t0, a5, e8, m1, ta, ma
        vlseg6e8.v v0, (a3) # deinterleave two pixels' worth of channels
        sh1add  a6, t0, t0 # a6 = 3 * t0
        vwaddu.vv v8, v0, v3 # sum the channel pairs
        sub     a5, a5, t0
        vwaddu.vv v10, v1, v4
        sh1add  a3, a6, a3 # a3 += 6 * t0
        vwaddu.vv v12, v2, v5
        vsetvli zero, zero, e32, m4, ta, ma
        vzext.vf2 v20, v8
        vzext.vf2 v24, v10
        vzext.vf2 v28, v12
        vmul.vx v0, v20, t1
        vmul.vx v4, v20, t4
        vmacc.vx v0, t2, v24
        vmacc.vx v4, t5, v24
        vmacc.vx v0, t3, v28
        vmacc.vx v4, t6, v28
        vadd.vx v0, v0, a7
        vadd.vx v4, v4, a7
        vsetvli zero, zero, e16, m2, ta, ma
        vnsra.wi v0, v0, 15 - 5
        vnsra.wi v2, v4, 15 - 5
        vse16.v v0, (a0) # store U
        sh1add  a0, t0, a0
        vse16.v v2, (a1) # store V
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
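
/*
 * The macro below generates the same three readers for 4-byte packed
 * formats.  Pixels are loaded as little-endian 32-bit words; \high
 * selects whether the leading colour channel sits in the top byte
 * (alpha-first layouts) or in the bottom byte (alpha-last layouts).
 * As above, the \chr1 variants swap the R/B coefficients and reuse
 * the \chr0 body.
 */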

.macro rgba_input chr0, chr1, high
func ff_\chr1\()ToY_rvv, zve32x
        lpad    0
        lw      t1, 8(a5) # BY
        lw      t3, 0(a5) # RY
        j       1f
endfunc

func ff_\chr0\()ToY_rvv, zve32x, zba
        lpad    0
        lw      t1, 0(a5) # RY
        lw      t3, 8(a5) # BY
1:
        lw      t2, 4(a5) # GY
        li      t4, (32 << (15 - 1)) + (1 << (15 - 7)) # rounding bias
        li      t5, 0xff # channel mask
2:
        vsetvli t0, a4, e32, m8, ta, ma
        vle32.v v0, (a1)
        sub     a4, a4, t0
.if \high
        vsrl.vi v8, v0, 24
.else
        vand.vx v8, v0, t5
.endif
        sh2add  a1, t0, a1
        vsrl.vi v16, v0, 8 * (1 + \high)
        vmul.vx v24, v8, t1
        vand.vx v16, v16, t5
        vsrl.vi v8, v0, 8 * (2 - \high)
        vmacc.vx v24, t2, v16
        vand.vx v8, v8, t5
        vadd.vx v24, v24, t4
        vmacc.vx v24, t3, v8
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v24, 15 - 6
        vse16.v v0, (a0)
        sh1add  a0, t0, a0
        bnez    a4, 2b
        ret
endfunc

func ff_\chr1\()ToUV_rvv, zve32x
        lpad    0
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_\chr0\()ToUV_rvv, zve32x, zba
        lpad    0
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a6, 0xff # channel mask
        li      a7, (256 << (15 - 1)) + (1 << (15 - 7)) # chroma bias + rounding
2:
        vsetvli t0, a5, e32, m8, ta, ma
        vle32.v v0, (a3)
        sub     a5, a5, t0
.if \high
        vsrl.vi v24, v0, 24
.else
        vand.vx v24, v0, a6
.endif
        sh2add  a3, t0, a3
        vsrl.vi v8, v0, 8 * (1 + \high)
        vmul.vx v16, v24, t1
        vand.vx v8, v8, a6
        vmul.vx v24, v24, t4
        vmacc.vx v16, t2, v8
        vsrl.vi v0, v0, 8 * (2 - \high)
        vmacc.vx v24, t5, v8
        vand.vx v0, v0, a6
        vadd.vx v16, v16, a7
        vadd.vx v24, v24, a7
        vmacc.vx v16, t3, v0
        vmacc.vx v24, t6, v0
        vsetvli zero, zero, e16, m4, ta, ma
        vnsra.wi v0, v16, 15 - 6
        vnsra.wi v4, v24, 15 - 6
        vse16.v v0, (a0) # store U
        sh1add  a0, t0, a0
        vse16.v v4, (a1) # store V
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc

func ff_\chr1\()ToUV_half_rvv, zve32x
        lpad    0
        lw      t1, 20(a6) # BU
        lw      t4, 32(a6) # BV
        lw      t3, 12(a6) # RU
        lw      t6, 24(a6) # RV
        j       1f
endfunc

func ff_\chr0\()ToUV_half_rvv, zve32x, zba
        lpad    0
        lw      t1, 12(a6) # RU
        lw      t4, 24(a6) # RV
        lw      t3, 20(a6) # BU
        lw      t6, 32(a6) # BV
1:
        lw      t2, 16(a6) # GU
        lw      t5, 28(a6) # GV
        li      a6, 0xff # channel mask
        li      a7, (256 << 15) + (1 << (15 - 6)) # chroma bias + rounding
2:
        vsetvli t0, a5, e32, m4, ta, ma
        vlseg2e32.v v0, (a3) # even pixels in v0, odd pixels in v4
        sub     a5, a5, t0
.if \high
        vsrl.vi v8, v0, 24
        vsrl.vi v12, v4, 24
.else
        vand.vx v8, v0, a6
        vand.vx v12, v4, a6
.endif
        sh3add  a3, t0, a3 # a3 += 8 * t0
        vsrl.vi v16, v0, 8 * (1 + \high)
        vsrl.vi v20, v4, 8 * (1 + \high)
        vsrl.vi v24, v0, 8 * (2 - \high)
        vsrl.vi v28, v4, 8 * (2 - \high)
        vand.vx v16, v16, a6
        vand.vx v20, v20, a6
        vand.vx v24, v24, a6
        vand.vx v28, v28, a6
        vadd.vv v8, v8, v12 # sum the channel pairs
        vadd.vv v16, v16, v20
        vadd.vv v24, v24, v28
        vmul.vx v0, v8, t1
        vmul.vx v4, v8, t4
        vmacc.vx v0, t2, v16
        vmacc.vx v4, t5, v16
        vmacc.vx v0, t3, v24
        vmacc.vx v4, t6, v24
        vadd.vx v0, v0, a7
        vadd.vx v4, v4, a7
        vsetvli zero, zero, e16, m2, ta, ma
        vnsra.wi v0, v0, 15 - 5
        vnsra.wi v2, v4, 15 - 5
        vse16.v v0, (a0) # store U
        sh1add  a0, t0, a0
        vse16.v v2, (a1) # store V
        sh1add  a1, t0, a1
        bnez    a5, 2b
        ret
endfunc
.endm
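
# With \high == 0, the colour channels occupy bits 0-23 of each pixel and
# the unused alpha byte is on top; with \high == 1, the layout is flipped.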
rgba_input rgba32, bgra32, 0
rgba_input abgr32, argb32, 1