/*
 * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "asm-offsets.h"
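
/*
 * NEON inner loops for FFmpeg's audio resampler.
 *
 * Each sample format defines the helper macros LOAD1/LOAD2/LOAD4,
 * MLA1/MLA2/MLA4, MUL4, INIT4 and STORE, then instantiates
 * resample_one (and resample_linear) with the matching element-size
 * shift \es (lsl #2 for 32-bit elements, lsl #1 for s16).
 *
 * As used below: r0 = resampler context (field offsets come from
 * asm-offsets.h), r1 = dst, r2 = destination index (dst is advanced
 * by r2 elements), r3 = src, [sp, #8] = index and, for the linear
 * variant, [sp, #12] = frac.
 */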

.macro resample_one fmt, es=2
function ff_resample_one_\fmt\()_neon, export=1
        push            {r4, r5}
        add             r1, r1, r2, lsl #\es
        ldr             r2, [r0, #PHASE_SHIFT+4]        /* phase_mask */
        ldr             ip, [sp, #8]                    /* index */
        ldr             r5, [r0, #FILTER_LENGTH]
        and             r2, ip, r2                      /* (index & phase_mask) */
        ldr             r4, [r0, #PHASE_SHIFT]
        lsr             r4, ip, r4                      /* compute sample_index */
        mul             r2, r2, r5
        ldr             ip, [r0, #FILTER_BANK]
        add             r3, r3, r4, lsl #\es            /* &src[sample_index] */
        cmp             r5, #8
        add             r0, ip, r2, lsl #\es            /* filter = &filter_bank[...] */
        blt             5f
8: /* filter_length >= 8: unrolled loop, 8 taps per pass */
        subs            r5, r5, #8
        LOAD4
        MUL4
7:
        LOAD4
        beq             6f
        cmp             r5, #8
        MLA4
        blt             4f
        subs            r5, r5, #8
        LOAD4
        MLA4
        b               7b
6: /* count hit zero: accumulate the last 4 taps, store, return */
        MLA4
        STORE
        pop             {r4, r5}
        bx              lr
5: /* filter_length < 8: clear accumulators */
        INIT4
4: /* remaining filter_length 1 to 7 */
        cmp             r5, #4
        blt             2f
        subs            r5, r5, #4
        LOAD4
        MLA4
        beq             0f
2: /* remaining filter_length 1 to 3 */
        cmp             r5, #2
        blt             1f
        subs            r5, r5, #2
        LOAD2
        MLA2
        beq             0f
1: /* remaining filter_length 1 */
        LOAD1
        MLA1
0:
        STORE
        pop             {r4, r5}
        bx              lr
endfunc

.purgem LOAD1
.purgem LOAD2
.purgem LOAD4
.purgem MLA1
.purgem MLA2
.purgem MLA4
.purgem MUL4
.purgem INIT4
.purgem STORE
.endm

/* float32 */
.macro LOAD1
        veor.32         d0, d0
        vld1.32         {d0[0]}, [r0]!                  /* load filter */
        vld1.32         {d4[0]}, [r3]!                  /* load src */
.endm
.macro LOAD2
        vld1.32         {d0}, [r0]!                     /* load filter */
        vld1.32         {d4}, [r3]!                     /* load src */
.endm
.macro LOAD4
        vld1.32         {d0,d1}, [r0]!                  /* load filter */
        vld1.32         {d4,d5}, [r3]!                  /* load src */
.endm
.macro MLA1
        vmla.f32        d16, d0, d4[0]
.endm
.macro MLA2
        vmla.f32        d16, d0, d4
.endm
.macro MLA4
        vmla.f32        d16, d0, d4
        vmla.f32        d17, d1, d5
.endm
.macro MUL4
        vmul.f32        d16, d0, d4
        vmul.f32        d17, d1, d5
.endm
.macro INIT4
        veor.f32        q8, q8
.endm
.macro STORE
        vpadd.f32       d16, d16, d17
        vpadd.f32       d16, d16, d16
        vst1.32         {d16[0]}, [r1]
.endm
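
/* float path: accumulate in d16/d17, reduce with pairwise adds in STORE */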
resample_one flt, 2

/* s32 */
.macro LOAD1
        veor.32         d0, d0
        vld1.32         {d0[0]}, [r0]!                  /* load filter */
        vld1.32         {d4[0]}, [r3]!                  /* load src */
.endm
.macro LOAD2
        vld1.32         {d0}, [r0]!                     /* load filter */
        vld1.32         {d4}, [r3]!                     /* load src */
.endm
.macro LOAD4
        vld1.32         {d0,d1}, [r0]!                  /* load filter */
        vld1.32         {d4,d5}, [r3]!                  /* load src */
.endm
.macro MLA1
        vmlal.s32       q8, d0, d4[0]
.endm
.macro MLA2
        vmlal.s32       q8, d0, d4
.endm
.macro MLA4
        vmlal.s32       q8, d0, d4
        vmlal.s32       q9, d1, d5
.endm
.macro MUL4
        vmull.s32       q8, d0, d4
        vmull.s32       q9, d1, d5
.endm
.macro INIT4
        veor.s64        q8, q8
        veor.s64        q9, q9
.endm
.macro STORE
        vadd.s64        q8, q8, q9
        vadd.s64        d16, d16, d17
        vqrshrn.s64     d16, q8, #30
        vst1.32         {d16[0]}, [r1]
.endm
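
/* s32 path: products are widened to 64 bits and narrowed back with a
   rounding, saturating shift by 30 in STORE */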
resample_one s32, 2

/* s16 */
.macro LOAD1
        veor.16         d0, d0
        vld1.16         {d0[0]}, [r0]!                  /* load filter */
        vld1.16         {d4[0]}, [r3]!                  /* load src */
.endm
.macro LOAD2
        /* load two s16 as one 32-bit lane; the remaining lanes stay
           zero so the 4-lane MLA2 below adds nothing for them */
        veor.16         d0, d0
        vld1.32         {d0[0]}, [r0]!                  /* load filter */
        veor.16         d4, d4
        vld1.32         {d4[0]}, [r3]!                  /* load src */
.endm
.macro LOAD4
        vld1.16         {d0}, [r0]!                     /* load filter */
        vld1.16         {d4}, [r3]!                     /* load src */
.endm
.macro MLA1
        vmlal.s16       q8, d0, d4[0]
.endm
.macro MLA2
        vmlal.s16       q8, d0, d4
.endm
.macro MLA4
        vmlal.s16       q8, d0, d4
.endm
.macro MUL4
        vmull.s16       q8, d0, d4
.endm
.macro INIT4
        veor.s32        q8, q8
.endm
.macro STORE
        vpadd.s32       d16, d16, d17
        vpadd.s32       d16, d16, d16
        vqrshrn.s32     d16, q8, #15
        vst1.16         {d16[0]}, [r1]
.endm
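
/* s16 path: 2-byte elements (es=1); products are widened to 32-bit
   accumulators and narrowed with a rounding, saturating shift by 15 */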
resample_one s16, 1
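
/*
 * resample_linear is the same inner loop, but it also walks the next
 * filter phase (r2 = filter + filter_length elements) and keeps a second
 * accumulator so that STORE can interpolate between the two phases using
 * frac ([sp, #12]) and c->src_incr (r4).
 */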

.macro resample_linear fmt, es=2
function ff_resample_linear_\fmt\()_neon, export=1
        push            {r4, r5}
        add             r1, r1, r2, lsl #\es
        ldr             r2, [r0, #PHASE_SHIFT+4]        /* phase_mask */
        ldr             ip, [sp, #8]                    /* index */
        ldr             r5, [r0, #FILTER_LENGTH]
        and             r2, ip, r2                      /* (index & phase_mask) */
        ldr             r4, [r0, #PHASE_SHIFT]
        lsr             r4, ip, r4                      /* compute sample_index */
        mul             r2, r2, r5
        ldr             ip, [r0, #FILTER_BANK]
        add             r3, r3, r4, lsl #\es            /* &src[sample_index] */
        cmp             r5, #8
        ldr             r4, [r0, #SRC_INCR]
        add             r0, ip, r2, lsl #\es            /* filter = &filter_bank[...] */
        add             r2, r0, r5, lsl #\es            /* filter[... + c->filter_length] */
        blt             5f
8: /* filter_length >= 8: unrolled loop, 8 taps per pass */
        subs            r5, r5, #8
        LOAD4
        MUL4
7:
        LOAD4
        beq             6f
        cmp             r5, #8
        MLA4
        blt             4f
        subs            r5, r5, #8
        LOAD4
        MLA4
        b               7b
6: /* count hit zero: accumulate the last 4 taps, store, return */
        MLA4
        STORE
        pop             {r4, r5}
        bx              lr
5: /* filter_length < 8: clear accumulators */
        INIT4
4: /* remaining filter_length 1 to 7 */
        cmp             r5, #4
        blt             2f
        subs            r5, r5, #4
        LOAD4
        MLA4
        beq             0f
2: /* remaining filter_length 1 to 3 */
        cmp             r5, #2
        blt             1f
        subs            r5, r5, #2
        LOAD2
        MLA2
        beq             0f
1: /* remaining filter_length 1 */
        LOAD1
        MLA1
0:
        STORE
        pop             {r4, r5}
        bx              lr
endfunc

.purgem LOAD1
.purgem LOAD2
.purgem LOAD4
.purgem MLA1
.purgem MLA2
.purgem MLA4
.purgem MUL4
.purgem INIT4
.purgem STORE
.endm

/* float32 linear */
.macro LOAD1
        veor.32         d0, d0
        veor.32         d2, d2
        vld1.32         {d0[0]}, [r0]!                  /* load filter */
        vld1.32         {d2[0]}, [r2]!                  /* load filter */
        vld1.32         {d4[0]}, [r3]!                  /* load src */
.endm
.macro LOAD2
        vld1.32         {d0}, [r0]!                     /* load filter */
        vld1.32         {d2}, [r2]!                     /* load filter */
        vld1.32         {d4}, [r3]!                     /* load src */
.endm
.macro LOAD4
        vld1.32         {d0,d1}, [r0]!                  /* load filter */
        vld1.32         {d2,d3}, [r2]!                  /* load filter */
        vld1.32         {d4,d5}, [r3]!                  /* load src */
.endm
.macro MLA1
        vmla.f32        d18, d0, d4[0]
        vmla.f32        d16, d2, d4[0]
.endm
.macro MLA2
        vmla.f32        d18, d0, d4
        vmla.f32        d16, d2, d4
.endm
.macro MLA4
        vmla.f32        q9, q0, q2
        vmla.f32        q8, q1, q2
.endm
.macro MUL4
        vmul.f32        q9, q0, q2
        vmul.f32        q8, q1, q2
.endm
.macro INIT4
        veor.f32        q9, q9
        veor.f32        q8, q8
.endm
.macro STORE
        vldr            s0, [sp, #12]                   /* frac */
        vmov            s1, r4                          /* c->src_incr */
        vcvt.f32.s32    d0, d0                          /* int -> float: frac, src_incr */
        vsub.f32        q8, q8, q9                      /* v2 - val */
        vpadd.f32       d18, d18, d19
        vpadd.f32       d16, d16, d17
        vpadd.f32       d2, d18, d18                    /* s4 = val */
        vpadd.f32       d1, d16, d16                    /* s2 = v2 - val */
        vmul.f32        s2, s2, s0                      /* (v2 - val) * frac */
        vdiv.f32        s2, s2, s1                      /* / c->src_incr */
        vadd.f32        s4, s4, s2
        vstr            s4, [r1]
.endm
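
/* float linear variant: output is val + (v2 - val) * frac / c->src_incr */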
resample_linear flt, 2