@ dsputil_vfp.S — ARM VFP optimized DSP utility routines
/*
 * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "asm.S"

        .fpu neon               @ required for gas to accept UAL syntax
/*
 * VFP is a floating point coprocessor used in some ARM cores. VFP11 has 1 cycle
 * throughput for almost all the instructions (except for double precision
 * arithmetics), but rather high latency. Latency is 4 cycles for loads and 8 cycles
 * for arithmetic operations. Scheduling code to avoid pipeline stalls is very
 * important for performance. One more interesting feature is that VFP has
 * independent load/store and arithmetics pipelines, so it is possible to make
 * them work simultaneously and get more than 1 operation per cycle. Load/store
 * pipeline can process 2 single precision floating point values per cycle and
 * supports bulk loads and stores for large sets of registers. Arithmetic operations
 * can be done on vectors, which allows to keep the arithmetics pipeline busy,
 * while the processor may issue and execute other instructions. Detailed
 * optimization manuals can be found at http://www.arm.com
 */
/**
 * ARM VFP optimized implementation of 'vector_fmul_c' function:
 * in-place element-wise product, dst[i] *= src[i].
 * Assume that len is a positive number and is multiple of 8
 */
@ void ff_vector_fmul_vfp(float *dst, const float *src, int len)
function ff_vector_fmul_vfp, export=1
        vpush           {d8-d15}                @ s16-s31 are callee-saved under AAPCS
        mov             r3, r0                  @ r3 = read pointer into dst; r0 stays the write pointer
        fmrx            r12, fpscr
        orr             r12, r12, #(3 << 16)    /* set vector size to 4 */
        fmxr            fpscr, r12              @ FPSCR.LEN=3: each vmul below operates on 4 registers

        @ Prime the software pipeline: load the first 8 elements of each input
        @ into the s0-s15 bank and start the first 4-wide multiply.
        vldmia          r3!, {s0-s3}
        vldmia          r1!, {s8-s11}
        vldmia          r3!, {s4-s7}
        vldmia          r1!, {s12-s15}
        vmul.f32        s8, s0, s8              @ vector op: s8-s11 = s0-s3 * s8-s11
1:
        subs            r2, r2, #16             @ GE: >= 8 elements still ahead; GT: >= 16 (loop again)
        vmul.f32        s12, s4, s12            @ s12-s15 = s4-s7 * s12-s15
        vldmiage        r3!, {s16-s19}          @ prefetch the next 8 elements into the s16-s31 bank...
        vldmiage        r1!, {s24-s27}
        vldmiage        r3!, {s20-s23}
        vldmiage        r1!, {s28-s31}
        vmulge.f32      s24, s16, s24           @ ...and multiply them while the stores below drain
        vstmia          r0!, {s8-s11}           @ store the 8 results from the s0-s15 bank
        vstmia          r0!, {s12-s15}
        vmulge.f32      s28, s20, s28
        vldmiagt        r3!, {s0-s3}            @ refill the s0-s15 bank for the next iteration
        vldmiagt        r1!, {s8-s11}
        vldmiagt        r3!, {s4-s7}
        vldmiagt        r1!, {s12-s15}
        vmulge.f32      s8, s0, s8              @ harmless when GE but not GT: result is never stored
        vstmiage        r0!, {s24-s27}          @ store the 8 results from the s16-s31 bank
        vstmiage        r0!, {s28-s31}
        bgt             1b

        bic             r12, r12, #(7 << 16)    /* set vector size back to 1 */
        fmxr            fpscr, r12
        vpop            {d8-d15}
        bx              lr
.endfunc
/**
 * ARM VFP optimized implementation of 'vector_fmul_reverse_c' function:
 * dst[i] = src0[i] * src1[len - 1 - i].
 * Assume that len is a positive number and is multiple of 8
 */
@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
@                                 const float *src1, int len)
function ff_vector_fmul_reverse_vfp, export=1
        vpush           {d8-d15}                @ s16-s31 are callee-saved under AAPCS
        add             r2, r2, r3, lsl #2      @ r2 = src1 + len floats: src1 is read backwards from its end

        @ Prime the pipeline with the first 8 elements. vldmdb decrements r2
        @ before loading, so s0-s3 = src1[len-4 .. len-1] in ascending order;
        @ the per-group reversal is done below by crossing the register indices.
        vldmdb          r2!, {s0-s3}
        vldmia          r1!, {s8-s11}
        vldmdb          r2!, {s4-s7}
        vldmia          r1!, {s12-s15}
        @ Scalar multiplies (no FPSCR vector mode here); e.g. first result:
        vmul.f32        s8, s3, s8              @ dst[0] = src0[0] * src1[len-1]
        vmul.f32        s9, s2, s9
        vmul.f32        s10, s1, s10
        vmul.f32        s11, s0, s11
1:
        subs            r3, r3, #16             @ GE: >= 8 elements still ahead; GT: >= 16 (loop again)
        vldmdbge        r2!, {s16-s19}          @ prefetch the next 8 into the s16-s31 bank,
        vmul.f32        s12, s7, s12            @ interleaved with multiplies to hide load latency
        vldmiage        r1!, {s24-s27}
        vmul.f32        s13, s6, s13
        vldmdbge        r2!, {s20-s23}
        vmul.f32        s14, s5, s14
        vldmiage        r1!, {s28-s31}
        vmul.f32        s15, s4, s15
        vmulge.f32      s24, s19, s24           @ crossed indices reverse src1 within each group of 4
        vldmdbgt        r2!, {s0-s3}            @ refill the s0-s15 bank for the next iteration
        vmulge.f32      s25, s18, s25
        vstmia          r0!, {s8-s13}           @ store 6 results early to keep the store pipe busy
        vmulge.f32      s26, s17, s26
        vldmiagt        r1!, {s8-s11}
        vmulge.f32      s27, s16, s27
        vmulge.f32      s28, s23, s28
        vldmdbgt        r2!, {s4-s7}
        vmulge.f32      s29, s22, s29
        vstmia          r0!, {s14-s15}          @ store the remaining 2 of the first 8 results
        vmulge.f32      s30, s21, s30
        vmulge.f32      s31, s20, s31
        vmulge.f32      s8, s3, s8              @ harmless when GE but not GT: result is never stored
        vldmiagt        r1!, {s12-s15}
        vmulge.f32      s9, s2, s9
        vmulge.f32      s10, s1, s10
        vstmiage        r0!, {s24-s27}          @ store the 8 results from the s16-s31 bank
        vmulge.f32      s11, s0, s11
        vstmiage        r0!, {s28-s31}
        bgt             1b

        vpop            {d8-d15}
        bx              lr
.endfunc
#if HAVE_ARMV6
/**
 * ARM VFP optimized float to int16 conversion.
 * Assume that len is a positive number and is multiple of 8, destination
 * buffer is at least 4 bytes aligned (8 bytes alignment is better for
 * performance), little endian byte sex
 */
@ void ff_float_to_int16_vfp(int16_t *dst, const float *src, int len)
function ff_float_to_int16_vfp, export=1
        push            {r4-r8,lr}              @ r4-r8 callee-saved; lr freed up as extra scratch
        vpush           {d8-d11}                @ d8-d11 (s16-s23) are callee-saved under AAPCS

        @ Prime the pipeline: load and convert the first 8 floats to int32.
        vldmia          r1!, {s16-s23}
        vcvt.s32.f32    s0, s16
        vcvt.s32.f32    s1, s17
        vcvt.s32.f32    s2, s18
        vcvt.s32.f32    s3, s19
        vcvt.s32.f32    s4, s20
        vcvt.s32.f32    s5, s21
        vcvt.s32.f32    s6, s22
        vcvt.s32.f32    s7, s23
1:
        subs            r2, r2, #8              @ 8 samples per iteration; GT -> more batches follow
        vmov            r3, r4, s0, s1          @ transfer converted int32 pairs to core registers
        vmov            r5, r6, s2, s3
        vmov            r7, r8, s4, s5
        vmov            ip, lr, s6, s7
        vldmiagt        r1!, {s16-s23}          @ prefetch the next 8 floats while the ALU packs
        ssat            r4, #16, r4             @ saturate each value to the signed 16-bit range
        ssat            r3, #16, r3             @ (ssat/pkhbt are ARMv6 ops, hence the HAVE_ARMV6 guard)
        ssat            r6, #16, r6
        ssat            r5, #16, r5
        pkhbt           r3, r3, r4, lsl #16     @ pack two int16 halfwords per word, little endian
        pkhbt           r4, r5, r6, lsl #16
        vcvtgt.s32.f32  s0, s16                 @ convert the next batch in the VFP pipe meanwhile
        vcvtgt.s32.f32  s1, s17
        vcvtgt.s32.f32  s2, s18
        vcvtgt.s32.f32  s3, s19
        vcvtgt.s32.f32  s4, s20
        vcvtgt.s32.f32  s5, s21
        vcvtgt.s32.f32  s6, s22
        vcvtgt.s32.f32  s7, s23
        ssat            r8, #16, r8
        ssat            r7, #16, r7
        ssat            lr, #16, lr
        ssat            ip, #16, ip
        pkhbt           r5, r7, r8, lsl #16
        pkhbt           r6, ip, lr, lsl #16
        stmia           r0!, {r3-r6}            @ store 8 int16 samples (16 bytes) in one burst
        bgt             1b

        vpop            {d8-d11}
        pop             {r4-r8,pc}              @ pop pc: return
.endfunc
#endif