motion_est_mvi_asm.S 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184
  1. /*
  2. * Alpha optimized DSP utils
  3. * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "regdef.h"
  22. /* Some nicer register names. */
  23. #define ta t10
  24. #define tb t11
  25. #define tc t12
  26. #define td AT
  27. /* Danger: these overlap with the argument list and the return value */
  28. #define te a5
  29. #define tf a4
  30. #define tg a3
  31. #define th v0
  32. .set noat
  33. .set noreorder
  34. .arch pca56
  35. .text
  36. /*****************************************************************************
  37. * int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size)
  38. *
  39. * This code is written with a pca56 in mind. For ev6, one should
  40. * really take the increased latency of 3 cycles for MVI instructions
  41. * into account.
  42. *
  43. * It is important to keep the loading and first use of a register as
  44. * far apart as possible, because if a register is accessed before it
  45. * has been fetched from memory, the CPU will stall.
  46. */
  47. .align 4
  48. .globl pix_abs16x16_mvi_asm
  49. .ent pix_abs16x16_mvi_asm
  50. pix_abs16x16_mvi_asm:
  51. .frame sp, 0, ra, 0
  52. .prologue 0
  53. #if CONFIG_GPROF
  54. lda AT, _mcount
  55. jsr AT, (AT), _mcount
  56. #endif
  57. and a2, 7, t0
  58. clr v0
  59. beq t0, $aligned
  60. .align 4
  61. $unaligned:
  62. /* Registers:
  63. line 0:
  64. t0: left_u -> left lo -> left
  65. t1: mid
  66. t2: right_u -> right hi -> right
  67. t3: ref left
  68. t4: ref right
  69. line 1:
  70. t5: left_u -> left lo -> left
  71. t6: mid
  72. t7: right_u -> right hi -> right
  73. t8: ref left
  74. t9: ref right
  75. temp:
  76. ta: left hi
  77. tb: right lo
  78. tc: error left
  79. td: error right */
  80. /* load line 0 */
  81. ldq_u t0, 0(a2) # left_u
  82. ldq_u t1, 8(a2) # mid
  83. ldq_u t2, 16(a2) # right_u
  84. ldq t3, 0(a1) # ref left
  85. ldq t4, 8(a1) # ref right
  86. addq a1, a3, a1 # pix1
  87. addq a2, a3, a2 # pix2
  88. /* load line 1 */
  89. ldq_u t5, 0(a2) # left_u
  90. ldq_u t6, 8(a2) # mid
  91. ldq_u t7, 16(a2) # right_u
  92. ldq t8, 0(a1) # ref left
  93. ldq t9, 8(a1) # ref right
  94. addq a1, a3, a1 # pix1
  95. addq a2, a3, a2 # pix2
  96. /* calc line 0 */
  97. extql t0, a2, t0 # left lo
  98. extqh t1, a2, ta # left hi
  99. extql t1, a2, tb # right lo
  100. or t0, ta, t0 # left
  101. extqh t2, a2, t2 # right hi
  102. perr t3, t0, tc # error left
  103. or t2, tb, t2 # right
  104. perr t4, t2, td # error right
  105. addq v0, tc, v0 # add error left
  106. addq v0, td, v0 # add error left
  107. /* calc line 1 */
  108. extql t5, a2, t5 # left lo
  109. extqh t6, a2, ta # left hi
  110. extql t6, a2, tb # right lo
  111. or t5, ta, t5 # left
  112. extqh t7, a2, t7 # right hi
  113. perr t8, t5, tc # error left
  114. or t7, tb, t7 # right
  115. perr t9, t7, td # error right
  116. addq v0, tc, v0 # add error left
  117. addq v0, td, v0 # add error left
  118. /* loop */
  119. subq a4, 2, a4 # h -= 2
  120. bne a4, $unaligned
  121. ret
  122. .align 4
  123. $aligned:
  124. /* load line 0 */
  125. ldq t0, 0(a2) # left
  126. ldq t1, 8(a2) # right
  127. addq a2, a3, a2 # pix2
  128. ldq t2, 0(a1) # ref left
  129. ldq t3, 8(a1) # ref right
  130. addq a1, a3, a1 # pix1
  131. /* load line 1 */
  132. ldq t4, 0(a2) # left
  133. ldq t5, 8(a2) # right
  134. addq a2, a3, a2 # pix2
  135. ldq t6, 0(a1) # ref left
  136. ldq t7, 8(a1) # ref right
  137. addq a1, a3, a1 # pix1
  138. /* load line 2 */
  139. ldq t8, 0(a2) # left
  140. ldq t9, 8(a2) # right
  141. addq a2, a3, a2 # pix2
  142. ldq ta, 0(a1) # ref left
  143. ldq tb, 8(a1) # ref right
  144. addq a1, a3, a1 # pix1
  145. /* load line 3 */
  146. ldq tc, 0(a2) # left
  147. ldq td, 8(a2) # right
  148. addq a2, a3, a2 # pix2
  149. ldq te, 0(a1) # ref left
  150. ldq a0, 8(a1) # ref right
  151. /* calc line 0 */
  152. perr t0, t2, t0 # error left
  153. addq a1, a3, a1 # pix1
  154. perr t1, t3, t1 # error right
  155. addq v0, t0, v0 # add error left
  156. /* calc line 1 */
  157. perr t4, t6, t0 # error left
  158. addq v0, t1, v0 # add error right
  159. perr t5, t7, t1 # error right
  160. addq v0, t0, v0 # add error left
  161. /* calc line 2 */
  162. perr t8, ta, t0 # error left
  163. addq v0, t1, v0 # add error right
  164. perr t9, tb, t1 # error right
  165. addq v0, t0, v0 # add error left
  166. /* calc line 3 */
  167. perr tc, te, t0 # error left
  168. addq v0, t1, v0 # add error right
  169. perr td, a0, t1 # error right
  170. addq v0, t0, v0 # add error left
  171. addq v0, t1, v0 # add error right
  172. /* loop */
  173. subq a4, 4, a4 # h -= 4
  174. bne a4, $aligned
  175. ret
  176. .end pix_abs16x16_mvi_asm