  1. /*
  2. * copyright (c) 2002 Leon van Stuivenberg
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #ifndef AVCODEC_PS2_MMI_H
  21. #define AVCODEC_PS2_MMI_H
  22. #define align16 __attribute__ ((aligned (16)))
  23. /*
  24. #define r0 $zero
  25. #define r1 $at //assembler!
  26. #define r2 $v0 //return
  27. #define r3 $v1 //return
  28. #define r4 $a0 //arg
  29. #define r5 $a1 //arg
  30. #define r6 $a2 //arg
  31. #define r7 $a3 //arg
  32. #define r8 $t0 //temp
  33. #define r9 $t1 //temp
  34. #define r10 $t2 //temp
  35. #define r11 $t3 //temp
  36. #define r12 $t4 //temp
  37. #define r13 $t5 //temp
  38. #define r14 $t6 //temp
  39. #define r15 $t7 //temp
  40. #define r16 $s0 //saved temp
  41. #define r17 $s1 //saved temp
  42. #define r18 $s2 //saved temp
  43. #define r19 $s3 //saved temp
  44. #define r20 $s4 //saved temp
  45. #define r21 $s5 //saved temp
  46. #define r22 $s6 //saved temp
  47. #define r23 $s7 //saved temp
  48. #define r24 $t8 //temp
  49. #define r25 $t9 //temp
  50. #define r26 $k0 //kernel
  51. #define r27 $k1 //kernel
  52. #define r28 $gp //global ptr
  53. #define r29 $sp //stack ptr
  54. #define r30 $fp //frame ptr
  55. #define r31 $ra //return addr
  56. */
  57. #define lq(base, off, reg) \
  58. __asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) )
  59. #define lq2(mem, reg) \
  60. __asm__ volatile ("lq " #reg ", %0" : : "r" (mem))
  61. #define sq(reg, off, base) \
  62. __asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) )
  63. /*
  64. #define ld(base, off, reg) \
  65. __asm__ volatile ("ld " #reg ", " #off "("#base ")")
  66. */
  67. #define ld3(base, off, reg) \
  68. __asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
  69. #define ldr3(base, off, reg) \
  70. __asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
  71. #define ldl3(base, off, reg) \
  72. __asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
  73. /*
  74. #define sd(reg, off, base) \
  75. __asm__ volatile ("sd " #reg ", " #off "("#base ")")
  76. */
  77. //seems assembler has bug encoding mnemonic 'sd', so DIY
  78. #define sd3(reg, off, base) \
  79. __asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
  80. #define sw(reg, off, base) \
  81. __asm__ volatile ("sw " #reg ", " #off "("#base ")")
  82. #define sq2(reg, mem) \
  83. __asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem)))
  84. #define pinth(rs, rt, rd) \
  85. __asm__ volatile ("pinth " #rd ", " #rs ", " #rt )
  86. #define phmadh(rs, rt, rd) \
  87. __asm__ volatile ("phmadh " #rd ", " #rs ", " #rt )
  88. #define pcpyud(rs, rt, rd) \
  89. __asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt )
  90. #define pcpyld(rs, rt, rd) \
  91. __asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt )
  92. #define pcpyh(rt, rd) \
  93. __asm__ volatile ("pcpyh " #rd ", " #rt )
  94. #define paddw(rs, rt, rd) \
  95. __asm__ volatile ("paddw " #rd ", " #rs ", " #rt )
  96. #define pextlw(rs, rt, rd) \
  97. __asm__ volatile ("pextlw " #rd ", " #rs ", " #rt )
  98. #define pextuw(rs, rt, rd) \
  99. __asm__ volatile ("pextuw " #rd ", " #rs ", " #rt )
  100. #define pextlh(rs, rt, rd) \
  101. __asm__ volatile ("pextlh " #rd ", " #rs ", " #rt )
  102. #define pextuh(rs, rt, rd) \
  103. __asm__ volatile ("pextuh " #rd ", " #rs ", " #rt )
  104. #define psubw(rs, rt, rd) \
  105. __asm__ volatile ("psubw " #rd ", " #rs ", " #rt )
  106. #define psraw(rt, sa, rd) \
  107. __asm__ volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )
  108. #define ppach(rs, rt, rd) \
  109. __asm__ volatile ("ppach " #rd ", " #rs ", " #rt )
  110. #define ppacb(rs, rt, rd) \
  111. __asm__ volatile ("ppacb " #rd ", " #rs ", " #rt )
  112. #define prevh(rt, rd) \
  113. __asm__ volatile ("prevh " #rd ", " #rt )
  114. #define pmulth(rs, rt, rd) \
  115. __asm__ volatile ("pmulth " #rd ", " #rs ", " #rt )
  116. #define pmaxh(rs, rt, rd) \
  117. __asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt )
  118. #define pminh(rs, rt, rd) \
  119. __asm__ volatile ("pminh " #rd ", " #rs ", " #rt )
  120. #define pinteh(rs, rt, rd) \
  121. __asm__ volatile ("pinteh " #rd ", " #rs ", " #rt )
  122. #define paddh(rs, rt, rd) \
  123. __asm__ volatile ("paddh " #rd ", " #rs ", " #rt )
  124. #define psubh(rs, rt, rd) \
  125. __asm__ volatile ("psubh " #rd ", " #rs ", " #rt )
  126. #define psrah(rt, sa, rd) \
  127. __asm__ volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )
  128. #define pmfhl_uw(rd) \
  129. __asm__ volatile ("pmfhl.uw " #rd)
  130. #define pextlb(rs, rt, rd) \
  131. __asm__ volatile ("pextlb " #rd ", " #rs ", " #rt )
  132. #endif /* AVCODEC_PS2_MMI_H */