intmath.h (3.5 KB)
/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

/* All overrides below use GCC-style inline assembly; the generic C
 * fallbacks from libavutil remain in effect when it is unavailable. */
#if HAVE_INLINE_ASM

/* ARMv6 adds the saturation (USAT/SSAT/QADD/QDADD) and SMMUL
 * instructions used by the first group of overrides. */
#if HAVE_ARMV6
#define FASTDIV FASTDIV
/**
 * Fast approximate 32-bit division by multiplication with a precomputed
 * reciprocal: loads ff_inverse[b] (a scaled 2^32/b value) and takes the
 * high 32 bits of the signed product with a (SMMUL).  For b <= 2 the
 * table path is skipped and the result is a logical shift a >> 1
 * (correct for b == 2; presumably the reciprocal for b == 2 would be
 * 1 << 31 and misbehave as a signed operand -- NOTE(review): confirm the
 * accepted range of a and b against the generic FASTDIV contract in
 * libavutil).  Flags are clobbered by the CMP ("cc").
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;

    __asm__ ("cmp %2, #2 \n\t"
             "ldr %0, [%3, %2, lsl #2] \n\t"
             "ite le \n\t"
             "lsrle %0, %1, #1 \n\t"
             "smmulgt %0, %0, %1 \n\t"
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");

    return r;
}
#define av_clip_uint8 av_clip_uint8_arm
/**
 * Clip a signed int to the unsigned 8-bit range [0, 255] using a single
 * USAT (unsigned saturate to 8 bits) instruction.
 */
static av_always_inline av_const unsigned av_clip_uint8_arm(int a)
{
    unsigned x;

    __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));

    return x;
}
#define av_clip_int8 av_clip_int8_arm
/**
 * Clip a signed int to the signed 8-bit range [-128, 127] using a single
 * SSAT (signed saturate to 8 bits) instruction.
 */
static av_always_inline av_const int av_clip_int8_arm(int a)
{
    int x;

    __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));

    return x;
}
#define av_clip_uint16 av_clip_uint16_arm
/**
 * Clip a signed int to the unsigned 16-bit range [0, 65535] using a
 * single USAT (unsigned saturate to 16 bits) instruction.
 */
static av_always_inline av_const unsigned av_clip_uint16_arm(int a)
{
    unsigned x;

    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));

    return x;
}
#define av_clip_int16 av_clip_int16_arm
/**
 * Clip a signed int to the signed 16-bit range [-32768, 32767] using a
 * single SSAT (signed saturate to 16 bits) instruction.
 */
static av_always_inline av_const int av_clip_int16_arm(int a)
{
    int x;

    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));

    return x;
}
#define av_clip_uintp2 av_clip_uintp2_arm
/**
 * Clip a signed int to the unsigned range [0, 2^p - 1] with USAT.
 * The "i" constraint means p must be a compile-time constant (it is
 * encoded as the USAT immediate).
 */
static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
{
    unsigned x;

    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));

    return x;
}
#define av_sat_add32 av_sat_add32_arm
/**
 * Signed 32-bit addition a + b, saturated to [INT32_MIN, INT32_MAX],
 * via a single QADD instruction.
 */
static av_always_inline int av_sat_add32_arm(int a, int b)
{
    int r;

    __asm__ ("qadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));

    return r;
}
#define av_sat_dadd32 av_sat_dadd32_arm
/**
 * Saturated a + 2 * b via a single QDADD instruction (the doubling of b
 * and the addition both saturate to the signed 32-bit range).
 */
static av_always_inline int av_sat_dadd32_arm(int a, int b)
{
    int r;

    __asm__ ("qdadd %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));

    return r;
}
#else /* HAVE_ARMV6 */

#define FASTDIV FASTDIV
/**
 * Pre-ARMv6 fast division: UMULL computes the full 64-bit product of a
 * and the precomputed reciprocal ff_inverse[b]; the high 32 bits (%0)
 * are the quotient and the low 32 bits (%1) are discarded.  Unlike the
 * ARMv6 variant above there is no special case for small b here --
 * NOTE(review): presumably ff_inverse[b] is exact enough for the
 * contract's b range; confirm against the generic FASTDIV definition.
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;

    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));

    return r;
}

#endif /* HAVE_ARMV6 */
/* %Q/%R asm operand modifiers (low/high word of a 64-bit value) are
 * required below, hence the configure-time check. */
#if HAVE_ASM_MOD_Q

#define av_clipl_int32 av_clipl_int32_arm
/**
 * Clip a 64-bit signed value to the int32_t range.
 *
 * ADDS of the high word (%R2) plus the low word's sign bit (%Q2 lsr #31)
 * yields zero exactly when the high word is the sign extension of the
 * low word, i.e. when the value already fits in 32 bits; in that case
 * the low word is returned as-is (moveq).  Otherwise y is set to
 * ~(1 << 31) = INT32_MAX and XORed with the high word's sign
 * (%R2 asr #31), producing INT32_MAX for positive overflow and
 * INT32_MIN for negative overflow.  Flags are clobbered ("cc").
 */
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;

    __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
             "itet ne \n\t"
             "mvnne %1, #1<<31 \n\t"
             "moveq %0, %Q2 \n\t"
             "eorne %0, %1, %R2, asr #31 \n\t"
             : "=r"(x), "=&r"(y) : "r"(a) : "cc");

    return x;
}

#endif /* HAVE_ASM_MOD_Q */

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */