/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6
  27. #define FASTDIV FASTDIV
  28. static av_always_inline av_const int FASTDIV(int a, int b)
  29. {
  30. int r;
  31. __asm__ ("cmp %2, #2 \n\t"
  32. "ldr %0, [%3, %2, lsl #2] \n\t"
  33. "ite le \n\t"
  34. "lsrle %0, %1, #1 \n\t"
  35. "smmulgt %0, %0, %1 \n\t"
  36. : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
  37. return r;
  38. }
  39. #define av_clip_uint8 av_clip_uint8_arm
  40. static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
  41. {
  42. unsigned x;
  43. __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
  44. return x;
  45. }
  46. #define av_clip_int8 av_clip_int8_arm
  47. static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
  48. {
  49. unsigned x;
  50. __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
  51. return x;
  52. }
  53. #define av_clip_uint16 av_clip_uint16_arm
  54. static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
  55. {
  56. unsigned x;
  57. __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
  58. return x;
  59. }
  60. #define av_clip_int16 av_clip_int16_arm
  61. static av_always_inline av_const int16_t av_clip_int16_arm(int a)
  62. {
  63. int x;
  64. __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
  65. return x;
  66. }
  67. #if !CONFIG_SMALL //the code below cannot be compiled without always_inline
  68. #define av_clip_uintp2 av_clip_uintp2_arm
  69. static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
  70. {
  71. unsigned x;
  72. __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
  73. return x;
  74. }
  75. #endif //!CONFIG_SMALL
  76. #else /* HAVE_ARMV6 */
  77. #define FASTDIV FASTDIV
  78. static av_always_inline av_const int FASTDIV(int a, int b)
  79. {
  80. int r, t;
  81. __asm__ ("umull %1, %0, %2, %3"
  82. : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
  83. return r;
  84. }
  85. #endif /* HAVE_ARMV6 */
  86. #define av_clipl_int32 av_clipl_int32_arm
  87. static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
  88. {
  89. int x, y;
  90. __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"
  91. "itet ne \n\t"
  92. "mvnne %1, #1<<31 \n\t"
  93. "moveq %0, %Q2 \n\t"
  94. "eorne %0, %1, %R2, asr #31 \n\t"
  95. : "=r"(x), "=&r"(y) : "r"(a):"cc");
  96. return x;
  97. }
#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */