/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6
#define FASTDIV FASTDIV
/**
 * Fast 32-bit division using the ff_inverse reciprocal table
 * (declared elsewhere in libavutil).
 *
 * For b > 2 the quotient is the high 32 bits of a * ff_inverse[b]
 * (SMMUL).  For b <= 2 the table load is discarded and the result is
 * a >> 1, which is correct for b == 2.
 * NOTE(review): like the generic FASTDIV, b == 1 (or b <= 0) appears
 * to be unsupported here — confirm callers never pass b < 2.
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;
    __asm__ ("cmp %2, #2 \n\t"               /* compare b against 2, set flags    */
             "ldr %0, [%3, %2, lsl #2] \n\t" /* r = ff_inverse[b]                 */
             "lsrle %0, %1, #1 \n\t"         /* b <= 2: r = a >> 1                */
             "smmulgt %0, %0, %1 \n\t"       /* b >  2: r = (r * (int64)a) >> 32  */
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
    return r;
}
#define av_clip_uint8 av_clip_uint8_arm
/**
 * Clip a to the unsigned 8-bit range [0, 255] using the ARMv6
 * USAT (unsigned saturate) instruction.
 */
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
    return x;
}
  45. #define av_clip_int8 av_clip_int8_arm
  46. static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
  47. {
  48. unsigned x;
  49. __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
  50. return x;
  51. }
#define av_clip_uint16 av_clip_uint16_arm
/**
 * Clip a to the unsigned 16-bit range [0, 65535] using the ARMv6
 * USAT (unsigned saturate) instruction.
 */
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}
#define av_clip_int16 av_clip_int16_arm
/**
 * Clip a to the signed 16-bit range [-32768, 32767] using the ARMv6
 * SSAT (signed saturate) instruction.
 */
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}
#define av_clip_uintp2 av_clip_uintp2_arm
/**
 * Clip a to the unsigned range [0, 2^p - 1] using USAT.
 *
 * The "i" constraint means p must be a compile-time constant
 * (USAT encodes the saturation width as an immediate).
 */
static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
{
    unsigned x;
    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
    return x;
}
#else /* HAVE_ARMV6 */
#define FASTDIV FASTDIV
/**
 * Fast 32-bit division by multiplication with a precomputed 32-bit
 * reciprocal: r = (a * ff_inverse[b]) >> 32.
 *
 * UMULL writes the 64-bit product to %1:%0 (low:high); only the high
 * word r is kept, the low word t is discarded.
 * ff_inverse is declared elsewhere in libavutil.
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}
#endif /* HAVE_ARMV6 */
#define av_clipl_int32 av_clipl_int32_arm
/**
 * Clip a 64-bit value to the int32_t range [INT32_MIN, INT32_MAX].
 *
 * %Q2/%R2 are GCC ARM operand modifiers selecting the low/high word
 * of the 64-bit operand a.  ADDS computes hi + (lo >> 31) [logical
 * shift, i.e. hi plus the sign bit of lo]; this is zero exactly when
 * a already fits in 32 bits (hi == 0 with lo >= 0, or hi == -1 with
 * lo < 0).  If it fits, the low word is returned unchanged; otherwise
 * the saturated bound is built from the sign of hi:
 * 0x7fffffff ^ (hi >> 31) gives 0x80000000 for negative overflow and
 * 0x7fffffff for positive overflow.
 */
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t"  /* y = hi + sign(lo); Z set if a fits */
             "mvnne %1, #1<<31 \n\t"            /* overflow: y = 0x7fffffff           */
             "moveq %0, %Q2 \n\t"               /* fits: x = lo                       */
             "eorne %0, %1, %R2, asr #31 \n\t"  /* overflow: x = y ^ (hi >> 31)       */
             : "=r"(x), "=&r"(y) : "r"(a):"cc");
    return x;
}
#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */