/* intmath.h */
  1. /*
  2. * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #ifndef AVUTIL_ARM_INTMATH_H
  21. #define AVUTIL_ARM_INTMATH_H
  22. #include <stdint.h>
  23. #include "config.h"
  24. #include "libavutil/attributes.h"
  25. #if HAVE_INLINE_ASM
  26. #if HAVE_ARMV6
#define FASTDIV FASTDIV
/**
 * Fast approximate division a / b via the ff_inverse reciprocal table
 * (ARM override of the generic FASTDIV macro).
 *
 * For b > 2 the result is the high 32 bits of a * ff_inverse[b]
 * (SMMUL, ARMv6+). For b <= 2 the result is a >> 1, i.e. the b == 2
 * answer; presumably callers never pass b < 2 on this path -- TODO
 * confirm against the generic FASTDIV contract.
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;
    __asm__ ("cmp %2, #2 \n\t"               /* compare b against 2; sets flags for the conditional ops below */
             "ldr %0, [%3, %2, lsl #2] \n\t" /* r = ff_inverse[b] (word-indexed table load) */
             "ite le \n\t"                   /* IT block required when assembling for Thumb-2 */
             "lsrle %0, %1, #1 \n\t"         /* b <= 2: r = a >> 1 */
             "smmulgt %0, %0, %1 \n\t"       /* b >  2: r = (int64_t)(a * ff_inverse[b]) >> 32 */
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
    return r;
}
#define av_clip_uint8 av_clip_uint8_arm
/**
 * Clip a to the unsigned 8-bit range [0, 255] with a single
 * USAT (unsigned saturate) instruction (ARMv6+).
 */
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a));
    return x;
}
  46. #define av_clip_int8 av_clip_int8_arm
  47. static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
  48. {
  49. unsigned x;
  50. __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
  51. return x;
  52. }
#define av_clip_uint16 av_clip_uint16_arm
/**
 * Clip a to the unsigned 16-bit range [0, 65535] with a single
 * USAT (unsigned saturate) instruction (ARMv6+).
 */
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}
#define av_clip_int16 av_clip_int16_arm
/**
 * Clip a to the signed 16-bit range [-32768, 32767] with a single
 * SSAT (signed saturate) instruction (ARMv6+).
 */
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}
#define av_clip_uintp2 av_clip_uintp2_arm
/**
 * Clip a to the range [0, 2^p - 1] with a single USAT instruction
 * (ARMv6+). The saturation width is an instruction immediate, so p
 * must be a compile-time constant (enforced by the "i" constraint).
 */
static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
{
    unsigned x;
    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
    return x;
}
  74. #else /* HAVE_ARMV6 */
#define FASTDIV FASTDIV
/**
 * Pre-ARMv6 FASTDIV: approximate a / b as the high 32 bits of the
 * 64-bit product a * ff_inverse[b]. UMULL writes the full 64-bit
 * product; t receives the unused low half (early-clobbered so the
 * assembler never aliases it with an input).
 */
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}
  83. #endif /* HAVE_ARMV6 */
#define av_clipl_int32 av_clipl_int32_arm
/**
 * Clip a 64-bit value to the int32_t range [INT32_MIN, INT32_MAX].
 *
 * %Q2/%R2 are the low/high words of the 64-bit input. The value fits
 * in 32 bits exactly when the high word equals the sign extension of
 * the low word's bit 31, i.e. when hi + (lo >> 31) == 0 -- that sum
 * sets the Z flag, which steers the conditional instructions below.
 */
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t" /* y = hi + (lo >> 31); Z set iff a fits in int32 */
             "itet ne \n\t"                    /* IT block required when assembling for Thumb-2 */
             "mvnne %1, #1<<31 \n\t"           /* overflow: y = 0x7FFFFFFF */
             "moveq %0, %Q2 \n\t"              /* in range: x = low word unchanged */
             "eorne %0, %1, %R2, asr #31 \n\t" /* overflow: x = INT32_MAX (hi >= 0) or INT32_MIN (hi < 0) */
             : "=r"(x), "=&r"(y) : "r"(a):"cc");
    return x;
}
  96. #endif /* HAVE_INLINE_ASM */
  97. #endif /* AVUTIL_ARM_INTMATH_H */