/*
 * High quality image resampling with polyphase filters
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/ppc/imgresample_altivec.c
 * High quality image resampling with polyphase filters - AltiVec bits
 */

#include "util_altivec.h"

#define FILTER_BITS 8
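
/* As the code below implies, filter[] holds four fixed-point taps in
   Q8 format (256 represents 1.0): each accumulated sum is shifted
   right by FILTER_BITS before being stored. */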

typedef union {
    vector signed short v;
    signed short s[8];
} vec_ss;
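
/* Vertically resample one row with a 4-tap filter: for each x,
   dst[x] = clip_uint8((src[x + 0*wrap] * filter[0] + ... +
                        src[x + 3*wrap] * filter[3]) >> FILTER_BITS),
   where wrap is the byte distance between successive source lines. */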
void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
                          int wrap, int16_t *filter)
{
    int sum, i;
    const uint8_t *s;
    vector unsigned char *tv, tmp, dstv, zero;
    vec_ss srchv[4], srclv[4], fv[4];
    vector signed short zeros, sumhv, sumlv;

    s = src;

    for (i = 0; i < 4; i++) {
        /*
           The vec_madds later on does an implicit >>15 on the result.
           Since FILTER_BITS is 8, and we have 15 bits of magnitude in
           a signed short, we have just enough bits to pre-shift our
           filter constants <<7 to compensate for vec_madds.
        */
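        /* Worked example (illustrative, not from the original source):
           a tap of filter[i] == 64 (0.25 in Q8) is pre-shifted to
           64 << 7 == 8192, so vec_madds yields (src * 8192) >> 15 ==
           src >> 2 -- the same value, up to rounding, as
           (src * 64) >> FILTER_BITS in the scalar paths below. */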
        fv[i].s[0] = filter[i] << (15 - FILTER_BITS);
        fv[i].v = vec_splat(fv[i].v, 0);
    }

    zero  = vec_splat_u8(0);
    zeros = vec_splat_s16(0);

    /*
       When we're resampling, we'd ideally like both our input and
       output buffers to be 16-byte aligned, so we can do both aligned
       reads and writes. Sadly we can't always have this at the moment, so
       we opt for aligned writes, as unaligned writes have a huge overhead.
       To do this, do enough scalar resamples to get dst 16-byte aligned.
    */
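    /* Example (illustrative, not from the original source): if dst ends
       in 0x9, then (-(int)dst) & 0xf == 7, so the loop below does seven
       scalar samples to bring dst up to the next 16-byte boundary. */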
    i = (-(int)dst) & 0xf;
    while (i > 0) {
        sum = s[0 * wrap] * filter[0] +
              s[1 * wrap] * filter[1] +
              s[2 * wrap] * filter[2] +
              s[3 * wrap] * filter[3];
        sum = sum >> FILTER_BITS;
        if (sum < 0) sum = 0; else if (sum > 255) sum = 255;
        dst[0] = sum;
        dst++;
        s++;
        dst_width--;
        i--;
    }

    /* Do our AltiVec resampling on 16 pixels at once. */
    while (dst_width >= 16) {
        /* Read 16 (potentially unaligned) bytes from each of
           4 lines into 4 vectors, and split them into shorts.
           Interleave the multiply/accumulate for the resample
           filter with the loads to hide the 3 cycle latency
           the vec_madds have. */
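        /* Unaligned-load idiom: vec_lvsl returns a permute mask encoding
           the pointer's misalignment, and vec_perm of the two aligned
           quadwords straddling it extracts the 16 bytes we want. */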
        tv = (vector unsigned char *) &s[0 * wrap];
        tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[0 * wrap]));
        srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
        srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
        sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
        sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);

        tv = (vector unsigned char *) &s[1 * wrap];
        tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
        srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
        srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
        sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
        sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);

        tv = (vector unsigned char *) &s[2 * wrap];
        tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
        srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
        srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
        sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
        sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);

        tv = (vector unsigned char *) &s[3 * wrap];
        tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
        srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
        srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
        sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
        sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);

        /* Pack the results into our destination vector,
           and do an aligned write of that back to memory. */
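        /* vec_packsu saturates each 16-bit sum to the unsigned 0..255
           range, so the pack itself provides the clamp that the scalar
           paths do explicitly. */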
        dstv = vec_packsu(sumhv, sumlv);
        vec_st(dstv, 0, (vector unsigned char *) dst);

        dst += 16;
        s += 16;
        dst_width -= 16;
    }

    /* If there are any leftover pixels, resample them
       with the slow scalar method. */
    while (dst_width > 0) {
        sum = s[0 * wrap] * filter[0] +
              s[1 * wrap] * filter[1] +
              s[2 * wrap] * filter[2] +
              s[3 * wrap] * filter[3];
        sum = sum >> FILTER_BITS;
        if (sum < 0) sum = 0; else if (sum > 255) sum = 255;
        dst[0] = sum;
        dst++;
        s++;
        dst_width--;
    }
}