swscale_ppc_template.c
/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/mem_internal.h"
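
/*
 * Note: this is a template, not a standalone translation unit. It is meant
 * to be included by the PowerPC SIMD implementation files (in FFmpeg these
 * are swscale_altivec.c and swscale_vsx.c), which are expected to define the
 * FUNC() name-mangling macro, vzero, and the load/store helpers used below
 * (LOAD_FILTER, LOAD_L1, yuv2planeX_8, unaligned_load, FIRST_LOAD,
 * LOAD_SRCV, LOAD_SRCV8, GET_VF4, GET_VFD, UPDATE_PTR, VEC_ST,
 * VEC_MERGEH, VEC_MERGEL).
 */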

static void FUNC(yuv2planeX_8_16)(const int16_t *filter, int filterSize,
                                  const int16_t **src, uint8_t *dest,
                                  const uint8_t *dither, int offset, int x)
{
    register int i, j;
    LOCAL_ALIGNED(16, int, val, [16]);
    vector signed int vo1, vo2, vo3, vo4;
    vector unsigned short vs1, vs2;
    vector unsigned char vf;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));

    for (i = 0; i < 16; i++)
        val[i] = dither[(x + i + offset) & 7] << 12;

    vo1 = vec_ld(0,  val);
    vo2 = vec_ld(16, val);
    vo3 = vec_ld(32, val);
    vo4 = vec_ld(48, val);

    for (j = 0; j < filterSize; j++) {
        unsigned int joffset = j << 1;
        unsigned int xoffset = x << 1;
        vector unsigned char av_unused perm;
        vector signed short l1, vLumFilter;
        LOAD_FILTER(vLumFilter, filter);
        vLumFilter = vec_splat(vLumFilter, 0);
        LOAD_L1(l1, src[j], perm);
        yuv2planeX_8(vo1, vo2, l1, src[j], x,     perm, vLumFilter);
        yuv2planeX_8(vo3, vo4, l1, src[j], x + 8, perm, vLumFilter);
    }
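
    /*
     * The accumulators hold 8-bit output scaled by 2^19: 12 bits from the
     * fixed-point filter coefficients plus 7 fractional bits carried by the
     * 16-bit intermediate samples. Shift that scale back out, then pack with
     * unsigned saturation down to 16 output bytes.
     */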
    vo1 = vec_sra(vo1, altivec_vectorShiftInt19);
    vo2 = vec_sra(vo2, altivec_vectorShiftInt19);
    vo3 = vec_sra(vo3, altivec_vectorShiftInt19);
    vo4 = vec_sra(vo4, altivec_vectorShiftInt19);
    vs1 = vec_packsu(vo1, vo2);
    vs2 = vec_packsu(vo3, vo4);
    vf  = vec_packsu(vs1, vs2);
    VEC_ST(vf, 0, dest);
}
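
/*
 * Scalar edge handler: processes the pixels in [x, dstW) that the vector
 * path does not cover, i.e. the unaligned head of the destination row and
 * the tail narrower than 16 pixels.
 */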
static inline void yuv2planeX_u(const int16_t *filter, int filterSize,
                                const int16_t **src, uint8_t *dest, int dstW,
                                const uint8_t *dither, int offset, int x)
{
    int i, j;

    for (i = x; i < dstW; i++) {
        int t = dither[(i + offset) & 7] << 12;
        for (j = 0; j < filterSize; j++)
            t += src[j][i] * filter[j];
        dest[i] = av_clip_uint8(t >> 19);
    }
}
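
/*
 * Vertical scaler entry point: handle the misaligned start of the row in C,
 * run the 16-pixel AltiVec kernel over the aligned middle, then finish any
 * remaining tail pixels in C.
 */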
static void FUNC(yuv2planeX)(const int16_t *filter, int filterSize,
                             const int16_t **src, uint8_t *dest, int dstW,
                             const uint8_t *dither, int offset)
{
    int dst_u = -(uintptr_t)dest & 15;
    int i;

    yuv2planeX_u(filter, filterSize, src, dest, dst_u, dither, offset, 0);

    for (i = dst_u; i < dstW - 15; i += 16)
        FUNC(yuv2planeX_8_16)(filter, filterSize, src, dest + i, dither,
                              offset, i);

    yuv2planeX_u(filter, filterSize, src, dest, dstW, dither, offset, i);
}
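
/*
 * Horizontal scaler: for each output pixel i, convolve filterSize 8-bit
 * source samples starting at filterPos[i] with the 16-bit filter taps for
 * that pixel, and store the clipped 15-bit result. Dedicated vector paths
 * cover the 4-, 8- and 16-tap cases; larger filters use the generic vector
 * loop in the default case, and filter sizes that are not a multiple of 4
 * fall back to plain C.
 */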
static void FUNC(hScale_real)(SwsContext *c, int16_t *dst, int dstW,
                              const uint8_t *src, const int16_t *filter,
                              const int32_t *filterPos, int filterSize)
{
    register int i;
    LOCAL_ALIGNED(16, int, tempo, [4]);
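
    /* Filter lengths that are not a multiple of 4 do not map onto the
     * vector paths below, so evaluate them with a plain scalar loop. */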
    if (filterSize % 4) {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val    = 0;
            for (j = 0; j < filterSize; j++)
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
        }
    } else
        switch (filterSize) {
        case 4:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_v, filter_v;
                vector signed int val_vEven, val_s;
                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                // now put our elements in the even slots
                src_v = vec_mergeh(src_v, (vector signed short)vzero);
                GET_VF4(i, filter_v, filter);
                val_vEven = vec_mule(src_v, filter_v);
                val_s     = vec_sums(val_vEven, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
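
        /* 8 taps: one vec_msums of a full vector of 8 zero-extended source
         * bytes against the 8 filter coefficients for this pixel. */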
        case 8:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];
                vector unsigned char src_vF, av_unused src_v0, av_unused src_v1;
                vector unsigned char av_unused permS;
                vector signed short src_v, filter_v;
                vector signed int val_v, val_s;
                FIRST_LOAD(src_v0, srcPos, src, permS);
                LOAD_SRCV8(srcPos, 0, src, permS, src_v0, src_v1, src_vF);
                src_v = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                filter_v = vec_ld(i << 4, filter);
                val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
                val_s = vec_sums(val_v, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
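
        /* 16 taps: expand the 16 source bytes into two short vectors and
         * accumulate both halves with chained vec_msums. */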
        case 16:
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_vF = unaligned_load(srcPos, src);
                vector signed short src_vA = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                vector signed short src_vB = // vec_unpackh sign-extends...
                        (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                vector signed short filter_v0 = vec_ld(i << 5, filter);
                vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);

                vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
                vector signed int val_v   = vec_msums(src_vB, filter_v1, val_acc);

                vector signed int val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
            break;
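
        /* Generic case: walk the filter 16 taps at a time, with one extra
         * 8-tap step for the remainder. */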
        default:
            for (i = 0; i < dstW; i++) {
                register int j, av_unused offset = i * 2 * filterSize;
                register int srcPos = filterPos[i];

                vector signed int val_s, val_v = (vector signed int)vzero;
                vector signed short av_unused filter_v0R;
                vector unsigned char av_unused permF, av_unused src_v0, av_unused permS;
                FIRST_LOAD(filter_v0R, offset, filter, permF);
                FIRST_LOAD(src_v0, srcPos, src, permS);

                for (j = 0; j < filterSize - 15; j += 16) {
                    vector unsigned char av_unused src_v1, src_vF;
                    vector signed short av_unused filter_v1R, av_unused filter_v2R,
                                        filter_v0, filter_v1, src_vA, src_vB;
                    vector signed int val_acc;
                    LOAD_SRCV(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    src_vA = // vec_unpackh sign-extends...
                             (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    src_vB = // vec_unpackh sign-extends...
                             (vector signed short)(VEC_MERGEL((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v0, 0);
                    GET_VFD(i, j, filter, filter_v1R, filter_v2R, permF, filter_v1, 16);

                    val_acc = vec_msums(src_vA, filter_v0, val_v);
                    val_v   = vec_msums(src_vB, filter_v1, val_acc);
                    UPDATE_PTR(filter_v2R, filter_v0R, src_v1, src_v0);
                }

                if (j < filterSize - 7) {
                    // loading src_v0 is useless, it's already done above
                    vector unsigned char av_unused src_v1, src_vF;
                    vector signed short src_v, av_unused filter_v1R, filter_v;
                    LOAD_SRCV8(srcPos, j, src, permS, src_v0, src_v1, src_vF);
                    src_v = // vec_unpackh sign-extends...
                            (vector signed short)(VEC_MERGEH((vector unsigned char)vzero, src_vF));
                    GET_VFD(i, j, filter, filter_v0R, filter_v1R, permF, filter_v, 0);
                    val_v = vec_msums(src_v, filter_v, val_v);
                }
                val_s = vec_sums(val_v, vzero);

                VEC_ST(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
}