/*
 * RV40 decoder motion compensation functions
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/rv40dsp.c
 * RV40 decoder motion compensation functions
 */
#include <assert.h>

#include "avcodec.h"
#include "dsputil.h"
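
/*
 * RV40_LOWPASS expands to the luma interpolation primitives.  Each output
 * pixel is a six-tap filter with taps (1, -5, C1, C2, -5, 1) applied to the
 * six nearest source pixels, rounded by adding (1 << (SHIFT-1)) before the
 * shift; the OP macros clip the result through the ff_cropTbl lookup.
 */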
#define RV40_LOWPASS(OPNAME, OP) \
static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                                     const int h, const int C1, const int C2, const int SHIFT){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[ 0*srcStride];\
        const int src1  = src[ 1*srcStride];\
        const int src2  = src[ 2*srcStride];\
        const int src3  = src[ 3*srcStride];\
        const int src4  = src[ 4*srcStride];\
        const int src5  = src[ 5*srcStride];\
        const int src6  = src[ 6*srcStride];\
        const int src7  = src[ 7*srcStride];\
        const int src8  = src[ 8*srcStride];\
        const int src9  = src[ 9*srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}
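
/*
 * RV40_MC expands to one motion-compensation function per luma subpel
 * position: mcXY_c handles horizontal quarter-pel offset X and vertical
 * offset Y.  The coefficient triple selects the filter phase:
 * (52, 20, 6) for the 1/4 position, (20, 20, 5) for the 1/2 position and
 * (20, 52, 6) for the 3/4 position.  Diagonal positions filter horizontally
 * into the on-stack 'full' buffer first, then vertically from 'full_mid'
 * into the destination.
 */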
#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}
#define op_avg(a, b) a = (((a)+cm[b]+1)>>1)
#define op_put(a, b) a = cm[b]

RV40_LOWPASS(put_ , op_put)
RV40_LOWPASS(avg_ , op_avg)

#undef op_avg
#undef op_put

RV40_MC(put_, 8)
RV40_MC(put_, 16)
RV40_MC(avg_, 8)
RV40_MC(avg_, 16)
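
/* Rounding bias added before the >>6 in the chroma interpolation below,
 * selected by the chroma subpel position as rv40_bias[y>>1][x>>1]. */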
static const int rv40_bias[4][4] = {
    {  0, 16, 32, 16 },
    { 32, 28, 32, 28 },
    {  0, 32, 16, 32 },
    { 32, 28, 32, 28 }
};
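
/*
 * RV40_CHROMA_MC expands to bilinear chroma interpolation with weights
 * A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y and D = x*y, which always sum
 * to 64 (hence the >>6 in the OP macros below).  When D is zero the 2-D
 * filter degenerates into a 1-D filter along one axis, handled by the
 * 'step' fast path.
 */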
#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A = (8-x)*(8-y);\
    const int B = (  x)*(8-y);\
    const int C = (8-x)*(  y);\
    const int D = (  x)*(  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A = (8-x)*(8-y);\
    const int B = (  x)*(8-y);\
    const int C = (8-x)*(  y);\
    const int D = (  x)*(  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
    \
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}
#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)

RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)
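
/*
 * Fill the RV40 function pointer tables.  Row 0 of each table holds the
 * 16x16 variants, row 1 the 8x8 ones; the entry index is x + 4*y for luma
 * subpel position (x, y).  The full-pel entries reuse the H.264
 * copy/average functions already installed in the context.
 */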
void ff_rv40dsp_init(DSPContext* c, AVCodecContext *avctx) {
    c->put_rv40_qpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0];
    c->put_rv40_qpel_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c;
    c->put_rv40_qpel_pixels_tab[0][ 2] = put_rv40_qpel16_mc20_c;
    c->put_rv40_qpel_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c;
    c->put_rv40_qpel_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c;
    c->put_rv40_qpel_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c;
    c->put_rv40_qpel_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c;
    c->put_rv40_qpel_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c;
    c->put_rv40_qpel_pixels_tab[0][ 8] = put_rv40_qpel16_mc02_c;
    c->put_rv40_qpel_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c;
    c->put_rv40_qpel_pixels_tab[0][10] = put_rv40_qpel16_mc22_c;
    c->put_rv40_qpel_pixels_tab[0][11] = put_rv40_qpel16_mc32_c;
    c->put_rv40_qpel_pixels_tab[0][12] = put_rv40_qpel16_mc03_c;
    c->put_rv40_qpel_pixels_tab[0][13] = put_rv40_qpel16_mc13_c;
    c->put_rv40_qpel_pixels_tab[0][14] = put_rv40_qpel16_mc23_c;

    c->avg_rv40_qpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0];
    c->avg_rv40_qpel_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c;
    c->avg_rv40_qpel_pixels_tab[0][ 2] = avg_rv40_qpel16_mc20_c;
    c->avg_rv40_qpel_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c;
    c->avg_rv40_qpel_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c;
    c->avg_rv40_qpel_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c;
    c->avg_rv40_qpel_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c;
    c->avg_rv40_qpel_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c;
    c->avg_rv40_qpel_pixels_tab[0][ 8] = avg_rv40_qpel16_mc02_c;
    c->avg_rv40_qpel_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c;
    c->avg_rv40_qpel_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c;
    c->avg_rv40_qpel_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c;
    c->avg_rv40_qpel_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c;
    c->avg_rv40_qpel_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c;
    c->avg_rv40_qpel_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c;

    c->put_rv40_qpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0];
    c->put_rv40_qpel_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c;
    c->put_rv40_qpel_pixels_tab[1][ 2] = put_rv40_qpel8_mc20_c;
    c->put_rv40_qpel_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c;
    c->put_rv40_qpel_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c;
    c->put_rv40_qpel_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c;
    c->put_rv40_qpel_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c;
    c->put_rv40_qpel_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c;
    c->put_rv40_qpel_pixels_tab[1][ 8] = put_rv40_qpel8_mc02_c;
    c->put_rv40_qpel_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c;
    c->put_rv40_qpel_pixels_tab[1][10] = put_rv40_qpel8_mc22_c;
    c->put_rv40_qpel_pixels_tab[1][11] = put_rv40_qpel8_mc32_c;
    c->put_rv40_qpel_pixels_tab[1][12] = put_rv40_qpel8_mc03_c;
    c->put_rv40_qpel_pixels_tab[1][13] = put_rv40_qpel8_mc13_c;
    c->put_rv40_qpel_pixels_tab[1][14] = put_rv40_qpel8_mc23_c;

    c->avg_rv40_qpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0];
    c->avg_rv40_qpel_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c;
    c->avg_rv40_qpel_pixels_tab[1][ 2] = avg_rv40_qpel8_mc20_c;
    c->avg_rv40_qpel_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c;
    c->avg_rv40_qpel_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c;
    c->avg_rv40_qpel_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c;
    c->avg_rv40_qpel_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c;
    c->avg_rv40_qpel_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c;
    c->avg_rv40_qpel_pixels_tab[1][ 8] = avg_rv40_qpel8_mc02_c;
    c->avg_rv40_qpel_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c;
    c->avg_rv40_qpel_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c;
    c->avg_rv40_qpel_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c;
    c->avg_rv40_qpel_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c;
    c->avg_rv40_qpel_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c;
    c->avg_rv40_qpel_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c;

    c->put_rv40_chroma_pixels_tab[0] = put_rv40_chroma_mc8_c;
    c->put_rv40_chroma_pixels_tab[1] = put_rv40_chroma_mc4_c;
    c->avg_rv40_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c;
    c->avg_rv40_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c;
}
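
/*
 * A minimal sketch of how a caller is expected to pick an entry from these
 * tables (modelled on the lookup in rv34.c; the names lx, ly, is16x16 and
 * the surrounding setup are illustrative, not part of this file's API):
 *
 *     int dxy = ly*4 + lx;                      // luma subpel position, 0..15
 *     qpel_mc_func *luma_tab = is16x16 ? c->put_rv40_qpel_pixels_tab[0]
 *                                      : c->put_rv40_qpel_pixels_tab[1];
 *     luma_tab[dxy](dst_y, src_y, stride);      // interpolate one luma block
 *     c->put_rv40_chroma_pixels_tab[0](dst_u, src_u, c_stride, 8, cx, cy);
 */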