rv40.c

/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/rv40.c
 * RV40 decoder
 */

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "golomb.h"
#include "rv34.h"
#include "rv40vlc2.h"
#include "rv40data.h"

static VLC aic_top_vlc;
static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];

/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    int i;

    init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE,
             rv40_aic_top_vlc_bits,  1, 1,
             rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_STATIC);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE,
                 aic_mode1_vlc_bits[i],  1, 1,
                 aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_STATIC);
    }
    for(i = 0; i < AIC_MODE2_NUM; i++){
        init_vlc(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
                 aic_mode2_vlc_bits[i],  1, 1,
                 aic_mode2_vlc_codes[i], 2, 2, INIT_VLC_USE_STATIC);
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++)
        init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                        ptype_vlc_bits[i],  1, 1,
                        ptype_vlc_codes[i], 1, 1,
                        ptype_vlc_syms,     1, 1, INIT_VLC_USE_STATIC);
    for(i = 0; i < NUM_BTYPE_VLCS; i++)
        init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                        btype_vlc_bits[i],  1, 1,
                        btype_vlc_codes[i], 1, 1,
                        btype_vlc_syms,     1, 1, INIT_VLC_USE_STATIC);
}

/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
    if(val < 0)
        val = dim[get_bits1(gb) - val];
    if(!val){
        do{
            t = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}
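
/* A minimal worked example of the escape path above, assuming a table entry of 0
 * was selected (the byte values are hypothetical, not taken from the
 * rv40_standard_widths/rv40_standard_heights tables): bytes are accumulated as
 * val += t << 2 until a byte other than 0xFF is read, so the sequence
 * 0xFF, 0x28 yields (0xFF << 2) + (0x28 << 2) = 1020 + 160 = 1180 pixels.
 */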

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;

    memset(si, 0, sizeof(SliceInfo));

    if(get_bits1(gb))
        return -1;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return -1;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if(avcodec_check_dimensions(r->s.avctx, w, h) < 0)
        return -1;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}
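
/* Summary of the slice header layout, derived directly from the get_bits() calls
 * above (informational only):
 *   1 bit   must be 0, otherwise the slice is rejected
 *   2 bits  slice type (a value of 1 is remapped to 0)
 *   5 bits  quantizer
 *   2 bits  must be 0
 *   2 bits  VLC set index
 *   1 bit   skipped
 *  13 bits  PTS
 *   picture dimensions - always when the slice type is 0, otherwise only when a
 *           further 1-bit flag reads 0
 *   start macroblock number, using the bit count returned by ff_rv34_get_start_offset()
 */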

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

    for(i = 0; i < 4; i++, dst += s->b4_stride){
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using the VLC chosen by the prediction pattern.
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients.
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left.
             */
            A = ptr[-s->b4_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-s->b4_stride];
            C = ptr[-1];
            pattern = A + (B << 4) + (C << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2);
                *ptr++ = v/9;
                *ptr++ = v%9;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}
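
/* Worked example for the paired mode decoding above (the symbol value is
 * hypothetical): a decoded symbol v = 31 stores the two prediction modes
 * 31 / 9 == 3 and 31 % 9 == 4 into consecutive positions, since the pair
 * is packed in base 9.
 */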

/**
 * Decode macroblock information.
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int blocks[RV34_MB_TYPES] = {0};
    int count = 0;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }
    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;

    if(r->avail_cache[5-1])
        blocks[r->mb_type[mb_pos - 1]]++;
    if(r->avail_cache[5-4]){
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[5-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[5-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
    }

    for(i = 0; i < RV34_MB_TYPES; i++){
        if(blocks[i] > count){
            count = blocks[i];
            prev_type = i;
        }
    }
    if(s->pict_type == FF_P_TYPE){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}
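
/* Sketch of the skip-run logic above with a hypothetical Golomb value of 3:
 * mb_skip_run becomes 3 + 1 = 4, so this call and the next two return
 * RV34_MB_SKIP (decrementing the counter each time) and the fourth call falls
 * through to decode a real macroblock type.
 */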

#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
/**
 * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
 */
static inline void rv40_weak_loop_filter(uint8_t *src, const int step,
                                         const int filter_p1, const int filter_q1,
                                         const int alpha, const int beta,
                                         const int lim_p0q0,
                                         const int lim_q1, const int lim_p1,
                                         const int diff_p1p0, const int diff_q1q0,
                                         const int diff_p1p2, const int diff_q1q2)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int t, u, diff;

    t = src[0*step] - src[-1*step];
    if(!t)
        return;
    u = (alpha * FFABS(t)) >> 7;
    if(u > 3 - (filter_p1 && filter_q1))
        return;

    t <<= 2;
    if(filter_p1 && filter_q1)
        t += src[-2*step] - src[1*step];
    diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
    src[-1*step] = cm[src[-1*step] + diff];
    src[ 0*step] = cm[src[ 0*step] - diff];
    if(FFABS(diff_p1p2) <= beta && filter_p1){
        t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
        src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
    }
    if(FFABS(diff_q1q2) <= beta && filter_q1){
        t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
        src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
    }
}
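
/* Worked numbers for the weak filter above, writing p1, p0 | q0, q1 for
 * src[-2*step], src[-1*step], src[0*step], src[1*step] (the pixel values are
 * hypothetical): with p1 = 70, p0 = 72, q0 = 80, q1 = 79 and both filter flags
 * set, t = ((80 - 72) << 2) + (70 - 79) = 23, so
 * diff = CLIP_SYMM((23 + 4) >> 3, lim_p0q0), i.e. 3 clipped to the
 * +/-lim_p0q0 range, which is then added to p0 and subtracted from q0.
 */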

static inline void rv40_adaptive_loop_filter(uint8_t *src, const int step,
                                             const int stride, const int dmode,
                                             const int lim_q1, const int lim_p1,
                                             const int alpha,
                                             const int beta, const int beta2,
                                             const int chroma, const int edge)
{
    int diff_p1p0[4], diff_q1q0[4], diff_p1p2[4], diff_q1q2[4];
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    uint8_t *ptr;
    int flag_strong0 = 1, flag_strong1 = 1;
    int filter_p1, filter_q1;
    int i;
    int lims;

    for(i = 0, ptr = src; i < 4; i++, ptr += stride){
        diff_p1p0[i] = ptr[-2*step] - ptr[-1*step];
        diff_q1q0[i] = ptr[ 1*step] - ptr[ 0*step];
        sum_p1p0 += diff_p1p0[i];
        sum_q1q0 += diff_q1q0[i];
    }
    filter_p1 = FFABS(sum_p1p0) < (beta<<2);
    filter_q1 = FFABS(sum_q1q0) < (beta<<2);
    if(!filter_p1 && !filter_q1)
        return;

    for(i = 0, ptr = src; i < 4; i++, ptr += stride){
        diff_p1p2[i] = ptr[-2*step] - ptr[-3*step];
        diff_q1q2[i] = ptr[ 1*step] - ptr[ 2*step];
        sum_p1p2 += diff_p1p2[i];
        sum_q1q2 += diff_q1q2[i];
    }

    if(edge){
        flag_strong0 = filter_p1 && (FFABS(sum_p1p2) < beta2);
        flag_strong1 = filter_q1 && (FFABS(sum_q1q2) < beta2);
    }else{
        flag_strong0 = flag_strong1 = 0;
    }
    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;

    if(flag_strong0 && flag_strong1){ /* strong filtering */
        for(i = 0; i < 4; i++, src += stride){
            int sflag, p0, q0, p1, q1;
            int t = src[0*step] - src[-1*step];

            if(!t) continue;
            sflag = (alpha * FFABS(t)) >> 7;
            if(sflag > 1) continue;

            p0 = (25*src[-3*step] + 26*src[-2*step]
                  + 26*src[-1*step]
                  + 26*src[ 0*step] + 25*src[ 1*step] + rv40_dither_l[dmode + i]) >> 7;
            q0 = (25*src[-2*step] + 26*src[-1*step]
                  + 26*src[ 0*step]
                  + 26*src[ 1*step] + 25*src[ 2*step] + rv40_dither_r[dmode + i]) >> 7;
            if(sflag){
                p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
                q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
            }
            p1 = (25*src[-4*step] + 26*src[-3*step]
                  + 26*src[-2*step]
                  + 26*p0 + 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
            q1 = (25*src[-1*step] + 26*q0
                  + 26*src[ 1*step]
                  + 26*src[ 2*step] + 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;
            if(sflag){
                p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
                q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
            }
            src[-2*step] = p1;
            src[-1*step] = p0;
            src[ 0*step] = q0;
            src[ 1*step] = q1;
            if(!chroma){
                src[-3*step] = (25*src[-1*step] + 26*src[-2*step] + 51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
                src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] + 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
            }
        }
    }else if(filter_p1 && filter_q1){
        for(i = 0; i < 4; i++, src += stride)
            rv40_weak_loop_filter(src, step, 1, 1, alpha, beta, lims, lim_q1, lim_p1,
                                  diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
    }else{
        for(i = 0; i < 4; i++, src += stride)
            rv40_weak_loop_filter(src, step, filter_p1, filter_q1,
                                  alpha, beta, lims>>1, lim_q1>>1, lim_p1>>1,
                                  diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
    }
}

static void rv40_v_loop_filter(uint8_t *src, int stride, int dmode,
                               int lim_q1, int lim_p1,
                               int alpha, int beta, int beta2, int chroma, int edge){
    rv40_adaptive_loop_filter(src, 1, stride, dmode, lim_q1, lim_p1,
                              alpha, beta, beta2, chroma, edge);
}
static void rv40_h_loop_filter(uint8_t *src, int stride, int dmode,
                               int lim_q1, int lim_p1,
                               int alpha, int beta, int beta2, int chroma, int edge){
    rv40_adaptive_loop_filter(src, stride, 1, dmode, lim_q1, lim_p1,
                              alpha, beta, beta2, chroma, edge);
}

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};

#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };
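
/* Sketch of how bits in the 16-bit luma masks above map to the 4x4 subblocks of
 * a macroblock (consistent with the MASK_Y_* values and the cbp format described
 * in rv40_loop_filter() below: LSB = top-left subblock, one nibble per row):
 *
 *   bits  0  1  2  3   <- top row   (MASK_Y_TOP_ROW  = 0x000F)
 *   bits  4  5  6  7
 *   bits  8  9 10 11
 *   bits 12 13 14 15   <- last row  (MASK_Y_LAST_ROW = 0xF000)
 *
 * The chroma masks follow the same scheme with two subblocks per row.
 */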

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< current macroblock and its neighbours types
    /**
     * flags indicating that the macroblock can be filtered with the strong filter;
     * set only for intra-coded MBs and MBs with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for luma part of current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for chroma part of current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of
     * an 8x8 block with different enough motion vectors
     */
    int mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->current_picture_ptr->mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        int y_to_deblock, c_to_deblock[2];

        q = s->current_picture_ptr->qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;

        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->current_picture_ptr->mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR] <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]  << 1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] || mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i] << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || mb_strong[POS_CUR] || mb_strong[POS_BOTTOM])
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_h_loop_filter(Y+4*s->linesize, s->linesize, dither,
                                       y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                       clip_cur,
                                       alpha, beta, betaY, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_v_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       clip_left,
                                       alpha, beta, betaY, 0, 0);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
                    rv40_h_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                       alpha, beta, betaY, 0, 1);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_v_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       clip_left,
                                       alpha, beta, betaY, 0, 1);
                }
            }
        }
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;

                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_h_loop_filter(C+4*s->uvlinesize, s->uvlinesize, i*8,
                                           clip_bot,
                                           clip_cur,
                                           alpha, beta, betaC, 1, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k]    & (MASK_CUR << (ij-1))  ? clip[POS_CUR]  : 0;
                        rv40_v_loop_filter(C, s->uvlinesize, j*8,
                                           clip_cur,
                                           clip_left,
                                           alpha, beta, betaC, 1, 0);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_h_loop_filter(C, s->uvlinesize, i*8,
                                           clip_cur,
                                           clip_top,
                                           alpha, beta, betaC, 1, 1);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_v_loop_filter(C, s->uvlinesize, j*8,
                                           clip_cur,
                                           clip_left,
                                           alpha, beta, betaC, 1, 1);
                    }
                }
            }
        }
    }
}

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    r->rv30 = 0;
    ff_rv34_decode_init(avctx);
    if(!aic_top_vlc.bits)
        rv40_init_tables();
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    return 0;
}

AVCodec rv40_decoder = {
    "rv40",
    CODEC_TYPE_VIDEO,
    CODEC_ID_RV40,
    sizeof(RV34DecContext),
    rv40_decode_init,
    NULL,
    ff_rv34_decode_end,
    ff_rv34_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush     = ff_mpeg_flush,
    .long_name = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
    .pix_fmts  = ff_pixfmt_list_420,
};