/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/svq1enc.c
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "svq1.h"
#include "svq1enc_cb.h"

#undef NDEBUG
#include <assert.h>

typedef struct SVQ1Context {
    MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to make the motion estimation eventually independent of MpegEncContext, so this will be removed then (FIXME/XXX)
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    AVFrame current_picture;
    AVFrame last_picture;
    PutBitContext pb;
    GetBitContext gb;

    PutBitContext reorder_pb[6]; //why ooh why this sick breadth first order, everything is slower and more complex

    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;
    uint32_t *dummy;
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];

    int64_t rd_total;

    uint8_t *scratchbuf;
} SVQ1Context;

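/*
 * Write the SVQ1 frame header: the 22-bit frame code, a temporal
 * reference byte, the 2-bit frame type and, for intra frames, either a
 * 3-bit index into ff_svq1_frame_size_table or the escape value 7
 * followed by the explicit 12-bit width and height.
 */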
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == FF_I_TYPE) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        for (i = 0; i < 7; i++)
        {
            if ((ff_svq1_frame_size_table[i].width == s->frame_width) &&
                (ff_svq1_frame_size_table[i].height == s->frame_height))
            {
                put_bits(&s->pb, 3, i);
                break;
            }
        }

        if (i == 7)
        {
            put_bits(&s->pb, 3, 7);
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

#if HAVE_ALTIVEC
#undef vector
#endif

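/*
 * Recursively encode one block, starting from level 5 (16x16) and
 * optionally splitting into two halves down to level 0 (4x2) whenever the
 * split scores better than coding the block whole.  The geometry follows
 * from w = 2<<((level+2)>>1) and h = 2<<((level+1)>>1):
 *
 *   level:   5       4      3      2      1      0
 *   size:    16x16   16x8   8x8    8x4    4x4    4x2
 *
 * so w*h == 1<<(level+3), which is why the mean/variance terms below are
 * reduced with a >>(level+3) shift.  Intra blocks code the pixels
 * themselves, inter blocks the residual against ref; in both cases the
 * block is approximated by a mean plus up to six codebook stages of 16
 * vectors each, and the bits are written into s->reorder_pb[level].
 * Returns the lambda-weighted rate-distortion score of the chosen coding.
 */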
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra){
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7]= {0, 0, 0, 0, 0, 0};
    int w= 2<<((level+2)>>1);
    int h= 2<<((level+1)>>1);
    int size=w*h;
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    best_score=0;
    //FIXME optimize, this doesn't need to be done multiple times
    if(intra){
        codebook_sum= svq1_intra_codebook_sum[level];
        codebook= ff_svq1_intra_codebooks[level];
        mean_vlc= ff_svq1_intra_mean_vlc;
        multistage_vlc= ff_svq1_intra_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }else{
        codebook_sum= svq1_inter_codebook_sum[level];
        codebook= ff_svq1_inter_codebooks[level];
        mean_vlc= ff_svq1_inter_mean_vlc + 256;
        multistage_vlc= ff_svq1_inter_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride] - ref[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }

    best_count=0;
    best_score -= ((block_sum[0]*block_sum[0])>>(level+3));
    best_mean= (block_sum[0] + (size>>1)) >> (level+3);

    if(level<4){
        for(count=1; count<7; count++){
            int best_vector_score= INT_MAX;
            int best_vector_sum=-999, best_vector_mean=-999;
            const int stage= count-1;
            const int8_t *vector;

            for(i=0; i<16; i++){
                int sum= codebook_sum[stage*16 + i];
                int sqr, diff, score;

                vector = codebook + stage*size*16 + i*size;
                sqr = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
                diff= block_sum[stage] - sum;
                score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow
                if(score < best_vector_score){
                    int mean= (diff + (size>>1)) >> (level+3);
                    assert(mean >-300 && mean<300);
                    mean= av_clip(mean, intra?0:-256, 255);
                    best_vector_score= score;
                    best_vector[stage]= i;
                    best_vector_sum= sum;
                    best_vector_mean= mean;
                }
            }
            assert(best_vector_mean != -999);
            vector= codebook + stage*size*16 + best_vector[stage]*size;
            for(j=0; j<size; j++){
                block[stage+1][j] = block[stage][j] - vector[j];
            }
            block_sum[stage+1]= block_sum[stage] - best_vector_sum;
            best_vector_score +=
                lambda*(+ 1 + 4*count
                        + multistage_vlc[1+count][1]
                        + mean_vlc[best_vector_mean][1]);

            if(best_vector_score < best_score){
                best_score= best_vector_score;
                best_count= count;
                best_mean= best_vector_mean;
            }
        }
    }

    split=0;
    if(best_score > threshold && level){
        int score=0;
        int offset= (level&1) ? stride*h/2 : w/2;
        PutBitContext backup[6];

        for(i=level-1; i>=0; i--){
            backup[i]= s->reorder_pb[i];
        }
        score += encode_block(s, src         , ref         , decoded         , stride, level-1, threshold>>1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra);
        score += lambda;

        if(score < best_score){
            best_score= score;
            split=1;
        }else{
            for(i=level-1; i>=0; i--){
                s->reorder_pb[i]= backup[i];
            }
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if(!split){
        assert((best_mean >= 0 && best_mean<256) || !intra);
        assert(best_mean >= -256 && best_mean<256);
        assert(best_count >=0 && best_count<7);
        assert(level<4 || best_count==0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++){
            assert(best_vector[i]>=0 && best_vector[i]<16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean;
            }
        }
    }

    return best_score;
}

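/*
 * Encode one plane.  The source is copied a 16-line strip at a time into
 * a stack buffer and padded on the right and bottom edges to whole 16x16
 * blocks.  For P frames the borrowed MpegEncContext first runs the
 * half-pel motion search over the plane; then, for each 16x16 block, the
 * intra, inter (motion-compensated) and skip alternatives are coded into
 * temporary reorder_pb buffers and the one with the lowest
 * lambda-weighted score is copied into the main bitstream.
 */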
static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

    if(s->picture.pict_type == FF_P_TYPE){
        s->m.avctx= s->avctx;
        s->m.current_picture_ptr= &s->m.current_picture;
        s->m.last_picture_ptr   = &s->m.last_picture;
        s->m.last_picture.data[0]= ref_plane;
        s->m.linesize=
        s->m.last_picture.linesize[0]=
        s->m.new_picture.linesize[0]=
        s->m.current_picture.linesize[0]= stride;
        s->m.width= width;
        s->m.height= height;
        s->m.mb_width= block_width;
        s->m.mb_height= block_height;
        s->m.mb_stride= s->m.mb_width+1;
        s->m.b8_stride= 2*s->m.mb_width+1;
        s->m.f_code=1;
        s->m.pict_type= s->picture.pict_type;
        s->m.me_method= s->avctx->me_method;
        s->m.me.scene_change_score=0;
        s->m.flags= s->avctx->flags;
//        s->m.out_format = FMT_H263;
//        s->m.unrestricted_mv= 1;

        s->m.lambda= s->picture.quality;
        s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
        s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;

        if(!s->motion_val8[plane]){
            s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
            s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));
        }

        s->m.mb_type= s->mb_type;

        //dummies, to avoid segfaults
        s->m.current_picture.mb_mean=   (uint8_t *)s->dummy;
        s->m.current_picture.mb_var=    (uint16_t*)s->dummy;
        s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mb_type= s->dummy;

        s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
        s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
        s->m.dsp= s->dsp; //move
        ff_init_me(&s->m);

        s->m.me.dia_size= s->avctx->dia_size;
        s->m.first_slice_line=1;
        for (y = 0; y < block_height; y++) {
            uint8_t src[stride*16];

            s->m.new_picture.data[0]= src - y*16*stride; //ugly
            s->m.mb_y= y;

            for(i=0; i<16 && i + 16*y<height; i++){
                memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
                for(x=width; x<16*block_width; x++)
                    src[i*stride+x]= src[i*stride+x-1];
            }
            for(; i<16 && i + 16*y<16*block_height; i++)
                memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x= x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line=0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line=1;
    for (y = 0; y < block_height; y++) {
        uint8_t src[stride*16];

        for(i=0; i<16 && i + 16*y<height; i++){
            memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
            for(x=width; x<16*block_width; x++)
                src[i*stride+x]= src[i*stride+x-1];
        }
        for(; i<16 && i + 16*y<16*block_height; i++)
            memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

        s->m.mb_y= y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7*32];
            int count[3][6];
            int offset = y * 16 * stride + x * 16;
            uint8_t *decoded= decoded_plane + offset;
            uint8_t *ref= ref_plane + offset;
            int score[4]={0,0,0,0}, best;
            uint8_t *temp = s->scratchbuf;

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x= x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if(s->picture.pict_type == FF_I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
                for(i=0; i<6; i++){
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
                }
                if(s->picture.pict_type == FF_P_TYPE){
                    const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0]= vlc[1]*lambda;
                }
                score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1);
                for(i=0; i<6; i++){
                    count[0][i]= put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            }else
                score[0]= INT_MAX;

            best=0;

            if(s->picture.pict_type == FF_P_TYPE){
                const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                    for(i=0; i<6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb= s->reorder_pb[5];
                    mx= motion_ptr[0];
                    my= motion_ptr[1];
                    assert(mx>=-32 && mx<=31);
                    assert(my>=-32 && my<=31);
                    assert(pred_x>=-32 && pred_x<=31);
                    assert(pred_y>=-32 && pred_y<=31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5]= s->m.pb;
                    score[1] += lambda*put_bits_count(&s->reorder_pb[5]);

                    dxy= (mx&1) + 2*(my&1);

                    s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);

                    score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
                    best= score[1] <= score[0];

                    vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16);
                    score[2]+= vlc[1]*lambda;
                    if(score[2] < score[best] && mx==0 && my==0){
                        best=2;
                        s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for(i=0; i<6; i++){
                            count[2][i]=0;
                        }
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if(best==1){
                    for(i=0; i<6; i++){
                        count[1][i]= put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                }else{
                    motion_ptr[0                 ] = motion_ptr[1                 ]=
                    motion_ptr[2                 ] = motion_ptr[3                 ]=
                    motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]=
                    motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0;
                }
            }

            s->rd_total += score[best];

            for(i=5; i>=0; i--){
                ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]);
            }

            if(best==0){
                s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
            }
        }
        s->m.first_slice_line=0;
    }
    return 0;
}

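/*
 * Encoder initialization: record the frame and per-plane block
 * dimensions (the chroma planes of YUV410P are width/4 x height/4),
 * allocate the motion-estimation scratch buffers and macroblock arrays,
 * and let h263_encode_init() set up the mv_penalty tables used by the
 * motion search.
 */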
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;

    dsputil_init(&s->dsp, avctx);
    avctx->coded_frame= (AVFrame*)&s->picture;

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx= avctx;
    s->m.avctx= avctx;
    s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
    s->m.me.map       = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->mb_type        = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
    s->dummy          = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
    h263_encode_init(&s->m); //mv_penalty

    return 0;
}

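/*
 * Encode one frame into buf.  current_picture and last_picture are
 * swapped so the previously coded frame becomes the reference, the frame
 * type is derived from gop_size (every gop_size-th frame is intra), the
 * header and the three planes are written, and the bitstream is padded
 * to a 32-bit boundary.  Returns the number of bytes written.
 */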
static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
                             int buf_size, void *data)
{
    SVQ1Context * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    AVFrame temp;
    int i;

    if(avctx->pix_fmt != PIX_FMT_YUV410P){
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if(!s->current_picture.data[0]){
        avctx->get_buffer(avctx, &s->current_picture);
        avctx->get_buffer(avctx, &s->last_picture);
        s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16);
    }

    temp= s->current_picture;
    s->current_picture= s->last_picture;
    s->last_picture= temp;

    init_put_bits(&s->pb, buf, buf_size);

    *p = *pict;
    p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? FF_P_TYPE : FF_I_TYPE;
    p->key_frame = p->pict_type == FF_I_TYPE;

    svq1_write_header(s, p->pict_type);
    for(i=0; i<3; i++){
        if(svq1_encode_plane(s, i,
            s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
            s->frame_width  / (i?4:1), s->frame_height / (i?4:1),
            s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
            return -1;
    }

//    align_put_bits(&s->pb);
    while(put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    return put_bits_count(&s->pb) / 8;
}

static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for(i=0; i<3; i++){
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    return 0;
}

AVCodec svq1_encoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(SVQ1Context),
    svq1_encode_init,
    svq1_encode_frame,
    svq1_encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1"),
};
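
/*
 * A minimal sketch of how a caller might drive this encoder through the
 * public libavcodec API of this era.  The calls below (avcodec_open(),
 * avcodec_encode_video(), ...) belong to libavcodec, not to this file,
 * and the dimensions, gop_size, quality and output buffer size are
 * illustrative assumptions only:
 *
 *     AVCodec *codec;
 *     AVCodecContext *c;
 *     AVFrame *frame;
 *     uint8_t outbuf[200000];                 // assumed large enough for one frame
 *     int out_size;
 *
 *     avcodec_register_all();
 *     codec = avcodec_find_encoder(CODEC_ID_SVQ1);
 *     c = avcodec_alloc_context();
 *     c->width     = 320;
 *     c->height    = 240;
 *     c->time_base = (AVRational){1, 25};
 *     c->gop_size  = 25;                      // one intra frame per second
 *     c->pix_fmt   = PIX_FMT_YUV410P;         // the only format svq1_encode_frame() accepts
 *     avcodec_open(c, codec);
 *
 *     frame = avcodec_alloc_frame();
 *     // ... point frame->data[0..2] / frame->linesize[0..2] at YUV410P planes ...
 *     frame->quality = 4 * FF_QP2LAMBDA;      // picture.quality feeds the lambda used above
 *     out_size = avcodec_encode_video(c, outbuf, sizeof(outbuf), frame);
 */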