vf_mcdeint.c

/*
 * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
Known Issues:
* The motion estimation is somewhat at the mercy of the input. If the input
  frames are created purely by spatial interpolation, then for example a thin
  black line or another random, non-interpolatable pattern will cause problems.
  Note: completely ignoring the "unavailable" lines during motion estimation
  did not look any better, so the most obvious solution would be to improve
  tfields or to penalize problematic motion vectors ...
* If non-iterative ME is used, snow currently ignores the OBMC window and as a
  result sometimes creates artifacts.
* Only past frames are used; ideally future frames should be used too.
  Something like filtering the whole movie first forward and then backward
  seems like an interesting idea, but the current filter framework is FAR from
  supporting such things.
* Combining the motion-compensated image with the input image is also not as
  trivial as it seems: blindly taking even lines from one and odd lines from
  the other does not work at all, because ME/MC sometimes simply has nothing
  in the previous frames that matches the current one. The current algorithm
  has been found by trial and error and almost certainly can be improved ...
*/
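/*
Overview: each input frame is pushed through a snow encoder that is configured
to do motion estimation/compensation only (CODEC_FLAG2_MEMC_ONLY); the
encoder's prediction supplies the lines of the missing field, which are then
corrected towards the source, while the lines of the present field are copied
unchanged (see filter() below). The filter is normally placed after a
field-splitting filter such as tfields; something like
"-vf tfields=1,mcdeint=2:1:10" is the usual pattern, but the exact option
syntax should be checked against the MPlayer manual.
*/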
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>

#include "mp_msg.h"
#include "cpudetect.h"

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"

#undef fprintf
#undef free
#undef malloc

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "vd_ffmpeg.h"

#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
//===========================================================================//

struct vf_priv_s {
    int mode;
    int qp;
    int parity;
#if 0
    int temp_stride[3];
    uint8_t *src[3];
    int16_t *temp[3];
#endif
    int outbuf_size;
    uint8_t *outbuf;
    AVCodecContext *avctx_enc;
    AVFrame *frame;
    AVFrame *frame_dec;
};
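/*
 * Filter one frame. The source planes are handed to the snow encoder, whose
 * coded_frame then acts as a motion-compensated prediction of the frame. For
 * lines belonging to the missing field ((y ^ parity) & 1) each pixel starts
 * from that prediction and is corrected towards the source via the
 * diff0/diff1 and spatial-score logic below; lines of the present field are
 * copied from the source unchanged. The field parity is toggled after every
 * frame.
 */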
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
    int x, y, i;

    for(i=0; i<3; i++){
        p->frame->data[i]= src[i];
        p->frame->linesize[i]= src_stride[i];
    }

    p->avctx_enc->me_cmp=
    p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
    p->frame->quality= p->qp*FF_QP2LAMBDA;
    avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
    p->frame_dec = p->avctx_enc->coded_frame;

    for(i=0; i<3; i++){
        int is_chroma= !!i;
        int w= width >>is_chroma;
        int h= height>>is_chroma;
        int fils= p->frame_dec->linesize[i];
        int srcs= src_stride[i];

        for(y=0; y<h; y++){
            if((y ^ p->parity) & 1){
                for(x=0; x<w; x++){
                    if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
                        uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
                        uint8_t *srcp= &src[i][x + y*srcs];
                        int diff0= filp[-fils] - srcp[-srcs];
                        int diff1= filp[+fils] - srcp[+srcs];
                        int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
                                          +ABS(srcp[-srcs  ] - srcp[+srcs  ])
                                          +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
                        int temp= filp[0];
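/*
 * CHECK(j) probes one additional (diagonal) matching direction: it scores how
 * well the line above matches the line below when shifted by +j/-j and, if
 * that direction scores better than the best found so far, re-derives
 * diff0/diff1 along it. The +-2 checks are only reached when the +-1 check
 * already improved the score. The unusual layout is intentional: the macro
 * leaves two blocks open, which the trailing "}} }}" pairs below close.
 */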
#define CHECK(j)\
    {   int score= ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
                 + ABS(srcp[-srcs  +j] - srcp[+srcs  -j])\
                 + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
        if(score < spatial_score){\
            spatial_score= score;\
            diff0= filp[-fils+j] - srcp[-srcs+j];\
            diff1= filp[+fils-j] - srcp[+srcs-j];

                        CHECK(-1) CHECK(-2) }} }}
                        CHECK( 1) CHECK( 2) }} }}
#if 0
                        if((diff0 ^ diff1) > 0){
                            int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
                            temp-= mindiff;
                        }
#elif 1
                        if(diff0 + diff1 > 0)
                            temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
                        else
                            temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
#else
                        temp-= (diff0 + diff1)/2;
#endif

#if 1
                        filp[0]=
                        dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
#else
                        dst[i][x + y*dst_stride[i]]= filp[0];
                        filp[0]= temp > 255U ? ~(temp>>31) : temp;
#endif
                    }else
                        dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
                }
            }
        }

        for(y=0; y<h; y++){
            if(!((y ^ p->parity) & 1)){
                for(x=0; x<w; x++){
#if 1
                    p->frame_dec->data[i][x + y*fils]=
                    dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
#else
                    dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
                    p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
#endif
                }
            }
        }
    }

    p->parity ^= 1;
}
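/*
 * Allocate and open the snow encoder that provides the motion
 * estimation/compensation (CODEC_FLAG2_MEMC_ONLY, fixed global_quality of 1).
 * The switch below relies on intentional fall-through, so each higher mode
 * keeps the settings of the lower ones: 0 enables QPEL only, 1 additionally
 * enables 4MV and dia_size=2, 2 additionally uses iterative ME, and 3
 * additionally uses 3 reference frames.
 */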
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
    int i;
    AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);

    for(i=0; i<3; i++){
        AVCodecContext *avctx_enc;
#if 0
        int is_chroma= !!i;
        int w= ((width  + 31) & (~31))>>is_chroma;
        int h= ((height + 31) & (~31))>>is_chroma;

        vf->priv->temp_stride[i]= w;
        vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
        vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
#endif
        avctx_enc=
        vf->priv->avctx_enc= avcodec_alloc_context();

        avctx_enc->width = width;
        avctx_enc->height = height;
        avctx_enc->time_base= (AVRational){1,25};  // meaningless
        avctx_enc->gop_size = 300;
        avctx_enc->max_b_frames= 0;
        avctx_enc->pix_fmt = PIX_FMT_YUV420P;
        avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
        avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
        avctx_enc->global_quality= 1;
        avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
        avctx_enc->me_cmp=
        avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
        avctx_enc->mb_cmp= FF_CMP_SSE;

        switch(vf->priv->mode){
        case 3:
            avctx_enc->refs= 3;
        case 2:
            avctx_enc->me_method= ME_ITER;
        case 1:
            avctx_enc->flags |= CODEC_FLAG_4MV;
            avctx_enc->dia_size=2;
//            avctx_enc->mb_decision = MB_DECISION_RD;
        case 0:
            avctx_enc->flags |= CODEC_FLAG_QPEL;
        }

        avcodec_open(avctx_enc, enc);
    }

    vf->priv->frame= avcodec_alloc_frame();

    vf->priv->outbuf_size= width*height*10;
    vf->priv->outbuf= malloc(vf->priv->outbuf_size);

    return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    return; //caused problems, dunno why
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
    mp_image_t *dmpi;

    if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
        // no DR, so get a new image! hope we'll get DR buffer:
        dmpi=vf_get_image(vf->next,mpi->imgfmt,
            MP_IMGTYPE_TEMP,
            MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
            mpi->width,mpi->height);
        vf_clone_mpi_attributes(dmpi, mpi);
    }else{
        dmpi=vf->dmpi;
    }

    filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);

    return vf_next_put_image(vf,dmpi, pts);
}
static void uninit(struct vf_instance *vf){
    if(!vf->priv) return;

#if 0
    for(i=0; i<3; i++){
        free(vf->priv->temp[i]);
        vf->priv->temp[i]= NULL;
        free(vf->priv->src[i]);
        vf->priv->src[i]= NULL;
    }
#endif
    if (vf->priv->avctx_enc) {
        avcodec_close(vf->priv->avctx_enc);
        av_freep(&vf->priv->avctx_enc);
    }

    free(vf->priv->outbuf);
    free(vf->priv);
    vf->priv=NULL;
}
//===========================================================================//

static int query_format(struct vf_instance *vf, unsigned int fmt){
    switch(fmt){
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_Y800:
    case IMGFMT_Y8:
        return vf_next_query_format(vf,fmt);
    }
    return 0;
}
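/*
 * The option string is parsed as "mode:parity:qp" (see the sscanf below);
 * defaults are mode=0, parity=-1, qp=1. Higher modes are slower but use
 * better motion estimation, parity selects which input field is left
 * untouched (there is no autodetection), and qp is the quantizer passed to
 * the snow encoder; per the MPlayer manual, larger values should give a
 * smoother motion vector field at the cost of less optimal individual vectors.
 */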
static int vf_open(vf_instance_t *vf, char *args){

    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    init_avcodec();

    vf->priv->mode=0;
    vf->priv->parity= -1;
    vf->priv->qp=1;

    if (args) sscanf(args, "%d:%d:%d", &vf->priv->mode, &vf->priv->parity, &vf->priv->qp);

    return 1;
}

const vf_info_t vf_info_mcdeint = {
    "motion compensating deinterlacer",
    "mcdeint",
    "Michael Niedermayer",
    "",
    vf_open,
    NULL
};