/* vf_decimate.c */
  1. /*
  2. * Copyright (c) 2012 Fredrik Mellbin
  3. * Copyright (c) 2013 Clément Bœsch
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/opt.h"
  22. #include "libavutil/pixdesc.h"
  23. #include "libavutil/timestamp.h"
  24. #include "avfilter.h"
  25. #include "internal.h"
  26. #define INPUT_MAIN 0
  27. #define INPUT_CLEANSRC 1
/* One slot of the cycle window: a buffered frame plus its difference
 * metrics against the previous frame. */
struct qitem {
    AVFrame *frame;   ///< buffered input frame owned by this slot
    int64_t maxbdiff; ///< highest blockx x blocky window difference vs the previous frame
    int64_t totdiff;  ///< difference vs the previous frame summed over the whole picture
};
typedef struct {
    const AVClass *class;
    struct qitem *queue;    ///< window of cycle frames and the associated data diff
    int fid;                ///< current frame id in the queue
    int filled;             ///< 1 if the queue is filled, 0 otherwise
    AVFrame *last;          ///< last frame from the previous queue
    AVFrame **clean_src;    ///< frame queue for the clean source
    int got_frame[2];       ///< frame request flag for each input stream
    AVRational ts_unit;     ///< timestamp units for the output frames
    int64_t start_pts;      ///< base for output timestamps
    uint32_t eof;           ///< bitmask for end of stream
    int hsub, vsub;         ///< chroma subsampling values
    int depth;              ///< sample bit depth of the first component
    int nxblocks, nyblocks; ///< dimensions of the half-block metrics grid
    int bdiffsize;          ///< nxblocks * nyblocks
    int64_t *bdiffs;        ///< per half-block-tile difference accumulators
    /* options */
    int cycle;              ///< one frame is dropped out of every "cycle" frames
    double dupthresh_flt;   ///< duplicate threshold, in percent (user option)
    double scthresh_flt;    ///< scene change threshold, in percent (user option)
    int64_t dupthresh;      ///< dupthresh_flt rescaled to an absolute per-block SAD (set in config_input)
    int64_t scthresh;       ///< scthresh_flt rescaled to an absolute whole-picture SAD (set in config_input)
    int blockx, blocky;     ///< metric block sizes; must be powers of two
    int ppsrc;              ///< 1 if a separate clean source input is used
    int chroma;             ///< 1 if chroma planes take part in the metrics
} DecimateContext;
#define OFFSET(x) offsetof(DecimateContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options; the *thresh percentages are converted to absolute SAD
 * thresholds in config_input() once geometry and bit depth are known. */
static const AVOption decimate_options[] = {
    { "cycle",     "set the number of frame from which one will be dropped", OFFSET(cycle), AV_OPT_TYPE_INT, {.i64 = 5}, 2, 25, FLAGS },
    { "dupthresh", "set duplicate threshold", OFFSET(dupthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 1.1}, 0, 100, FLAGS },
    { "scthresh",  "set scene change threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl = 15.0}, 0, 100, FLAGS },
    { "blockx",    "set the size of the x-axis blocks used during metric calculations", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
    { "blocky",    "set the size of the y-axis blocks used during metric calculations", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64 = 32}, 4, 1<<9, FLAGS },
    { "ppsrc",     "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "chroma",    "set whether or not chroma is considered in the metric calculations", OFFSET(chroma), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(decimate);
/**
 * Compute the duplicate-detection metrics of f2 against f1 into q.
 *
 * The picture is tiled into half-block-sized cells and the sum of absolute
 * pixel differences of each cell is accumulated in dm->bdiffs.  From that
 * grid, q->maxbdiff is the largest sum over any 2x2 group of adjacent cells
 * (i.e. any blockx x blocky window aligned on half-block boundaries), and
 * q->totdiff is the difference summed over the whole picture.
 */
static void calc_diffs(const DecimateContext *dm, struct qitem *q,
                       const AVFrame *f1, const AVFrame *f2)
{
    int64_t maxdiff = -1;
    int64_t *bdiffs = dm->bdiffs;
    int plane, i, j;

    memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs));

    /* luma only, unless chroma metrics were requested and the frame has
     * chroma planes */
    for (plane = 0; plane < (dm->chroma && f1->data[2] ? 3 : 1); plane++) {
        int x, y, xl;
        const int linesize1 = f1->linesize[plane];
        const int linesize2 = f2->linesize[plane];
        const uint8_t *f1p = f1->data[plane];
        const uint8_t *f2p = f2->data[plane];
        int width   = plane ? AV_CEIL_RSHIFT(f1->width,  dm->hsub) : f1->width;
        int height  = plane ? AV_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height;
        int hblockx = dm->blockx / 2;
        int hblocky = dm->blocky / 2;

        if (plane) {
            /* scale the cell size down so it covers the same picture area
             * on subsampled chroma planes */
            hblockx >>= dm->hsub;
            hblocky >>= dm->vsub;
        }

        for (y = 0; y < height; y++) {
            int ydest = y / hblocky;
            int xdest = 0;

/* accumulate the SAD of one line into the row of half-block cells, for the
 * given sample size (8- or 16-bit) */
#define CALC_DIFF(nbits) do {                                   \
    for (x = 0; x < width; x += hblockx) {                      \
        int64_t acc = 0;                                        \
        int m = FFMIN(width, x + hblockx);                      \
        for (xl = x; xl < m; xl++)                              \
            acc += abs(((const uint##nbits##_t *)f1p)[xl] -     \
                       ((const uint##nbits##_t *)f2p)[xl]);     \
        bdiffs[ydest * dm->nxblocks + xdest] += acc;            \
        xdest++;                                                \
    }                                                           \
} while (0)
            if (dm->depth == 8) CALC_DIFF(8);
            else                CALC_DIFF(16);

            f1p += linesize1;
            f2p += linesize2;
        }
    }

    /* maximum over every 2x2 group of adjacent cells = one full
     * blockx x blocky window */
    for (i = 0; i < dm->nyblocks - 1; i++) {
        for (j = 0; j < dm->nxblocks - 1; j++) {
            int64_t tmp = bdiffs[ i      * dm->nxblocks + j    ]
                        + bdiffs[ i      * dm->nxblocks + j + 1]
                        + bdiffs[(i + 1) * dm->nxblocks + j    ]
                        + bdiffs[(i + 1) * dm->nxblocks + j + 1];
            if (tmp > maxdiff)
                maxdiff = tmp;
        }
    }

    q->totdiff = 0;
    for (i = 0; i < dm->bdiffsize; i++)
        q->totdiff += bdiffs[i];
    q->maxbdiff = maxdiff;
}
/**
 * Buffer incoming frames until a full cycle is available, compute the
 * difference metrics of each frame, then forward every frame of the cycle
 * except the one selected for dropping.
 *
 * With ppsrc, this callback is fed by both inputs and waits until the
 * current slot received a frame from each; metrics and output frames then
 * come from the clean source.  A NULL "in" is the EOF flush call issued by
 * request_inlink() to push out a trailing, incomplete cycle.
 *
 * @return 0, or a negative AVERROR code from ff_filter_frame()
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int scpos = -1, duppos = -1;
    int drop = INT_MIN, i, lowest = 0, ret;
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DecimateContext *dm   = ctx->priv;
    AVFrame *prv;

    /* update frames queue(s) */
    if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
        dm->queue[dm->fid].frame = in;
        dm->got_frame[INPUT_MAIN] = 1;
    } else {
        dm->clean_src[dm->fid] = in;
        dm->got_frame[INPUT_CLEANSRC] = 1;
    }

    /* wait until the current slot was fed by every active input */
    if (!dm->got_frame[INPUT_MAIN] || (dm->ppsrc && !dm->got_frame[INPUT_CLEANSRC]))
        return 0;
    dm->got_frame[INPUT_MAIN] = dm->got_frame[INPUT_CLEANSRC] = 0;

    if (dm->ppsrc)
        in = dm->clean_src[dm->fid];

    if (in) {
        /* update frame metrics */
        prv = dm->fid ? (dm->ppsrc ? dm->clean_src[dm->fid - 1] : dm->queue[dm->fid - 1].frame) : dm->last;
        if (!prv) {
            /* very first frame: maximal metrics so it can never be picked
             * as a duplicate */
            dm->queue[dm->fid].maxbdiff = INT64_MAX;
            dm->queue[dm->fid].totdiff  = INT64_MAX;
        } else {
            calc_diffs(dm, &dm->queue[dm->fid], prv, in);
        }
        if (++dm->fid != dm->cycle)
            return 0;
        av_frame_free(&dm->last);
        dm->last = av_frame_clone(in);
        dm->fid = 0;

        /* we have a complete cycle, select the frame to drop */
        lowest = 0;
        for (i = 0; i < dm->cycle; i++) {
            if (dm->queue[i].totdiff > dm->scthresh)
                scpos = i;
            if (dm->queue[i].maxbdiff < dm->queue[lowest].maxbdiff)
                lowest = i;
        }
        if (dm->queue[lowest].maxbdiff < dm->dupthresh)
            duppos = lowest;
        /* drop at the last scene change position unless a real duplicate
         * was found, in which case the duplicate goes */
        drop = scpos >= 0 && duppos < 0 ? scpos : lowest;
    }

    /* metrics debug */
    if (av_log_get_level() >= AV_LOG_DEBUG) {
        av_log(ctx, AV_LOG_DEBUG, "1/%d frame drop:\n", dm->cycle);
        for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
            av_log(ctx, AV_LOG_DEBUG," #%d: totdiff=%08"PRIx64" maxbdiff=%08"PRIx64"%s%s%s%s\n",
                   i + 1, dm->queue[i].totdiff, dm->queue[i].maxbdiff,
                   i == scpos  ? " sc"     : "",
                   i == duppos ? " dup"    : "",
                   i == lowest ? " lowest" : "",
                   i == drop   ? " [DROP]" : "");
        }
    }

    /* push all frames except the drop */
    ret = 0;
    for (i = 0; i < dm->cycle && dm->queue[i].frame; i++) {
        if (i == drop) {
            if (dm->ppsrc)
                av_frame_free(&dm->clean_src[i]);
            av_frame_free(&dm->queue[i].frame);
        } else {
            AVFrame *frame = dm->queue[i].frame;
            /* remember the first valid input pts as the output base */
            if (frame->pts != AV_NOPTS_VALUE && dm->start_pts == AV_NOPTS_VALUE)
                dm->start_pts = frame->pts;
            if (dm->ppsrc) {
                /* output comes from the clean source; the pre-processed
                 * main frame was only needed for the metrics */
                av_frame_free(&frame);
                frame = dm->clean_src[i];
            }
            /* regenerate timestamps from the output frame counter so they
             * stay evenly spaced after the drops */
            frame->pts = av_rescale_q(outlink->frame_count, dm->ts_unit, (AVRational){1,1}) +
                         (dm->start_pts == AV_NOPTS_VALUE ? 0 : dm->start_pts);
            ret = ff_filter_frame(outlink, frame);
            if (ret < 0)
                break;
        }
    }

    return ret;
}
  211. static int config_input(AVFilterLink *inlink)
  212. {
  213. int max_value;
  214. AVFilterContext *ctx = inlink->dst;
  215. DecimateContext *dm = ctx->priv;
  216. const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
  217. const int w = inlink->w;
  218. const int h = inlink->h;
  219. dm->hsub = pix_desc->log2_chroma_w;
  220. dm->vsub = pix_desc->log2_chroma_h;
  221. dm->depth = pix_desc->comp[0].depth;
  222. max_value = (1 << dm->depth) - 1;
  223. dm->scthresh = (int64_t)(((int64_t)max_value * w * h * dm->scthresh_flt) / 100);
  224. dm->dupthresh = (int64_t)(((int64_t)max_value * dm->blockx * dm->blocky * dm->dupthresh_flt) / 100);
  225. dm->nxblocks = (w + dm->blockx/2 - 1) / (dm->blockx/2);
  226. dm->nyblocks = (h + dm->blocky/2 - 1) / (dm->blocky/2);
  227. dm->bdiffsize = dm->nxblocks * dm->nyblocks;
  228. dm->bdiffs = av_malloc_array(dm->bdiffsize, sizeof(*dm->bdiffs));
  229. dm->queue = av_calloc(dm->cycle, sizeof(*dm->queue));
  230. if (!dm->bdiffs || !dm->queue)
  231. return AVERROR(ENOMEM);
  232. if (dm->ppsrc) {
  233. dm->clean_src = av_calloc(dm->cycle, sizeof(*dm->clean_src));
  234. if (!dm->clean_src)
  235. return AVERROR(ENOMEM);
  236. }
  237. return 0;
  238. }
/**
 * Create the input pad(s) — a second "clean_src" pad is added when ppsrc is
 * enabled — and validate the block-size options.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a pad name cannot be allocated,
 *         AVERROR(EINVAL) if blockx/blocky are not powers of two
 */
static av_cold int decimate_init(AVFilterContext *ctx)
{
    DecimateContext *dm = ctx->priv;
    AVFilterPad pad = {
        .name         = av_strdup("main"),
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    };

    if (!pad.name)
        return AVERROR(ENOMEM);
    ff_insert_inpad(ctx, INPUT_MAIN, &pad);

    if (dm->ppsrc) {
        pad.name = av_strdup("clean_src");
        /* geometry/thresholds are configured from the main input only */
        pad.config_props = NULL;
        if (!pad.name)
            return AVERROR(ENOMEM);
        ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
    }

    /* power-of-two blocks keep the half-block cell math exact */
    if ((dm->blockx & (dm->blockx - 1)) ||
        (dm->blocky & (dm->blocky - 1))) {
        av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
        return AVERROR(EINVAL);
    }

    dm->start_pts = AV_NOPTS_VALUE;
    return 0;
}
  266. static av_cold void decimate_uninit(AVFilterContext *ctx)
  267. {
  268. int i;
  269. DecimateContext *dm = ctx->priv;
  270. av_frame_free(&dm->last);
  271. av_freep(&dm->bdiffs);
  272. av_freep(&dm->queue);
  273. av_freep(&dm->clean_src);
  274. for (i = 0; i < ctx->nb_inputs; i++)
  275. av_freep(&ctx->input_pads[i].name);
  276. }
  277. static int request_inlink(AVFilterContext *ctx, int lid)
  278. {
  279. int ret = 0;
  280. DecimateContext *dm = ctx->priv;
  281. if (!dm->got_frame[lid]) {
  282. AVFilterLink *inlink = ctx->inputs[lid];
  283. ret = ff_request_frame(inlink);
  284. if (ret == AVERROR_EOF) { // flushing
  285. dm->eof |= 1 << lid;
  286. ret = filter_frame(inlink, NULL);
  287. }
  288. }
  289. return ret;
  290. }
  291. static int request_frame(AVFilterLink *outlink)
  292. {
  293. int ret;
  294. AVFilterContext *ctx = outlink->src;
  295. DecimateContext *dm = ctx->priv;
  296. const uint32_t eof_mask = 1<<INPUT_MAIN | dm->ppsrc<<INPUT_CLEANSRC;
  297. if ((dm->eof & eof_mask) == eof_mask) // flush done?
  298. return AVERROR_EOF;
  299. if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
  300. return ret;
  301. if (dm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
  302. return ret;
  303. return 0;
  304. }
/**
 * Advertise the supported pixel formats: planar YUV at 8 to 16 bits (with
 * alpha variants where listed) plus gray.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf,  AV_PIX_FMT_YUV422##suf,  AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf)   AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf)         PF_NOALPHA(suf), PF_ALPHA(suf)
        PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
  321. static int config_output(AVFilterLink *outlink)
  322. {
  323. AVFilterContext *ctx = outlink->src;
  324. DecimateContext *dm = ctx->priv;
  325. const AVFilterLink *inlink =
  326. ctx->inputs[dm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
  327. AVRational fps = inlink->frame_rate;
  328. if (!fps.num || !fps.den) {
  329. av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
  330. "current rate of %d/%d is invalid\n", fps.num, fps.den);
  331. return AVERROR(EINVAL);
  332. }
  333. fps = av_mul_q(fps, (AVRational){dm->cycle - 1, dm->cycle});
  334. av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
  335. inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
  336. outlink->time_base = inlink->time_base;
  337. outlink->frame_rate = fps;
  338. outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
  339. outlink->w = inlink->w;
  340. outlink->h = inlink->h;
  341. dm->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));
  342. return 0;
  343. }
/* Single video output; inputs are inserted at init time (see decimate_init). */
static const AVFilterPad decimate_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};
/* No static .inputs: AVFILTER_FLAG_DYNAMIC_INPUTS lets decimate_init()
 * create one or two input pads depending on the ppsrc option. */
AVFilter ff_vf_decimate = {
    .name          = "decimate",
    .description   = NULL_IF_CONFIG_SMALL("Decimate frames (post field matching filter)."),
    .init          = decimate_init,
    .uninit        = decimate_uninit,
    .priv_size     = sizeof(DecimateContext),
    .query_formats = query_formats,
    .outputs       = decimate_outputs,
    .priv_class    = &decimate_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};