vf_overlay.c

/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * overlay one video on top of another
 */
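
/*
 * Example graph (illustrative only; file names and offsets are arbitrary):
 * place the second input 10 pixels from the top-left corner of the first:
 *
 *     avconv -i main.mp4 -i logo.png -filter_complex "overlay=x=10:y=10" out.mp4
 *
 * The x and y options accept the expression constants listed in var_names
 * below, e.g. "overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10" for the
 * bottom-right corner.
 */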
#include "avfilter.h"
#include "formats.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/avstring.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "E",
    "PHI",
    "PI",
    "main_w",    "W", ///< width  of the main    video
    "main_h",    "H", ///< height of the main    video
    "overlay_w", "w", ///< width  of the overlay video
    "overlay_h", "h", ///< height of the overlay video
    NULL
};

enum var_name {
    VAR_E,
    VAR_PHI,
    VAR_PI,
    VAR_MAIN_W,    VAR_MW,
    VAR_MAIN_H,    VAR_MH,
    VAR_OVERLAY_W, VAR_OW,
    VAR_OVERLAY_H, VAR_OH,
    VAR_VARS_NB
};
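
/* var_names and enum var_name are parallel tables: their order must match,
 * since av_expr_parse_and_eval() resolves a constant name to the value stored
 * at the same index in var_values. */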
enum EOFAction {
    EOF_ACTION_REPEAT,
    EOF_ACTION_ENDALL,
    EOF_ACTION_PASS
};

static const char *eof_action_str[] = {
    "repeat", "endall", "pass"
};

#define MAIN    0
#define OVERLAY 1

typedef struct OverlayContext {
    const AVClass *class;
    int x, y;                   ///< position of overlaid picture
    int max_plane_step[4];      ///< steps per pixel for each plane
    int hsub, vsub;             ///< chroma subsampling values
    char *x_expr, *y_expr;
    enum EOFAction eof_action;  ///< action to take on EOF from source
    AVFrame *main;
    AVFrame *over_prev, *over_next;
} OverlayContext;

static av_cold void uninit(AVFilterContext *ctx)
{
    OverlayContext *s = ctx->priv;

    av_frame_free(&s->main);
    av_frame_free(&s->over_prev);
    av_frame_free(&s->over_next);
}
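
/* Format negotiation: the main input and the output are constrained to
 * YUV420P, while the overlay input is constrained to YUVA420P so that it
 * carries the alpha plane used by blend_frame(). The RGB24/BGR24 branch in
 * blend_frame() appears reachable only if these lists are extended. */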
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat inout_pix_fmts[] = { AV_PIX_FMT_YUV420P,  AV_PIX_FMT_NONE };
    static const enum AVPixelFormat blend_pix_fmts[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE };
    AVFilterFormats *inout_formats = ff_make_format_list(inout_pix_fmts);
    AVFilterFormats *blend_formats = ff_make_format_list(blend_pix_fmts);

    ff_formats_ref(inout_formats, &ctx->inputs [MAIN   ]->out_formats);
    ff_formats_ref(blend_formats, &ctx->inputs [OVERLAY]->out_formats);
    ff_formats_ref(inout_formats, &ctx->outputs[MAIN   ]->in_formats );

    return 0;
}
static int config_input_main(AVFilterLink *inlink)
{
    OverlayContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    av_image_fill_max_pixsteps(s->max_plane_step, NULL, pix_desc);
    s->hsub = pix_desc->log2_chroma_w;
    s->vsub = pix_desc->log2_chroma_h;

    return 0;
}
static int config_input_overlay(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    OverlayContext  *s   = inlink->dst->priv;
    char *expr;
    double var_values[VAR_VARS_NB], res;
    int ret;

    /* Finish the configuration by evaluating the expressions
       now when both inputs are configured. */
    var_values[VAR_E  ] = M_E;
    var_values[VAR_PHI] = M_PHI;
    var_values[VAR_PI ] = M_PI;

    var_values[VAR_MAIN_W   ] = var_values[VAR_MW] = ctx->inputs[MAIN   ]->w;
    var_values[VAR_MAIN_H   ] = var_values[VAR_MH] = ctx->inputs[MAIN   ]->h;
    var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w;
    var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h;

    if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->x = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr), var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->y = res;
    /* x may depend on y */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->x = res;

    av_log(ctx, AV_LOG_VERBOSE,
           "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s eof_action:%s\n",
           ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
           av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
           s->x, s->y,
           ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
           av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format),
           eof_action_str[s->eof_action]);

    if (s->x < 0 || s->y < 0 ||
        s->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
        s->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) {
        av_log(ctx, AV_LOG_ERROR,
               "Overlay area (%d,%d)<->(%d,%d) not within the main area (0,0)<->(%d,%d) or zero-sized\n",
               s->x, s->y,
               (int)(s->x + var_values[VAR_OVERLAY_W]),
               (int)(s->y + var_values[VAR_OVERLAY_H]),
               (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]);
        return AVERROR(EINVAL);
    }

    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    outlink->w = ctx->inputs[MAIN]->w;
    outlink->h = ctx->inputs[MAIN]->h;
    outlink->time_base = ctx->inputs[MAIN]->time_base;

    return 0;
}
static void blend_frame(AVFilterContext *ctx,
                        AVFrame *dst, AVFrame *src,
                        int x, int y)
{
    OverlayContext *s = ctx->priv;
    int i, j, k;
    int width, height;
    int overlay_end_y = y + src->height;
    int end_y, start_y;

    width   = FFMIN(dst->width - x, src->width);
    end_y   = FFMIN(dst->height, overlay_end_y);
    start_y = FFMAX(y, 0);
    height  = end_y - start_y;
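
    /* Each destination sample is alpha-blended in place as
     *     d = (d * (255 - a) + s * a + 128) >> 8
     * which approximates d * (1 - a/255) + s * (a/255); the +128 rounds and
     * the shift by 8 stands in for a division by 255. */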
    if (dst->format == AV_PIX_FMT_BGR24 || dst->format == AV_PIX_FMT_RGB24) {
        uint8_t *dp = dst->data[0] + x * 3 + start_y * dst->linesize[0];
        uint8_t *sp = src->data[0];
        int b = dst->format == AV_PIX_FMT_BGR24 ? 2 : 0;
        int r = dst->format == AV_PIX_FMT_BGR24 ? 0 : 2;

        if (y < 0)
            sp += -y * src->linesize[0];
        for (i = 0; i < height; i++) {
            uint8_t *d = dp, *s = sp;
            for (j = 0; j < width; j++) {
                d[r] = (d[r] * (0xff - s[3]) + s[0] * s[3] + 128) >> 8;
                d[1] = (d[1] * (0xff - s[3]) + s[1] * s[3] + 128) >> 8;
                d[b] = (d[b] * (0xff - s[3]) + s[2] * s[3] + 128) >> 8;
                d += 3;
                s += 4;
            }
            dp += dst->linesize[0];
            sp += src->linesize[0];
        }
    } else {
        for (i = 0; i < 3; i++) {
            int hsub = i ? s->hsub : 0;
            int vsub = i ? s->vsub : 0;
            uint8_t *dp = dst->data[i] + (x >> hsub) +
                          (start_y >> vsub) * dst->linesize[i];
            uint8_t *sp = src->data[i];
            uint8_t *ap = src->data[3];
            int wp = FFALIGN(width,  1 << hsub) >> hsub;
            int hp = FFALIGN(height, 1 << vsub) >> vsub;

            if (y < 0) {
                sp += ((-y) >> vsub) * src->linesize[i];
                ap += -y * src->linesize[3];
            }
            for (j = 0; j < hp; j++) {
                uint8_t *d = dp, *s = sp, *a = ap;
                for (k = 0; k < wp; k++) {
                    // average alpha for color components, improve quality
                    int alpha_v, alpha_h, alpha;
                    if (hsub && vsub && j+1 < hp && k+1 < wp) {
                        alpha = (a[0] + a[src->linesize[3]] +
                                 a[1] + a[src->linesize[3]+1]) >> 2;
                    } else if (hsub || vsub) {
                        alpha_h = hsub && k+1 < wp ?
                            (a[0] + a[1]) >> 1 : a[0];
                        alpha_v = vsub && j+1 < hp ?
                            (a[0] + a[src->linesize[3]]) >> 1 : a[0];
                        alpha = (alpha_v + alpha_h) >> 1;
                    } else
                        alpha = a[0];
                    *d = (*d * (0xff - alpha) + *s++ * alpha + 128) >> 8;
                    d++;
                    a += 1 << hsub;
                }
                dp += dst->linesize[i];
                sp += src->linesize[i];
                ap += (1 << vsub) * src->linesize[3];
            }
        }
    }
}
static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
{
    OverlayContext *s = inlink->dst->priv;

    av_assert0(!s->main);
    s->main = frame;

    return 0;
}

static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
{
    OverlayContext *s = inlink->dst->priv;

    av_assert0(!s->over_next);
    s->over_next = frame;

    return 0;
}

static int output_frame(AVFilterContext *ctx)
{
    OverlayContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret = ff_filter_frame(outlink, s->main);
    s->main = NULL;

    return ret;
}
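
/* Called when the overlay input hits EOF while a main frame is pending.
 * Depending on the eof_action option: "repeat" keeps blending the last seen
 * overlay frame, "endall" terminates both streams, and "pass" forwards the
 * main frames untouched. */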
static int handle_overlay_eof(AVFilterContext *ctx)
{
    OverlayContext *s = ctx->priv;
    /* Repeat previous frame on secondary input */
    if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
        blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
    /* End both streams */
    else if (s->eof_action == EOF_ACTION_ENDALL)
        return AVERROR_EOF;
    return output_frame(ctx);
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    OverlayContext *s = ctx->priv;
    AVRational tb_main = ctx->inputs[MAIN]->time_base;
    AVRational tb_over = ctx->inputs[OVERLAY]->time_base;
    int ret = 0;

    /* get a frame on the main input */
    if (!s->main) {
        ret = ff_request_frame(ctx->inputs[MAIN]);
        if (ret < 0)
            return ret;
    }

    /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
    if (!s->over_next) {
        ret = ff_request_frame(ctx->inputs[OVERLAY]);
        if (ret == AVERROR_EOF)
            return handle_overlay_eof(ctx);
        else if (ret < 0)
            return ret;
    }
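
    /* Drop overlay frames whose timestamps (compared across time bases with
     * av_compare_ts()) lie before the pending main frame, keeping the most
     * recent one in over_prev so it can still be blended if no newer overlay
     * frame lines up. */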
    while (s->main->pts != AV_NOPTS_VALUE &&
           s->over_next->pts != AV_NOPTS_VALUE &&
           av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
        av_frame_free(&s->over_prev);
        FFSWAP(AVFrame*, s->over_prev, s->over_next);

        ret = ff_request_frame(ctx->inputs[OVERLAY]);
        if (ret == AVERROR_EOF)
            return handle_overlay_eof(ctx);
        else if (ret < 0)
            return ret;
    }

    if (s->main->pts == AV_NOPTS_VALUE ||
        s->over_next->pts == AV_NOPTS_VALUE ||
        !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
        blend_frame(ctx, s->main, s->over_next, s->x, s->y);
        av_frame_free(&s->over_prev);
        FFSWAP(AVFrame*, s->over_prev, s->over_next);
    } else if (s->over_prev) {
        blend_frame(ctx, s->main, s->over_prev, s->x, s->y);
    }

    return output_frame(ctx);
}
#define OFFSET(x) offsetof(OverlayContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM

static const AVOption options[] = {
    { "x", "Horizontal position of the left edge of the overlaid video on the "
        "main video.", OFFSET(x_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
    { "y", "Vertical position of the top edge of the overlaid video on the "
        "main video.", OFFSET(y_expr), AV_OPT_TYPE_STRING, { .str = "0" }, .flags = FLAGS },
    { "eof_action", "Action to take when encountering EOF from secondary input",
        OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
        EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
        { "repeat", "Repeat the previous frame.",   0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
        { "endall", "End both streams.",            0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
        { "pass",   "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS },   .flags = FLAGS, "eof_action" },
    { NULL },
};

static const AVClass overlay_class = {
    .class_name = "overlay",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
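
/* The main input pad is marked needs_writable because blend_frame()
 * composites the overlay directly into the main frame, which is then sent
 * downstream by output_frame(). */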
static const AVFilterPad avfilter_vf_overlay_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_VIDEO,
        .config_props   = config_input_main,
        .filter_frame   = filter_frame_main,
        .needs_writable = 1,
        .needs_fifo     = 1,
    },
    {
        .name           = "overlay",
        .type           = AVMEDIA_TYPE_VIDEO,
        .config_props   = config_input_overlay,
        .filter_frame   = filter_frame_overlay,
        .needs_fifo     = 1,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_overlay_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_overlay = {
    .name          = "overlay",
    .description   = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
    .uninit        = uninit,
    .priv_size     = sizeof(OverlayContext),
    .priv_class    = &overlay_class,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_overlay_inputs,
    .outputs       = avfilter_vf_overlay_outputs,
};