/* vf_blend.c */
  1. /*
  2. * Copyright (c) 2013 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/imgutils.h"
  21. #include "libavutil/eval.h"
  22. #include "libavutil/opt.h"
  23. #include "libavutil/pixfmt.h"
  24. #include "avfilter.h"
  25. #include "bufferqueue.h"
  26. #include "formats.h"
  27. #include "internal.h"
  28. #include "dualinput.h"
  29. #include "video.h"
/* Input pad indices of the two-input "blend" filter. */
#define TOP    0
#define BOTTOM 1

/* Per-component blend operator, selectable via the cN_mode/all_mode options. */
enum BlendMode {
    BLEND_UNSET = -1,    /* sentinel: all_mode not set by the user */
    BLEND_NORMAL,        /* default (0): plain copy of the top input */
    BLEND_ADDITION,
    BLEND_AND,
    BLEND_AVERAGE,
    BLEND_BURN,
    BLEND_DARKEN,
    BLEND_DIFFERENCE,
    BLEND_DIFFERENCE128,
    BLEND_DIVIDE,
    BLEND_DODGE,
    BLEND_EXCLUSION,
    BLEND_HARDLIGHT,
    BLEND_LIGHTEN,
    BLEND_MULTIPLY,
    BLEND_NEGATION,
    BLEND_OR,
    BLEND_OVERLAY,
    BLEND_PHOENIX,
    BLEND_PINLIGHT,
    BLEND_REFLECT,
    BLEND_SCREEN,
    BLEND_SOFTLIGHT,
    BLEND_SUBTRACT,
    BLEND_VIVIDLIGHT,
    BLEND_XOR,
    BLEND_NB             /* number of valid modes (upper option bound) */
};
/* Variables available inside user expressions (cN_expr/all_expr); the enum
 * below gives the matching indices into the values[] array. */
static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };
/* Per-plane blending parameters. */
typedef struct FilterParams {
    enum BlendMode mode;  /* selected blend operator */
    double opacity;       /* 0..1 cross-fade between top and blended value */
    AVExpr *e;            /* parsed expression, set when expr_str is given */
    char *expr_str;       /* user expression string (cN_expr / all_expr) */
    /* Blend rows [start, end) of one plane that is 'width' pixels wide;
     * 'values' carries the expression variables (VAR_*). */
    void (*blend)(const uint8_t *top, int top_linesize,
                  const uint8_t *bottom, int bottom_linesize,
                  uint8_t *dst, int dst_linesize,
                  int width, int start, int end,
                  struct FilterParams *param, double *values);
} FilterParams;

/* Arguments handed to each filter_slice() job. */
typedef struct ThreadData {
    const AVFrame *top, *bottom;
    AVFrame *dst;
    AVFilterLink *inlink;
    int plane;
    int w, h;             /* plane dimensions (already chroma-subsampled) */
    FilterParams *param;
} ThreadData;

typedef struct {
    const AVClass *class;
    FFDualInputContext dinput;
    int hsub, vsub; ///< chroma subsampling values
    int nb_planes;
    char *all_expr;            /* expression applied to every component */
    enum BlendMode all_mode;   /* mode applied to every component, or -1 */
    double all_opacity;        /* opacity applied to every component */
    FilterParams params[4];    /* one entry per plane */
    int tblend;                /* nonzero when instantiated as "tblend" */
    AVFrame *prev_frame; /* only used with tblend */
} BlendContext;
/* Option table shared by the "blend" and "tblend" filters: per-component
 * mode/expression/opacity plus "all_*" overrides applied to every plane.
 * NOTE: expanded after OFFSET/FLAGS are defined below, so the forward use
 * of those macros here is fine. */
#define COMMON_OPTIONS \
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
    { "addition", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION}, 0, 0, FLAGS, "mode" },\
    { "and", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND}, 0, 0, FLAGS, "mode" },\
    { "average", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE}, 0, 0, FLAGS, "mode" },\
    { "burn", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN}, 0, 0, FLAGS, "mode" },\
    { "darken", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN}, 0, 0, FLAGS, "mode" },\
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },\
    { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE128}, 0, 0, FLAGS, "mode" },\
    { "divide", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE}, 0, 0, FLAGS, "mode" },\
    { "dodge", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE}, 0, 0, FLAGS, "mode" },\
    { "exclusion", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION}, 0, 0, FLAGS, "mode" },\
    { "hardlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT}, 0, 0, FLAGS, "mode" },\
    { "lighten", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN}, 0, 0, FLAGS, "mode" },\
    { "multiply", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY}, 0, 0, FLAGS, "mode" },\
    { "negation", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION}, 0, 0, FLAGS, "mode" },\
    { "normal", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL}, 0, 0, FLAGS, "mode" },\
    { "or", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR}, 0, 0, FLAGS, "mode" },\
    { "overlay", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY}, 0, 0, FLAGS, "mode" },\
    { "phoenix", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX}, 0, 0, FLAGS, "mode" },\
    { "pinlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT}, 0, 0, FLAGS, "mode" },\
    { "reflect", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT}, 0, 0, FLAGS, "mode" },\
    { "screen", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN}, 0, 0, FLAGS, "mode" },\
    { "softlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT}, 0, 0, FLAGS, "mode" },\
    { "subtract", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT}, 0, 0, FLAGS, "mode" },\
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },\
    { "xor", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR}, 0, 0, FLAGS, "mode" },\
    { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}

#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Options of the two-input "blend" filter: shared options plus the
 * dual-input framing controls. */
static const AVOption blend_options[] = {
    COMMON_OPTIONS,
    { "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
    { "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(blend);
  144. static void blend_normal(const uint8_t *top, int top_linesize,
  145. const uint8_t *bottom, int bottom_linesize,
  146. uint8_t *dst, int dst_linesize,
  147. int width, int start, int end,
  148. FilterParams *param, double *values)
  149. {
  150. av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
  151. }
/*
 * Generate a blend function blend_<name> evaluating <expr> per pixel.
 * Inside <expr>, the A and B macros (defined below) expand to the current
 * top/bottom pixel.  The result is cross-faded with the top pixel by the
 * per-component opacity:
 *     dst = top + (expr - top) * opacity
 * Rows [start, end) of a plane 'width' pixels wide are processed; the
 * incoming pointers are assumed to already point at row 'start'.
 */
#define DEFINE_BLEND(name, expr) \
static void blend_## name(const uint8_t *top, int top_linesize, \
                          const uint8_t *bottom, int bottom_linesize, \
                          uint8_t *dst, int dst_linesize, \
                          int width, int start, int end, \
                          FilterParams *param, double *values) \
{ \
    double opacity = param->opacity; \
    int i, j; \
\
    for (i = start; i < end; i++) { \
        for (j = 0; j < width; j++) { \
            dst[j] = top[j] + ((expr) - top[j]) * opacity; \
        } \
        dst += dst_linesize; \
        top += top_linesize; \
        bottom += bottom_linesize; \
    } \
}
/* Shorthands for the current top/bottom pixel inside DEFINE_BLEND exprs;
 * 'j' is the column index of the generated loop. */
#define A top[j]
#define B bottom[j]
/* Photoshop-style building blocks for 8-bit values; x is a scale factor
 * (1 or 2) used by overlay/hardlight. */
#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
#define SCREEN(x, a, b) (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
/* BURN/DODGE guard the degenerate divisor cases (a == 0 / a == 255). */
#define BURN(a, b) (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
#define DODGE(a, b) (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))
/* Instantiate one blend function per mode; all expressions operate on 8-bit
 * values in [0, 255].  NOTE(review): divide uses float math with no guard
 * for B == 0, and softlight/divide results are narrowed to uint8_t without
 * explicit clipping — presumably inherited from the reference formulas. */
DEFINE_BLEND(addition, FFMIN(255, A + B))
DEFINE_BLEND(average, (A + B) / 2)
DEFINE_BLEND(subtract, FFMAX(0, A - B))
DEFINE_BLEND(multiply, MULTIPLY(1, A, B))
DEFINE_BLEND(negation, 255 - FFABS(255 - A - B))
DEFINE_BLEND(difference, FFABS(A - B))
DEFINE_BLEND(difference128, av_clip_uint8(128 + A - B))
DEFINE_BLEND(screen, SCREEN(1, A, B))
DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
DEFINE_BLEND(darken, FFMIN(A, B))
DEFINE_BLEND(lighten, FFMAX(A, B))
DEFINE_BLEND(divide, ((float)A / ((float)B) * 255))
DEFINE_BLEND(dodge, DODGE(A, B))
DEFINE_BLEND(burn, BURN(A, B))
DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255))
DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255)
DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
DEFINE_BLEND(and, A & B)
DEFINE_BLEND(or, A | B)
DEFINE_BLEND(xor, A ^ B)
DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
  201. static void blend_expr(const uint8_t *top, int top_linesize,
  202. const uint8_t *bottom, int bottom_linesize,
  203. uint8_t *dst, int dst_linesize,
  204. int width, int start, int end,
  205. FilterParams *param, double *values)
  206. {
  207. AVExpr *e = param->e;
  208. int y, x;
  209. for (y = start; y < end; y++) {
  210. values[VAR_Y] = y;
  211. for (x = 0; x < width; x++) {
  212. values[VAR_X] = x;
  213. values[VAR_TOP] = values[VAR_A] = top[x];
  214. values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
  215. dst[x] = av_expr_eval(e, values, NULL);
  216. }
  217. dst += dst_linesize;
  218. top += top_linesize;
  219. bottom += bottom_linesize;
  220. }
  221. }
  222. static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  223. {
  224. ThreadData *td = arg;
  225. int slice_start = (td->h * jobnr ) / nb_jobs;
  226. int slice_end = (td->h * (jobnr+1)) / nb_jobs;
  227. const uint8_t *top = td->top->data[td->plane];
  228. const uint8_t *bottom = td->bottom->data[td->plane];
  229. uint8_t *dst = td->dst->data[td->plane];
  230. double values[VAR_VARS_NB];
  231. values[VAR_N] = td->inlink->frame_count;
  232. values[VAR_T] = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
  233. values[VAR_W] = td->w;
  234. values[VAR_H] = td->h;
  235. values[VAR_SW] = td->w / (double)td->dst->width;
  236. values[VAR_SH] = td->h / (double)td->dst->height;
  237. td->param->blend(top + slice_start * td->top->linesize[td->plane],
  238. td->top->linesize[td->plane],
  239. bottom + slice_start * td->bottom->linesize[td->plane],
  240. td->bottom->linesize[td->plane],
  241. dst + slice_start * td->dst->linesize[td->plane],
  242. td->dst->linesize[td->plane],
  243. td->w, slice_start, slice_end, td->param, &values[0]);
  244. return 0;
  245. }
  246. static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
  247. const AVFrame *bottom_buf)
  248. {
  249. BlendContext *b = ctx->priv;
  250. AVFilterLink *inlink = ctx->inputs[0];
  251. AVFilterLink *outlink = ctx->outputs[0];
  252. AVFrame *dst_buf;
  253. int plane;
  254. dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
  255. if (!dst_buf)
  256. return top_buf;
  257. av_frame_copy_props(dst_buf, top_buf);
  258. for (plane = 0; plane < b->nb_planes; plane++) {
  259. int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
  260. int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
  261. int outw = FF_CEIL_RSHIFT(dst_buf->width, hsub);
  262. int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
  263. FilterParams *param = &b->params[plane];
  264. ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
  265. .w = outw, .h = outh, .param = param, .plane = plane,
  266. .inlink = inlink };
  267. ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
  268. }
  269. if (!b->tblend)
  270. av_frame_free(&top_buf);
  271. return dst_buf;
  272. }
/* Shared init for "blend" and "tblend": resolve per-component modes,
 * opacities and expressions into per-plane blend callbacks. */
static av_cold int init(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int ret, plane;

    /* the two filters share this code; distinguish them by name */
    b->tblend = !strcmp(ctx->filter->name, "tblend");

    for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
        FilterParams *param = &b->params[plane];

        /* all_mode/all_opacity, when set, override the per-component values */
        if (b->all_mode >= 0)
            param->mode = b->all_mode;
        if (b->all_opacity < 1)
            param->opacity = b->all_opacity;

        /* option ranges guarantee mode is in [0, BLEND_NB-1], so every
         * reachable value has a case here */
        switch (param->mode) {
        case BLEND_ADDITION: param->blend = blend_addition; break;
        case BLEND_AND: param->blend = blend_and; break;
        case BLEND_AVERAGE: param->blend = blend_average; break;
        case BLEND_BURN: param->blend = blend_burn; break;
        case BLEND_DARKEN: param->blend = blend_darken; break;
        case BLEND_DIFFERENCE: param->blend = blend_difference; break;
        case BLEND_DIFFERENCE128: param->blend = blend_difference128; break;
        case BLEND_DIVIDE: param->blend = blend_divide; break;
        case BLEND_DODGE: param->blend = blend_dodge; break;
        case BLEND_EXCLUSION: param->blend = blend_exclusion; break;
        case BLEND_HARDLIGHT: param->blend = blend_hardlight; break;
        case BLEND_LIGHTEN: param->blend = blend_lighten; break;
        case BLEND_MULTIPLY: param->blend = blend_multiply; break;
        case BLEND_NEGATION: param->blend = blend_negation; break;
        case BLEND_NORMAL: param->blend = blend_normal; break;
        case BLEND_OR: param->blend = blend_or; break;
        case BLEND_OVERLAY: param->blend = blend_overlay; break;
        case BLEND_PHOENIX: param->blend = blend_phoenix; break;
        case BLEND_PINLIGHT: param->blend = blend_pinlight; break;
        case BLEND_REFLECT: param->blend = blend_reflect; break;
        case BLEND_SCREEN: param->blend = blend_screen; break;
        case BLEND_SOFTLIGHT: param->blend = blend_softlight; break;
        case BLEND_SUBTRACT: param->blend = blend_subtract; break;
        case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
        case BLEND_XOR: param->blend = blend_xor; break;
        }

        /* all_expr applies only to components without their own expression */
        if (b->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(b->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        }
        /* a parsed expression takes precedence over the mode callback */
        if (param->expr_str) {
            ret = av_expr_parse(&param->e, param->expr_str, var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            if (ret < 0)
                return ret;
            param->blend = blend_expr;
        }
    }

    b->dinput.process = blend_frame;
    return 0;
}
  327. static int query_formats(AVFilterContext *ctx)
  328. {
  329. static const enum AVPixelFormat pix_fmts[] = {
  330. AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
  331. AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P,AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
  332. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
  333. AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
  334. };
  335. ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
  336. return 0;
  337. }
  338. static av_cold void uninit(AVFilterContext *ctx)
  339. {
  340. BlendContext *b = ctx->priv;
  341. int i;
  342. ff_dualinput_uninit(&b->dinput);
  343. av_frame_free(&b->prev_frame);
  344. for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
  345. av_expr_free(b->params[i].e);
  346. }
  347. #if CONFIG_BLEND_FILTER
/* Output configuration for "blend": validate that both inputs match in
 * format, size and SAR, then copy the top link's properties downstream. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
    BlendContext *b = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
    int ret;

    if (toplink->format != bottomlink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (toplink->w != bottomlink->w ||
        toplink->h != bottomlink->h ||
        toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
        toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[TOP].name, toplink->w, toplink->h,
               toplink->sample_aspect_ratio.num,
               toplink->sample_aspect_ratio.den,
               ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
               bottomlink->sample_aspect_ratio.num,
               bottomlink->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    /* output inherits all properties from the top input */
    outlink->w = toplink->w;
    outlink->h = toplink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outlink->frame_rate = toplink->frame_rate;

    /* cache chroma subsampling / plane count for blend_frame() */
    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    b->nb_planes = av_pix_fmt_count_planes(toplink->format);

    if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
        return ret;
    return 0;
}
  387. static int request_frame(AVFilterLink *outlink)
  388. {
  389. BlendContext *b = outlink->src->priv;
  390. return ff_dualinput_request_frame(&b->dinput, outlink);
  391. }
  392. static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
  393. {
  394. BlendContext *b = inlink->dst->priv;
  395. return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
  396. }
/* The two input pads of "blend"; both feed the dual-input queue. */
static const AVFilterPad blend_inputs[] = {
    {
        .name          = "top",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
    },{
        .name          = "bottom",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
    },
    { NULL }
};
/* Single output pad of "blend". */
static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition for the two-input "blend" filter. */
AVFilter ff_vf_blend = {
    .name          = "blend",
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .query_formats = query_formats,
    .inputs        = blend_inputs,
    .outputs       = blend_outputs,
    .priv_class    = &blend_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
  430. #endif
  431. #if CONFIG_TBLEND_FILTER
  432. static int tblend_config_output(AVFilterLink *outlink)
  433. {
  434. AVFilterContext *ctx = outlink->src;
  435. AVFilterLink *inlink = ctx->inputs[0];
  436. BlendContext *b = ctx->priv;
  437. const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
  438. b->hsub = pix_desc->log2_chroma_w;
  439. b->vsub = pix_desc->log2_chroma_h;
  440. b->nb_planes = av_pix_fmt_count_planes(inlink->format);
  441. outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
  442. return 0;
  443. }
  444. static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
  445. {
  446. BlendContext *b = inlink->dst->priv;
  447. AVFilterLink *outlink = inlink->dst->outputs[0];
  448. if (b->prev_frame) {
  449. AVFrame *out = blend_frame(inlink->dst, frame, b->prev_frame);
  450. av_frame_free(&b->prev_frame);
  451. b->prev_frame = frame;
  452. return ff_filter_frame(outlink, out);
  453. }
  454. b->prev_frame = frame;
  455. return 0;
  456. }
/* "tblend" exposes only the shared options (no shortest/repeatlast —
 * there is just one input). */
static const AVOption tblend_options[] = {
    COMMON_OPTIONS,
    { NULL }
};

AVFILTER_DEFINE_CLASS(tblend);
/* Single input pad of "tblend". */
static const AVFilterPad tblend_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = tblend_filter_frame,
    },
    { NULL }
};
/* Single output pad of "tblend". */
static const AVFilterPad tblend_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = tblend_config_output,
    },
    { NULL }
};
/* Filter definition for the single-input "tblend" (temporal blend) filter. */
AVFilter ff_vf_tblend = {
    .name          = "tblend",
    .description   = NULL_IF_CONFIG_SMALL("Blend successive frames."),
    .priv_size     = sizeof(BlendContext),
    .priv_class    = &tblend_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tblend_inputs,
    .outputs       = tblend_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
  490. #endif