/*
 * Copyright (c) 2012 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * concat audio-video filter
 */
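
/*
 * Illustrative usage (file names are hypothetical): concatenate two
 * segments, each made of one video and one audio stream, into a single
 * pair of output streams:
 *
 *   ffmpeg -i part1.mkv -i part2.mkv -filter_complex \
 *     "[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [v][a]" \
 *     -map "[v]" -map "[a]" output.mkv
 *
 * For each segment, all video streams are listed first, then all audio
 * streams; every segment must have the same number of streams of each type.
 */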

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#define FF_BUFQUEUE_SIZE 256
#include "bufferqueue.h"
#include "internal.h"
#include "video.h"
#include "audio.h"

#define TYPE_ALL 2

typedef struct {
    const AVClass *class;
    unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
    unsigned nb_segments;
    unsigned cur_idx; /**< index of the first input of current segment */
    int64_t delta_ts; /**< timestamp to add to produce output timestamps */
    unsigned nb_in_active; /**< number of active inputs in current segment */
    unsigned unsafe;
    struct concat_in {
        int64_t pts;
        int64_t nb_frames;
        unsigned eof;
        struct FFBufQueue queue;
    } *in;
} ConcatContext;

#define OFFSET(x) offsetof(ConcatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM

static const AVOption concat_options[] = {
    { "n", "specify the number of segments", OFFSET(nb_segments),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, V|A|F },
    { "v", "specify the number of video streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
      AV_OPT_TYPE_INT, { .i64 = 1 }, 0, INT_MAX, V|F },
    { "a", "specify the number of audio streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, A|F },
    { "unsafe", "enable unsafe mode",
      OFFSET(unsafe),
      AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, V|A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(concat);
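
/* Negotiate formats: each output accepts every format of its media type;
 * all the inputs that feed the same output (one per segment) share the
 * output's format list, and for audio also its sample rate list and
 * channel layout list. */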
static int query_formats(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned type, nb_str, idx0 = 0, idx, str, seg;
    AVFilterFormats *formats, *rates = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    int ret;

    for (type = 0; type < TYPE_ALL; type++) {
        nb_str = cat->nb_streams[type];
        for (str = 0; str < nb_str; str++) {
            idx = idx0;

            /* Set the output formats */
            formats = ff_all_formats(type);
            if ((ret = ff_formats_ref(formats, &ctx->outputs[idx]->in_formats)) < 0)
                return ret;
            if (type == AVMEDIA_TYPE_AUDIO) {
                rates = ff_all_samplerates();
                if ((ret = ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates)) < 0)
                    return ret;
                layouts = ff_all_channel_layouts();
                if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts)) < 0)
                    return ret;
            }

            /* Set the same formats for each corresponding input */
            for (seg = 0; seg < cat->nb_segments; seg++) {
                if ((ret = ff_formats_ref(formats, &ctx->inputs[idx]->out_formats)) < 0)
                    return ret;
                if (type == AVMEDIA_TYPE_AUDIO) {
                    if ((ret = ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates)) < 0 ||
                        (ret = ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts)) < 0)
                        return ret;
                }
                idx += ctx->nb_outputs;
            }

            idx0++;
        }
    }
    return 0;
}
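
/* Configure an output from the first segment's matching input, then check
 * that every later segment's input agrees on size and SAR; a mismatch is an
 * error unless unsafe mode is enabled, in which case it is only logged. */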
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no, seg;
    AVFilterLink *inlink = ctx->inputs[in_no];

    /* enhancement: find a common one */
    outlink->time_base           = AV_TIME_BASE_Q;
    outlink->w                   = inlink->w;
    outlink->h                   = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->format              = inlink->format;
    for (seg = 1; seg < cat->nb_segments; seg++) {
        inlink = ctx->inputs[in_no += ctx->nb_outputs];
        if (!outlink->sample_aspect_ratio.num)
            outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
        /* possible enhancement: unsafe mode, do not check */
        if (outlink->w != inlink->w ||
            outlink->h != inlink->h ||
            (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num &&
             inlink->sample_aspect_ratio.num) ||
            outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
            av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
                   "(size %dx%d, SAR %d:%d) do not match the corresponding "
                   "output link %s parameters (%dx%d, SAR %d:%d)\n",
                   ctx->input_pads[in_no].name, inlink->w, inlink->h,
                   inlink->sample_aspect_ratio.num,
                   inlink->sample_aspect_ratio.den,
                   ctx->input_pads[out_no].name, outlink->w, outlink->h,
                   outlink->sample_aspect_ratio.num,
                   outlink->sample_aspect_ratio.den);
            if (!cat->unsafe)
                return AVERROR(EINVAL);
        }
    }

    return 0;
}
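
/* Rescale a frame to the output time base, advance the input's running pts
 * estimate (exactly, from nb_samples, for audio; by the mean frame duration
 * otherwise), add the accumulated segment offset and forward the frame to
 * the matching output. */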
static int push_frame(AVFilterContext *ctx, unsigned in_no, AVFrame *buf)
{
    ConcatContext *cat = ctx->priv;
    unsigned out_no = in_no % ctx->nb_outputs;
    AVFilterLink *inlink  = ctx->inputs[in_no];
    AVFilterLink *outlink = ctx->outputs[out_no];
    struct concat_in *in = &cat->in[in_no];

    buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    in->pts = buf->pts;
    in->nb_frames++;
    /* add duration to input PTS */
    if (inlink->sample_rate)
        /* use number of audio samples */
        in->pts += av_rescale_q(buf->nb_samples,
                                av_make_q(1, inlink->sample_rate),
                                outlink->time_base);
    else if (in->nb_frames >= 2)
        /* use mean duration */
        in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);

    buf->pts += cat->delta_ts;
    return ff_filter_frame(outlink, buf);
}
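
/* Route an incoming frame: drop it if its segment already ended, queue it
 * if its segment has not started yet, otherwise push it downstream. */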
static int process_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ConcatContext *cat = ctx->priv;
    unsigned in_no = FF_INLINK_IDX(inlink);

    if (in_no < cat->cur_idx) {
        av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
               ctx->input_pads[in_no].name);
        av_frame_free(&buf);
    } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
        ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
    } else {
        return push_frame(ctx, in_no, buf);
    }
    return 0;
}
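
/* Allocate input buffers directly on the corresponding output link, so that
 * frames can later be forwarded downstream without a copy. */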
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_video_buffer(outlink, w, h);
}

static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_audio_buffer(outlink, nb_samples);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    return process_frame(inlink, buf);
}

static void close_input(AVFilterContext *ctx, unsigned in_no)
{
    ConcatContext *cat = ctx->priv;

    cat->in[in_no].eof = 1;
    cat->nb_in_active--;
    av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
           ctx->input_pads[in_no].name, cat->nb_in_active);
}
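
/* Compute the pts offset for the next segment: the largest end pts among
 * all inputs of the segment that just finished. */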
static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
{
    ConcatContext *cat = ctx->priv;
    unsigned i = cat->cur_idx;
    unsigned imax = i + ctx->nb_outputs;
    int64_t pts;

    pts = cat->in[i++].pts;
    for (; i < imax; i++)
        pts = FFMAX(pts, cat->in[i].pts);
    cat->delta_ts += pts;
    *seg_delta = pts;
}
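
/* Pad an audio stream that ends early with silence, so that all streams of
 * the finished segment stop at the same output pts. */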
static int send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
                        int64_t seg_delta)
{
    ConcatContext *cat = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[out_no];
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
    int64_t nb_samples, sent = 0;
    int frame_nb_samples, ret;
    AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
    AVFrame *buf;
    int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);

    if (!rate_tb.den)
        return AVERROR_BUG;
    nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
                              outlink->time_base, rate_tb);
    frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
    while (nb_samples) {
        frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
        buf = ff_get_audio_buffer(outlink, frame_nb_samples);
        if (!buf)
            return AVERROR(ENOMEM);
        av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
                               nb_channels, outlink->format);
        buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
        ret = ff_filter_frame(outlink, buf);
        if (ret < 0)
            return ret;
        sent       += frame_nb_samples;
        nb_samples -= frame_nb_samples;
    }
    return 0;
}
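
/* Close the current segment: compute the next segment's pts offset, pad the
 * audio streams with silence and release frames already queued on the next
 * segment's inputs. */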
static int flush_segment(AVFilterContext *ctx)
{
    int ret;
    ConcatContext *cat = ctx->priv;
    unsigned str, str_max;
    int64_t seg_delta;

    find_next_delta_ts(ctx, &seg_delta);
    cat->cur_idx += ctx->nb_outputs;
    cat->nb_in_active = ctx->nb_outputs;
    av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
           cat->delta_ts);

    if (cat->cur_idx < ctx->nb_inputs) {
        /* pad audio streams with silence */
        str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
        str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
        for (; str < str_max; str++) {
            ret = send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
                               seg_delta);
            if (ret < 0)
                return ret;
        }
        /* flush queued buffers */
        /* possible enhancement: flush in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs;
        for (str = cat->cur_idx; str < str_max; str++) {
            while (cat->in[str].queue.available) {
                ret = push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
                if (ret < 0)
                    return ret;
            }
        }
    }
    return 0;
}
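
/* Request a frame from the current segment's input for this output; once it
 * hits EOF, drain the segment's remaining inputs, flush the segment and
 * retry on the next one. */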
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no + cat->cur_idx;
    unsigned str, str_max;
    int ret;

    while (1) {
        if (in_no >= ctx->nb_inputs)
            return AVERROR_EOF;
        if (!cat->in[in_no].eof) {
            ret = ff_request_frame(ctx->inputs[in_no]);
            if (ret != AVERROR_EOF)
                return ret;
            close_input(ctx, in_no);
        }
        /* cycle on all inputs to finish the segment */
        /* possible enhancement: request in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs - 1;
        for (str = cat->cur_idx; cat->nb_in_active;
             str = str == str_max ? cat->cur_idx : str + 1) {
            if (cat->in[str].eof)
                continue;
            ret = ff_request_frame(ctx->inputs[str]);
            if (ret != AVERROR_EOF)
                return ret;
            close_input(ctx, str);
        }
        ret = flush_segment(ctx);
        if (ret < 0)
            return ret;
        in_no += ctx->nb_outputs;
    }
}
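
/* Create one input pad per stream and per segment (named "in<seg>:v<n>" or
 * "in<seg>:a<n>") and one output pad per stream ("out:v<n>" / "out:a<n>"). */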
static av_cold int init(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned seg, type, str;

    /* create input pads */
    for (seg = 0; seg < cat->nb_segments; seg++) {
        for (type = 0; type < TYPE_ALL; type++) {
            for (str = 0; str < cat->nb_streams[type]; str++) {
                AVFilterPad pad = {
                    .type             = type,
                    .get_video_buffer = get_video_buffer,
                    .get_audio_buffer = get_audio_buffer,
                    .filter_frame     = filter_frame,
                };
                pad.name = av_asprintf("in%d:%c%d", seg, "va"[type], str);
                ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
            }
        }
    }
    /* create output pads */
    for (type = 0; type < TYPE_ALL; type++) {
        for (str = 0; str < cat->nb_streams[type]; str++) {
            AVFilterPad pad = {
                .type          = type,
                .config_props  = config_output,
                .request_frame = request_frame,
            };
            pad.name = av_asprintf("out:%c%d", "va"[type], str);
            ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
        }
    }

    cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
    if (!cat->in)
        return AVERROR(ENOMEM);
    cat->nb_in_active = ctx->nb_outputs;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        ff_bufqueue_discard_all(&cat->in[i].queue);
    }
    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);
    av_freep(&cat->in);
}

AVFilter ff_avf_concat = {
    .name          = "concat",
    .description   = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ConcatContext),
    .inputs        = NULL,
    .outputs       = NULL,
    .priv_class    = &concat_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};