/*
 * Copyright (c) 2012 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * concat audio-video filter
 */
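
/*
 * The filter takes n * (v + a) inputs, ordered segment by segment with the
 * video pads of a segment before its audio pads, and produces v + a outputs.
 * A minimal filtergraph sketch for two segments of one video and one audio
 * stream each:
 *
 *   [0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]
 */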
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#define FF_BUFQUEUE_SIZE 256
#include "bufferqueue.h"
#include "internal.h"
#include "video.h"
#include "audio.h"

#define TYPE_ALL 2

typedef struct {
    const AVClass *class;
    unsigned nb_streams[TYPE_ALL]; /**< number of out streams of each type */
    unsigned nb_segments;
    unsigned cur_idx;      /**< index of the first input of current segment */
    int64_t delta_ts;      /**< timestamp to add to produce output timestamps */
    unsigned nb_in_active; /**< number of active inputs in current segment */
    struct concat_in {
        int64_t pts;
        int64_t nb_frames;
        unsigned eof;
        struct FFBufQueue queue;
    } *in;
} ConcatContext;

#define OFFSET(x) offsetof(ConcatContext, x)

static const AVOption concat_options[] = {
    { "n", "specify the number of segments", OFFSET(nb_segments),
      AV_OPT_TYPE_INT, { .dbl = 2 }, 2, INT_MAX },
    { "v", "specify the number of video streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_VIDEO]),
      AV_OPT_TYPE_INT, { .dbl = 1 }, 0, INT_MAX },
    { "a", "specify the number of audio streams",
      OFFSET(nb_streams[AVMEDIA_TYPE_AUDIO]),
      AV_OPT_TYPE_INT, { .dbl = 0 }, 0, INT_MAX },
    { 0 }
};

AVFILTER_DEFINE_CLASS(concat);
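
/* Pick formats: each output offers every format of its media type, and all
 * the inputs feeding a given output share the output's format list, so the
 * segments are negotiated to a common format. */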
static int query_formats(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned type, nb_str, idx0 = 0, idx, str, seg;
    AVFilterFormats *formats, *rates;
    AVFilterChannelLayouts *layouts;

    for (type = 0; type < TYPE_ALL; type++) {
        nb_str = cat->nb_streams[type];
        for (str = 0; str < nb_str; str++) {
            idx = idx0;

            /* Set the output formats */
            formats = ff_all_formats(type);
            if (!formats)
                return AVERROR(ENOMEM);
            ff_formats_ref(formats, &ctx->outputs[idx]->in_formats);
            if (type == AVMEDIA_TYPE_AUDIO) {
                rates = ff_all_samplerates();
                if (!rates)
                    return AVERROR(ENOMEM);
                ff_formats_ref(rates, &ctx->outputs[idx]->in_samplerates);
                layouts = ff_all_channel_layouts();
                if (!layouts)
                    return AVERROR(ENOMEM);
                ff_channel_layouts_ref(layouts, &ctx->outputs[idx]->in_channel_layouts);
            }

            /* Set the same formats for each corresponding input */
            for (seg = 0; seg < cat->nb_segments; seg++) {
                ff_formats_ref(formats, &ctx->inputs[idx]->out_formats);
                if (type == AVMEDIA_TYPE_AUDIO) {
                    ff_formats_ref(rates, &ctx->inputs[idx]->out_samplerates);
                    ff_channel_layouts_ref(layouts, &ctx->inputs[idx]->out_channel_layouts);
                }
                idx += ctx->nb_outputs;
            }

            idx0++;
        }
    }
    return 0;
}
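
/* Copy the properties of the first segment's input to the output and check
 * that the matching input of every later segment agrees on size and SAR. */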
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat   = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no, seg;
    AVFilterLink *inlink = ctx->inputs[in_no];

    /* enhancement: find a common one */
    outlink->time_base           = AV_TIME_BASE_Q;
    outlink->w                   = inlink->w;
    outlink->h                   = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->format              = inlink->format;
    for (seg = 1; seg < cat->nb_segments; seg++) {
        inlink = ctx->inputs[in_no += ctx->nb_outputs];
        /* possible enhancement: unsafe mode, do not check */
        if (outlink->w                       != inlink->w                       ||
            outlink->h                       != inlink->h                       ||
            outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
            outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
            av_log(ctx, AV_LOG_ERROR, "Input link %s parameters "
                   "(size %dx%d, SAR %d:%d) do not match the corresponding "
                   "output link %s parameters (%dx%d, SAR %d:%d)\n",
                   ctx->input_pads[in_no].name, inlink->w, inlink->h,
                   inlink->sample_aspect_ratio.num,
                   inlink->sample_aspect_ratio.den,
                   ctx->input_pads[out_no].name, outlink->w, outlink->h,
                   outlink->sample_aspect_ratio.num,
                   outlink->sample_aspect_ratio.den);
            return AVERROR(EINVAL);
        }
    }
    return 0;
}
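
/* Send one buffer to the output corresponding to input in_no, rescaling its
 * timestamp to the output time base, applying the segment offset delta_ts,
 * and tracking where the input ends (by sample count for audio, by
 * extrapolated mean frame duration for video). */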
static void push_frame(AVFilterContext *ctx, unsigned in_no,
                       AVFilterBufferRef *buf)
{
    ConcatContext *cat = ctx->priv;
    unsigned out_no = in_no % ctx->nb_outputs;
    AVFilterLink *inlink  = ctx->inputs[in_no];
    AVFilterLink *outlink = ctx->outputs[out_no];
    struct concat_in *in = &cat->in[in_no];

    buf->pts = av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    in->pts = buf->pts;
    in->nb_frames++;
    /* add duration to input PTS */
    if (inlink->sample_rate)
        /* use number of audio samples */
        in->pts += av_rescale_q(buf->audio->nb_samples,
                                (AVRational){ 1, inlink->sample_rate },
                                outlink->time_base);
    else if (in->nb_frames >= 2)
        /* use mean duration */
        in->pts = av_rescale(in->pts, in->nb_frames, in->nb_frames - 1);

    buf->pts += cat->delta_ts;
    switch (buf->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(outlink, buf);
        ff_draw_slice(outlink, 0, outlink->h, 1);
        ff_end_frame(outlink);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(outlink, buf);
        break;
    }
}
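
/* Route an incoming frame: frames from already-finished segments are dropped
 * with an error, frames from future segments are queued, and frames from the
 * current segment are pushed to the output. */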
static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ConcatContext *cat   = ctx->priv;
    unsigned in_no = FF_INLINK_IDX(inlink);

    if (in_no < cat->cur_idx) {
        av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
               ctx->input_pads[in_no].name);
        avfilter_unref_buffer(buf);
    } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
        ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
    } else {
        push_frame(ctx, in_no, buf);
    }
}
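
/* Buffer allocation is delegated to the corresponding output link so frames
 * can later be forwarded without a copy. */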
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms,
                                           int w, int h)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_video_buffer(outlink, perms, w, h);
}

static AVFilterBufferRef *get_audio_buffer(AVFilterLink *inlink, int perms,
                                           int nb_samples)
{
    AVFilterContext *ctx = inlink->dst;
    unsigned in_no = FF_INLINK_IDX(inlink);
    AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

    return ff_get_audio_buffer(outlink, perms, nb_samples);
}
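
/* Video input is handled one whole frame at a time in end_frame;
 * start_frame and draw_slice are therefore no-ops. */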
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    return 0;
}

static int draw_slice(AVFilterLink *inlink, int y, int h, int dir)
{
    return 0;
}

static int end_frame(AVFilterLink *inlink)
{
    process_frame(inlink, inlink->cur_buf);
    inlink->cur_buf = NULL;
    return 0;
}

static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    process_frame(inlink, buf);
    return 0; /* enhancement: handle error return */
}
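
/* Record EOF on one input of the current segment. */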
static void close_input(AVFilterContext *ctx, unsigned in_no)
{
    ConcatContext *cat = ctx->priv;

    cat->in[in_no].eof = 1;
    cat->nb_in_active--;
    av_log(ctx, AV_LOG_VERBOSE, "EOF on %s, %d streams left in segment.\n",
           ctx->input_pads[in_no].name, cat->nb_in_active);
}
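
/* The segment ends when its longest input ends: advance delta_ts by that
 * maximum end pts, and also report it through seg_delta so that
 * send_silence can pad the shorter audio inputs. */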
static void find_next_delta_ts(AVFilterContext *ctx, int64_t *seg_delta)
{
    ConcatContext *cat = ctx->priv;
    unsigned i = cat->cur_idx;
    unsigned imax = i + ctx->nb_outputs;
    int64_t pts;

    pts = cat->in[i++].pts;
    for (; i < imax; i++)
        pts = FFMAX(pts, cat->in[i].pts);
    cat->delta_ts += pts;
    *seg_delta = pts; /* duration of the segment that just ended */
}
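
/* Pad a finished audio input with silence from its own end up to the end of
 * the segment (seg_delta), in chunks of roughly 0.2 seconds. */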
static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no,
                         int64_t seg_delta)
{
    ConcatContext *cat = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[out_no];
    /* end of this input in output timestamps: delta_ts already includes the
       full segment duration (seg_delta), so subtract it back out */
    int64_t base_pts = cat->in[in_no].pts + cat->delta_ts - seg_delta;
    int64_t nb_samples, sent = 0;
    int frame_nb_samples;
    AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
    AVFilterBufferRef *buf;
    int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);

    if (!rate_tb.den)
        return;
    /* silence needed: from the end of this input to the end of the longest
       input of the segment */
    nb_samples = av_rescale_q(seg_delta - cat->in[in_no].pts,
                              outlink->time_base, rate_tb);
    frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
    while (nb_samples) {
        frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, frame_nb_samples);
        if (!buf)
            return;
        av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
                               nb_channels, outlink->format);
        buf->pts = base_pts + av_rescale_q(sent, rate_tb, outlink->time_base);
        ff_filter_samples(outlink, buf);
        sent       += frame_nb_samples;
        nb_samples -= frame_nb_samples;
    }
}
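
/* Finish the current segment: update the timestamp offset, pad the audio
 * streams that ended early with silence, and release the frames already
 * queued for the next segment. */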
static void flush_segment(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned str, str_max;
    int64_t seg_delta;

    find_next_delta_ts(ctx, &seg_delta);
    cat->cur_idx += ctx->nb_outputs;
    cat->nb_in_active = ctx->nb_outputs;
    av_log(ctx, AV_LOG_VERBOSE, "Segment finished at pts=%"PRId64"\n",
           cat->delta_ts);

    if (cat->cur_idx < ctx->nb_inputs) {
        /* pad audio streams with silence */
        str = cat->nb_streams[AVMEDIA_TYPE_VIDEO];
        str_max = str + cat->nb_streams[AVMEDIA_TYPE_AUDIO];
        for (; str < str_max; str++)
            send_silence(ctx, cat->cur_idx - ctx->nb_outputs + str, str,
                         seg_delta);
        /* flush queued buffers */
        /* possible enhancement: flush in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs;
        for (str = cat->cur_idx; str < str_max; str++)
            while (cat->in[str].queue.available)
                push_frame(ctx, str, ff_bufqueue_get(&cat->in[str].queue));
    }
}
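
/* Request a frame on the current segment's input for this output; on EOF,
 * drain the remaining inputs of the segment, flush it and retry on the next
 * segment until a frame is produced or all inputs are exhausted. */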
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConcatContext *cat   = ctx->priv;
    unsigned out_no = FF_OUTLINK_IDX(outlink);
    unsigned in_no  = out_no + cat->cur_idx;
    unsigned str, str_max;
    int ret;

    while (1) {
        if (in_no >= ctx->nb_inputs)
            return AVERROR_EOF;
        if (!cat->in[in_no].eof) {
            ret = ff_request_frame(ctx->inputs[in_no]);
            if (ret != AVERROR_EOF)
                return ret;
            close_input(ctx, in_no);
        }
        /* cycle on all inputs to finish the segment */
        /* possible enhancement: request in PTS order */
        str_max = cat->cur_idx + ctx->nb_outputs - 1;
        for (str = cat->cur_idx; cat->nb_in_active;
             str = str == str_max ? cat->cur_idx : str + 1) {
            if (cat->in[str].eof)
                continue;
            ret = ff_request_frame(ctx->inputs[str]);
            if (ret == AVERROR_EOF)
                close_input(ctx, str);
            else if (ret < 0)
                return ret;
        }
        flush_segment(ctx);
        in_no += ctx->nb_outputs;
    }
}
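
/* Parse the options, then create n * (v + a) input pads and v + a output
 * pads with names like "in0:v0" and "out:a0". */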
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    ConcatContext *cat = ctx->priv;
    int ret;
    unsigned seg, type, str;
    char name[32];

    cat->class = &concat_class;
    av_opt_set_defaults(cat);
    ret = av_set_options_string(cat, args, "=", ":");
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options: '%s'\n", args);
        return ret;
    }

    /* create input pads */
    for (seg = 0; seg < cat->nb_segments; seg++) {
        for (type = 0; type < TYPE_ALL; type++) {
            for (str = 0; str < cat->nb_streams[type]; str++) {
                AVFilterPad pad = {
                    .type             = type,
                    .min_perms        = AV_PERM_READ,
                    .rej_perms        = AV_PERM_REUSE2,
                    .get_video_buffer = get_video_buffer,
                    .get_audio_buffer = get_audio_buffer,
                };
                snprintf(name, sizeof(name), "in%d:%c%d", seg, "va"[type], str);
                pad.name = av_strdup(name);
                if (type == AVMEDIA_TYPE_VIDEO) {
                    pad.start_frame = start_frame;
                    pad.draw_slice  = draw_slice;
                    pad.end_frame   = end_frame;
                } else {
                    pad.filter_samples = filter_samples;
                }
                ff_insert_inpad(ctx, ctx->nb_inputs, &pad);
            }
        }
    }
    /* create output pads */
    for (type = 0; type < TYPE_ALL; type++) {
        for (str = 0; str < cat->nb_streams[type]; str++) {
            AVFilterPad pad = {
                .type          = type,
                .config_props  = config_output,
                .request_frame = request_frame,
            };
            snprintf(name, sizeof(name), "out:%c%d", "va"[type], str);
            pad.name = av_strdup(name);
            ff_insert_outpad(ctx, ctx->nb_outputs, &pad);
        }
    }

    cat->in = av_calloc(ctx->nb_inputs, sizeof(*cat->in));
    if (!cat->in)
        return AVERROR(ENOMEM);
    cat->nb_in_active = ctx->nb_outputs;
    return 0;
}
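
/* Free the pad names and any frames still queued. */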
static av_cold void uninit(AVFilterContext *ctx)
{
    ConcatContext *cat = ctx->priv;
    unsigned i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        av_freep(&ctx->input_pads[i].name);
        ff_bufqueue_discard_all(&cat->in[i].queue);
    }
    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);
    av_free(cat->in);
}

AVFilter avfilter_avf_concat = {
    .name          = "concat",
    .description   = NULL_IF_CONFIG_SMALL("Concatenate audio and video streams."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ConcatContext),
    .inputs        = (const AVFilterPad[]) { { .name = NULL } },
    .outputs       = (const AVFilterPad[]) { { .name = NULL } },
};