af_amerge.c

/*
 * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */
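
/*
 * Example usage (illustrative, not part of the original source): merge the
 * second and third streams of a file into one multi-channel audio stream,
 * e.g.
 *   ffmpeg -i input.mkv -filter_complex "[0:1][0:2] amerge=inputs=2" ...
 */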

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "bufferqueue.h"
#include "internal.h"

#define SWR_CH_MAX 64

typedef struct {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        struct FFBufQueue queue;
        int nb_ch;      /**< number of channels for the input */
        int nb_samples; /**< number of samples currently queued for the input */
        int pos;        /**< read position in the first queued frame */
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->in)
            ff_bufqueue_discard_all(&s->in[i].queue);
        if (ctx->input_pads)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&s->in);
}
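
/*
 * Format negotiation: each input is constrained to the first channel layout
 * it advertises and the output layout is the union of the input layouts.
 * If the input layouts overlap, the output falls back to the default layout
 * for the combined channel count (or, failing that, a plain bit mask of that
 * many channels).
 */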
static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
        if (outlayout & inlayout[i])
            overlap++;
        outlayout |= inlayout[i];
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout && nb_ch)
            outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(ff_packed_sample_fmts_array);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, 1);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++)
        if (!s->in[i].nb_samples)
            if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
                return ret;
    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the second input
 * @param ins       pointer to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
 */
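/*
 * Illustrative example: with two stereo inputs, route[] is {0, 1, 2, 3} and
 * each sample period of the output is written as in0.L, in0.R, in1.L, in1.R.
 */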
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}
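
/*
 * Frames are queued per input.  As soon as every input has samples pending,
 * the smallest common number of samples is merged into one output frame;
 * partially consumed input frames are tracked with the per-input pos field.
 */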
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AMergeContext *s = ctx->priv;
    AVFilterLink *const outlink = ctx->outputs[0];
    int input_number;
    int nb_samples, ns, i;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX];
    uint8_t *ins[SWR_CH_MAX], *outs;

    for (input_number = 0; input_number < s->nb_inputs; input_number++)
        if (inlink == ctx->inputs[input_number])
            break;
    av_assert1(input_number < s->nb_inputs);
    if (ff_bufqueue_is_full(&s->in[input_number].queue)) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    ff_bufqueue_add(ctx, &s->in[input_number].queue, av_frame_clone(insamples));
    s->in[input_number].nb_samples += insamples->nb_samples;
    av_frame_free(&insamples);
    nb_samples = s->in[0].nb_samples;
    for (i = 1; i < s->nb_inputs; i++)
        nb_samples = FFMIN(nb_samples, s->in[i].nb_samples);
    if (!nb_samples)
        return 0;

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf)
        return AVERROR(ENOMEM);
    outs = outbuf->data[0];
    for (i = 0; i < s->nb_inputs; i++) {
        inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
        ins[i] = inbuf[i]->data[0] +
                 s->in[i].pos * s->in[i].nb_ch * s->bps;
    }
    av_frame_copy_props(outbuf, inbuf[0]);
    outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                  inbuf[0]->pts +
                  av_rescale_q(s->in[0].pos,
                               av_make_q(1, ctx->inputs[0]->sample_rate),
                               ctx->outputs[0]->time_base);

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    av_frame_set_channels(outbuf, outlink->channels);

    while (nb_samples) {
        ns = nb_samples;
        for (i = 0; i < s->nb_inputs; i++)
            ns = FFMIN(ns, inbuf[i]->nb_samples - s->in[i].pos);
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, ns, s->bps);
            break;
        }

        nb_samples -= ns;
        for (i = 0; i < s->nb_inputs; i++) {
            s->in[i].nb_samples -= ns;
            s->in[i].pos += ns;
            if (s->in[i].pos == inbuf[i]->nb_samples) {
                s->in[i].pos = 0;
                av_frame_free(&inbuf[i]);
                ff_bufqueue_get(&s->in[i].queue);
                inbuf[i] = ff_bufqueue_peek(&s->in[i].queue, 0);
                ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
            }
        }
    }
    return ff_filter_frame(ctx->outputs[0], outbuf);
}
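
/*
 * One input pad per requested input is created at init time ("in0", "in1",
 * ...), which is why the filter declares AVFILTER_FLAG_DYNAMIC_INPUTS and a
 * NULL static input list below.
 */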
static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i;

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name         = name,
            .type         = AVMEDIA_TYPE_AUDIO,
            .filter_frame = filter_frame,
        };
        if (!name)
            return AVERROR(ENOMEM);
        ff_insert_inpad(ctx, i, &pad);
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};