af_adelay.c

/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
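
/* Per-channel delay state: "delay" is the delay in samples, "samples" is a
 * buffer of that many samples, "delay_index" counts how far the buffer has
 * been primed during the initial silent period, and "index" is the circular
 * read/write position once the buffer is full. */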
typedef struct ChanDelay {
    int delay;
    unsigned delay_index;
    unsigned index;
    uint8_t *samples;
} ChanDelay;

typedef struct AudioDelayContext {
    const AVClass *class;
    char *delays;
    ChanDelay *chandelay;
    int nb_delays;
    int block_align;
    unsigned max_delay;
    int64_t next_pts;

    void (*delay_channel)(ChanDelay *d, int nb_samples,
                          const uint8_t *src, uint8_t *dst);
} AudioDelayContext;

#define OFFSET(x) offsetof(AudioDelayContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
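
/* The "delays" option is a "|"-separated list of per-channel delays in
 * milliseconds, e.g. "adelay=1500|0" delays the first channel by 1.5 s and
 * leaves the second channel untouched. */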
static const AVOption adelay_options[] = {
    { "delays", "set list of delays for each channel", OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(adelay);

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
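
/* Expands to one delay routine per planar sample format.  Each routine first
 * outputs "fill" (digital silence for the format) while it buffers the first
 * "delay" input samples, then runs the buffer as a circular queue: every
 * output sample is the input sample written "delay" positions earlier. */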
#define DELAY(name, type, fill)                                           \
static void delay_channel_## name ##p(ChanDelay *d, int nb_samples,       \
                                      const uint8_t *ssrc, uint8_t *ddst) \
{                                                                          \
    const type *src = (type *)ssrc;                                        \
    type *dst = (type *)ddst;                                              \
    type *samples = (type *)d->samples;                                    \
                                                                           \
    while (nb_samples) {                                                   \
        if (d->delay_index < d->delay) {                                   \
            const int len = FFMIN(nb_samples, d->delay - d->delay_index);  \
                                                                           \
            memcpy(&samples[d->delay_index], src, len * sizeof(type));     \
            memset(dst, fill, len * sizeof(type));                         \
            d->delay_index += len;                                         \
            src += len;                                                    \
            dst += len;                                                    \
            nb_samples -= len;                                             \
        } else {                                                           \
            *dst = samples[d->index];                                      \
            samples[d->index] = *src;                                      \
            nb_samples--;                                                  \
            d->index++;                                                    \
            src++, dst++;                                                  \
            d->index = d->index >= d->delay ? 0 : d->index;                \
        }                                                                  \
    }                                                                      \
}

DELAY(u8,  uint8_t,  0x80)
DELAY(s16, int16_t,  0)
DELAY(s32, int32_t,  0)
DELAY(flt, float,    0)
DELAY(dbl, double,   0)
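
/* Parse the "|"-separated delay list (milliseconds per channel), convert each
 * entry to a sample count and allocate the per-channel buffers.  Channels
 * without an entry, or with a delay of 0, are passed through unchanged. */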
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i;

    s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
    if (!s->chandelay)
        return AVERROR(ENOMEM);
    s->nb_delays = inlink->channels;
    s->block_align = av_get_bytes_per_sample(inlink->format);

    p = s->delays;
    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        float delay;

        if (!(arg = av_strtok(p, "|", &saveptr)))
            break;

        p = NULL;
        if (sscanf(arg, "%f", &delay) != 1) {
            av_log(ctx, AV_LOG_ERROR, "Invalid delay '%s'.\n", arg);
            return AVERROR(EINVAL);
        }
        d->delay = delay * inlink->sample_rate / 1000.0;

        if (d->delay < 0) {
            av_log(ctx, AV_LOG_ERROR, "Delay must be a non-negative number.\n");
            return AVERROR(EINVAL);
        }
    }

    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];

        if (!d->delay)
            continue;

        d->samples = av_malloc_array(d->delay, s->block_align);
        if (!d->samples)
            return AVERROR(ENOMEM);

        s->max_delay = FFMAX(s->max_delay, d->delay);
    }

    if (!s->max_delay) {
        av_log(ctx, AV_LOG_ERROR, "At least one delay >0 must be specified.\n");
        return AVERROR(EINVAL);
    }

    switch (inlink->format) {
    case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
    case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
    case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
    case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
    }

    return 0;
}
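
/* Run every channel of the incoming frame through its delay line (or copy it
 * verbatim when the channel has no delay) and remember where the stream ends,
 * so that request_frame() can continue the timestamps while flushing. */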
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    AVFrame *out_frame;
    int i;

    if (ctx->is_disabled || !s->delays)
        return ff_filter_frame(ctx->outputs[0], frame);

    out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
    if (!out_frame) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out_frame, frame);

    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        const uint8_t *src = frame->extended_data[i];
        uint8_t *dst = out_frame->extended_data[i];

        if (!d->delay)
            memcpy(dst, src, frame->nb_samples * s->block_align);
        else
            s->delay_channel(d, frame->nb_samples, src, dst);
    }

    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
    av_frame_free(&frame);
    return ff_filter_frame(ctx->outputs[0], out_frame);
}
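
/* Once the input hits EOF the delayed tail is still sitting in the per-channel
 * buffers, so keep feeding silent frames (up to 2048 samples at a time)
 * through filter_frame() until max_delay samples have been pushed out. */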
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioDelayContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) {
        int nb_samples = FFMIN(s->max_delay, 2048);
        AVFrame *frame;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->max_delay -= nb_samples;

        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);

        frame->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

        ret = filter_frame(ctx->inputs[0], frame);
    }

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioDelayContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_delays; i++)
        av_freep(&s->chandelay[i].samples);
    av_freep(&s->chandelay);
}

static const AVFilterPad adelay_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad adelay_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_adelay = {
    .name          = "adelay",
    .description   = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioDelayContext),
    .priv_class    = &adelay_class,
    .uninit        = uninit,
    .inputs        = adelay_inputs,
    .outputs       = adelay_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};