af_firequalizer.c

/*
 * Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define RDFT_BITS_MIN 4
#define RDFT_BITS_MAX 16

enum WindowFunc {
    WFUNC_MIN,
    WFUNC_RECTANGULAR = WFUNC_MIN,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,
    WFUNC_MNUTTALL3,
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,
    WFUNC_BHARRIS,
    WFUNC_MAX = WFUNC_BHARRIS
};

#define NB_GAIN_ENTRY_MAX 4096

typedef struct {
    double freq;
    double gain;
} GainEntry;

typedef struct {
    int buf_idx;
    int overlap_idx;
} OverlapIndex;

typedef struct {
    const AVClass *class;

    RDFTContext *analysis_irdft;
    RDFTContext *rdft;
    RDFTContext *irdft;
    int analysis_rdft_len;
    int rdft_len;

    float *analysis_buf;
    float *kernel_tmp_buf;
    float *kernel_buf;
    float *conv_buf;
    OverlapIndex *conv_idx;
    int fir_len;
    int nsamples_max;
    int64_t next_pts;
    int frame_nsamples_max;
    int remaining;

    char *gain_cmd;
    char *gain_entry_cmd;
    const char *gain;
    const char *gain_entry;
    double delay;
    double accuracy;
    int wfunc;
    int fixed;
    int multi;
    int zero_phase;

    int nb_gain_entry;
    int gain_entry_err;
    GainEntry gain_entry_tbl[NB_GAIN_ENTRY_MAX];
} FIREqualizerContext;

#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, WFUNC_MIN, WFUNC_MAX, FLAGS, "wfunc" },
        { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
        { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
        { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
        { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
        { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
        { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
        { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
        { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
        { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(firequalizer);

static void common_uninit(FIREqualizerContext *s)
{
    av_rdft_end(s->analysis_irdft);
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    s->analysis_irdft = s->rdft = s->irdft = NULL;

    av_freep(&s->analysis_buf);
    av_freep(&s->kernel_tmp_buf);
    av_freep(&s->kernel_buf);
    av_freep(&s->conv_buf);
    av_freep(&s->conv_idx);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FIREqualizerContext *s = ctx->priv;

    common_uninit(s);
    av_freep(&s->gain_cmd);
    av_freep(&s->gain_entry_cmd);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

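/* Overlap-add FFT convolution of one channel. The input block is zero-padded
 * to rdft_len, transformed, multiplied by the frequency-domain kernel,
 * transformed back, and the tail left over from the previous block (kept in
 * the other half of conv_buf, starting at its overlap_idx) is added in.
 * Blocks longer than nsamples_max are split and processed recursively. */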
static void fast_convolute(FIREqualizerContext *s, const float *kernel_buf, float *conv_buf,
                           OverlapIndex *idx, float *data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int k;

        memcpy(buf, data, nsamples * sizeof(*data));
        memset(buf + nsamples, 0, (s->rdft_len - nsamples) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);

        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[1];
        for (k = 2; k < s->rdft_len; k += 2) {
            float re, im;
            re = buf[k] * kernel_buf[k] - buf[k+1] * kernel_buf[k+1];
            im = buf[k] * kernel_buf[k+1] + buf[k+1] * kernel_buf[k];
            buf[k] = re;
            buf[k+1] = im;
        }

        av_rdft_calc(s->irdft, buf);
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}

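/* Callback for the entry(freq, gain) function used in the gain_entry
 * expression. Appends one (frequency, gain in dB) pair to the table;
 * frequencies must be finite and strictly ascending, and errors are
 * reported through gain_entry_err. */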
static double entry_func(void *p, double freq, double gain)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;

    if (s->nb_gain_entry >= NB_GAIN_ENTRY_MAX) {
        av_log(ctx, AV_LOG_ERROR, "entry table overflow.\n");
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (isnan(freq)) {
        av_log(ctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    if (s->nb_gain_entry > 0 && freq <= s->gain_entry_tbl[s->nb_gain_entry - 1].freq) {
        av_log(ctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
        s->gain_entry_err = AVERROR(EINVAL);
        return 0;
    }

    s->gain_entry_tbl[s->nb_gain_entry].freq = freq;
    s->gain_entry_tbl[s->nb_gain_entry].gain = gain;
    s->nb_gain_entry++;
    return 0;
}

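/* bsearch() comparator: each table element is treated as the start of the
 * interval [entry[0].freq, entry[1].freq], so the search returns the pair
 * of adjacent entries that brackets the requested frequency. */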
static int gain_entry_compare(const void *key, const void *memb)
{
    const double *freq = key;
    const GainEntry *entry = memb;

    if (*freq < entry[0].freq)
        return -1;
    if (*freq > entry[1].freq)
        return 1;
    return 0;
}

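/* Implements the default gain expression gain_interpolate(f): below the first
 * and above the last entry the gain is clamped, otherwise the gain (in dB) is
 * linearly interpolated between the two entries bracketing f. */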
static double gain_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double d0, d1, d;

    if (isnan(freq))
        return freq;

    if (!s->nb_gain_entry)
        return 0;

    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;

    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    d = res[1].freq - res[0].freq;
    d0 = freq - res[0].freq;
    d1 = res[1].freq - freq;

    if (d0 && d1)
        return (d0 * res[1].gain + d1 * res[0].gain) / d;

    if (d0)
        return res[1].gain;

    return res[0].gain;
}

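/* Variables available inside the gain expression: f is the frequency being
 * evaluated, sr the sample rate, ch the channel index, chid the channel id
 * extracted from the layout, chs the channel count and chlayout the channel
 * layout mask. */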
static const char *const var_names[] = {
    "f",
    "sr",
    "ch",
    "chid",
    "chs",
    "chlayout",
    NULL
};

enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};

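/* Build the frequency-domain filter kernel. For each channel (only once when
 * multi is disabled): evaluate the gain expression in dB on the analysis
 * grid, convert to linear amplitude, run the inverse analysis RDFT to get an
 * impulse response, window and mirror it into a symmetric (linear-phase) FIR
 * of fir_len taps, zero-pad to rdft_len and forward-transform it into the
 * kernel consumed by fast_convolute(). The scale factors compensate for the
 * unnormalized RDFT/IRDFT pairs. */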
static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;

    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }
    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);
        vars[VAR_F] = 0.0;
        s->analysis_buf[0] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));
        vars[VAR_F] = 0.5 * inlink->sample_rate;
        s->analysis_buf[1] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));

        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
            s->analysis_buf[2*k] = pow(10.0, 0.05 * av_expr_eval(gain_expr, vars, ctx));
            s->analysis_buf[2*k+1] = 0.0;
        }

        av_rdft_calc(s->analysis_irdft, s->analysis_buf);

        center = s->fir_len / 2;

        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            default:
                av_assert0(0);
            }

            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
        }

        for (k = 0; k < center - k; k++) {
            float tmp = s->analysis_buf[k];
            s->analysis_buf[k] = s->analysis_buf[center - k];
            s->analysis_buf[center - k] = tmp;
        }

        for (k = 1; k <= center; k++)
            s->analysis_buf[center + k] = s->analysis_buf[center - k];

        memset(s->analysis_buf + s->fir_len, 0, (s->rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        av_rdft_calc(s->rdft, s->analysis_buf);

        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(s->analysis_buf[k]) || isinf(s->analysis_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                return AVERROR(EINVAL);
            }
        }

        memcpy(s->kernel_tmp_buf + ch * s->rdft_len, s->analysis_buf, s->rdft_len * sizeof(*s->analysis_buf));
        if (!s->multi)
            break;
    }

    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    return 0;
}

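/* Configure the filter for the input link: derive fir_len from the delay
 * option, pick the smallest power-of-two rdft_len that leaves enough room
 * for overlap-add (nsamples_max = rdft_len - fir_len + 1), pick an analysis
 * transform length so the frequency grid spacing sample_rate /
 * analysis_rdft_len does not exceed the accuracy option, allocate the work
 * buffers and generate the initial kernel. */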
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    s->remaining = s->fir_len - 1;

    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    return generate_kernel(ctx, s->gain_cmd ? s->gain_cmd : s->gain,
                           s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry);
}

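/* Filter one audio frame in place: run fast_convolute() on every channel with
 * either the shared kernel or, in multi mode, the per-channel kernel.
 * next_pts is advanced by the frame duration, and in zero_phase mode the
 * output pts is shifted back by fir_len/2 samples to compensate for the group
 * delay of the linear-phase FIR. */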
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < inlink->channels; ch++) {
        fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                       s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                       (float *) frame->extended_data[ch], frame->nb_samples);
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        if (s->zero_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);
    return ff_filter_frame(ctx->outputs[0], frame);
}

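/* At EOF, flush the convolution tail: feed up to the remaining fir_len - 1
 * samples of silence back through filter_frame() so the delayed output of the
 * FIR is drained before the filter terminates. */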
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->remaining > 0 && s->frame_nsamples_max > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));

        if (!frame)
            return AVERROR(ENOMEM);

        av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, outlink->channels, frame->format);
        frame->pts = s->next_pts;
        s->remaining -= frame->nb_samples;
        ret = filter_frame(ctx->inputs[0], frame);
    }

    return ret;
}

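/* Handle the "gain" and "gain_entry" runtime commands: regenerate the kernel
 * with the new expression string and keep the string only if the kernel was
 * built successfully. */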
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    FIREqualizerContext *s = ctx->priv;
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "gain")) {
        char *gain_cmd;

        gain_cmd = av_strdup(args);
        if (!gain_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, gain_cmd, s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry);
        if (ret >= 0) {
            av_freep(&s->gain_cmd);
            s->gain_cmd = gain_cmd;
        } else {
            av_freep(&gain_cmd);
        }
    } else if (!strcmp(cmd, "gain_entry")) {
        char *gain_entry_cmd;

        gain_entry_cmd = av_strdup(args);
        if (!gain_entry_cmd)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, s->gain_cmd ? s->gain_cmd : s->gain, gain_entry_cmd);
        if (ret >= 0) {
            av_freep(&s->gain_entry_cmd);
            s->gain_entry_cmd = gain_entry_cmd;
        } else {
            av_freep(&gain_entry_cmd);
        }
    }

    return ret;
}

static const AVFilterPad firequalizer_inputs[] = {
    {
        .name           = "default",
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .type           = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad firequalizer_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_firequalizer = {
    .name            = "firequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(FIREqualizerContext),
    .inputs          = firequalizer_inputs,
    .outputs         = firequalizer_outputs,
    .priv_class      = &firequalizer_class,
};