  1. /*
  2. * Dynamic Audio Normalizer
  3. * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Dynamic Audio Normalizer
  24. */
  25. #include <float.h>
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/opt.h"
  28. #define FF_BUFQUEUE_SIZE 302
  29. #include "libavfilter/bufferqueue.h"
  30. #include "audio.h"
  31. #include "avfilter.h"
  32. #include "internal.h"
/**
 * Fixed-capacity circular FIFO of doubles.
 * Used for the per-channel gain-factor histories.
 */
typedef struct cqueue {
    double *elements;   ///< ring-buffer storage (capacity `size` doubles)
    int size;           ///< allocated capacity
    int nb_elements;    ///< number of valid entries currently stored
    int first;          ///< index of the oldest entry (ring head)
} cqueue;
typedef struct DynamicAudioNormalizerContext {
    const AVClass *class;              ///< required first member for AVOptions

    struct FFBufQueue queue;           ///< look-ahead FIFO of buffered input frames

    int frame_len;                     ///< frame length in samples (derived from frame_len_msec)
    int frame_len_msec;                ///< "f" option: frame length in milliseconds
    int filter_size;                   ///< "g" option: Gaussian smoothing window size (must be odd)
    int dc_correction;                 ///< "c" option: enable DC bias correction
    int channels_coupled;              ///< "n" option: apply one common gain to all channels
    int alt_boundary_mode;             ///< "b" option: alternative handling of boundary frames

    double peak_value;                 ///< "p" option: maximum allowed output peak magnitude
    double max_amplification;          ///< "m" option: upper bound on the gain factor
    double target_rms;                 ///< "r" option: target RMS (0.0 disables RMS mode)
    double compress_factor;            ///< "s" option: compression strength (0.0 disables)
    double *prev_amplification_factor; ///< per-channel gain in effect at the end of the previous frame
    double *dc_correction_value;       ///< per-channel smoothed DC offset estimate
    double *compress_threshold;        ///< per-channel smoothed compression threshold
    double *fade_factors[2];           ///< linear cross-fade weights: [0] falling 1->0, [1] rising 0->1
    double *weights;                   ///< Gaussian filter weights (filter_size entries, sum to 1)

    int channels;                      ///< channel count of the configured input link
    int delay;                         ///< flush counter; set to filter_size, decremented per flushed frame

    cqueue **gain_history_original;    ///< per-channel raw frame gains
    cqueue **gain_history_minimum;     ///< per-channel minimum-filtered gains
    cqueue **gain_history_smoothed;    ///< per-channel Gaussian-smoothed gains (consumed when amplifying)
} DynamicAudioNormalizerContext;
#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options; each maps directly onto a context field above. */
static const AVOption dynaudnorm_options[] = {
    { "f", "set the frame length in msec",     OFFSET(frame_len_msec),     AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "g", "set the filter size",              OFFSET(filter_size),        AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "p", "set the peak value",               OFFSET(peak_value),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0,   1.0, FLAGS },
    { "m", "set the max amplification",        OFFSET(max_amplification),  AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "r", "set the target RMS",               OFFSET(target_rms),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "n", "set channel coupling",             OFFSET(channels_coupled),   AV_OPT_TYPE_BOOL,   {.i64 = 1},      0,     1, FLAGS },
    { "c", "set DC correction",                OFFSET(dc_correction),      AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "b", "set alternative boundary mode",    OFFSET(alt_boundary_mode),  AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "s", "set the compress factor",          OFFSET(compress_factor),    AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,  30.0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dynaudnorm);
  78. static av_cold int init(AVFilterContext *ctx)
  79. {
  80. DynamicAudioNormalizerContext *s = ctx->priv;
  81. if (!(s->filter_size & 1)) {
  82. av_log(ctx, AV_LOG_ERROR, "filter size %d is invalid. Must be an odd value.\n", s->filter_size);
  83. return AVERROR(EINVAL);
  84. }
  85. return 0;
  86. }
/**
 * Negotiate formats: any channel count and sample rate, but only planar
 * double samples (the processing code casts extended_data to double*).
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    /* ff_set_common_* takes ownership of the list, even on failure. */
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
  113. static inline int frame_size(int sample_rate, int frame_len_msec)
  114. {
  115. const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
  116. return frame_size + (frame_size % 2);
  117. }
  118. static void precalculate_fade_factors(double *fade_factors[2], int frame_len)
  119. {
  120. const double step_size = 1.0 / frame_len;
  121. int pos;
  122. for (pos = 0; pos < frame_len; pos++) {
  123. fade_factors[0][pos] = 1.0 - (step_size * (pos + 1.0));
  124. fade_factors[1][pos] = 1.0 - fade_factors[0][pos];
  125. }
  126. }
  127. static cqueue *cqueue_create(int size)
  128. {
  129. cqueue *q;
  130. q = av_malloc(sizeof(cqueue));
  131. if (!q)
  132. return NULL;
  133. q->size = size;
  134. q->nb_elements = 0;
  135. q->first = 0;
  136. q->elements = av_malloc_array(size, sizeof(double));
  137. if (!q->elements) {
  138. av_free(q);
  139. return NULL;
  140. }
  141. return q;
  142. }
  143. static void cqueue_free(cqueue *q)
  144. {
  145. if (q)
  146. av_free(q->elements);
  147. av_free(q);
  148. }
  149. static int cqueue_size(cqueue *q)
  150. {
  151. return q->nb_elements;
  152. }
  153. static int cqueue_empty(cqueue *q)
  154. {
  155. return !q->nb_elements;
  156. }
  157. static int cqueue_enqueue(cqueue *q, double element)
  158. {
  159. int i;
  160. av_assert2(q->nb_elements != q->size);
  161. i = (q->first + q->nb_elements) % q->size;
  162. q->elements[i] = element;
  163. q->nb_elements++;
  164. return 0;
  165. }
  166. static double cqueue_peek(cqueue *q, int index)
  167. {
  168. av_assert2(index < q->nb_elements);
  169. return q->elements[(q->first + index) % q->size];
  170. }
  171. static int cqueue_dequeue(cqueue *q, double *element)
  172. {
  173. av_assert2(!cqueue_empty(q));
  174. *element = q->elements[q->first];
  175. q->first = (q->first + 1) % q->size;
  176. q->nb_elements--;
  177. return 0;
  178. }
  179. static int cqueue_pop(cqueue *q)
  180. {
  181. av_assert2(!cqueue_empty(q));
  182. q->first = (q->first + 1) % q->size;
  183. q->nb_elements--;
  184. return 0;
  185. }
  186. static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
  187. {
  188. double total_weight = 0.0;
  189. const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
  190. double adjust;
  191. int i;
  192. // Pre-compute constants
  193. const int offset = s->filter_size / 2;
  194. const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
  195. const double c2 = 2.0 * sigma * sigma;
  196. // Compute weights
  197. for (i = 0; i < s->filter_size; i++) {
  198. const int x = i - offset;
  199. s->weights[i] = c1 * exp(-x * x / c2);
  200. total_weight += s->weights[i];
  201. }
  202. // Adjust weights
  203. adjust = 1.0 / total_weight;
  204. for (i = 0; i < s->filter_size; i++) {
  205. s->weights[i] *= adjust;
  206. }
  207. }
  208. static av_cold void uninit(AVFilterContext *ctx)
  209. {
  210. DynamicAudioNormalizerContext *s = ctx->priv;
  211. int c;
  212. av_freep(&s->prev_amplification_factor);
  213. av_freep(&s->dc_correction_value);
  214. av_freep(&s->compress_threshold);
  215. av_freep(&s->fade_factors[0]);
  216. av_freep(&s->fade_factors[1]);
  217. for (c = 0; c < s->channels; c++) {
  218. if (s->gain_history_original)
  219. cqueue_free(s->gain_history_original[c]);
  220. if (s->gain_history_minimum)
  221. cqueue_free(s->gain_history_minimum[c]);
  222. if (s->gain_history_smoothed)
  223. cqueue_free(s->gain_history_smoothed[c]);
  224. }
  225. av_freep(&s->gain_history_original);
  226. av_freep(&s->gain_history_minimum);
  227. av_freep(&s->gain_history_smoothed);
  228. av_freep(&s->weights);
  229. ff_bufqueue_discard_all(&s->queue);
  230. }
/**
 * Per-link configuration: size the frame, allocate all per-channel state,
 * and precompute the fade table and Gaussian window.
 * May be called more than once; starts by freeing any previous state.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    uninit(ctx); /* release state from any earlier configuration */

    /* Force the framework to deliver frames of exactly frame_len samples. */
    s->frame_len =
    inlink->min_samples =
    inlink->max_samples =
    inlink->partial_buf_size = frame_size(inlink->sample_rate, s->frame_len_msec);
    av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);

    s->fade_factors[0] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[0]));
    s->fade_factors[1] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[1]));

    s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
    /* Zero-initialized: DC offsets and compression thresholds start at 0. */
    s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
    s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
    s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
    s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
    s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
    s->weights = av_malloc_array(s->filter_size, sizeof(*s->weights));
    /* On failure uninit() will be called by the framework and free the rest. */
    if (!s->prev_amplification_factor || !s->dc_correction_value ||
        !s->compress_threshold || !s->fade_factors[0] || !s->fade_factors[1] ||
        !s->gain_history_original || !s->gain_history_minimum ||
        !s->gain_history_smoothed || !s->weights)
        return AVERROR(ENOMEM);

    for (c = 0; c < inlink->channels; c++) {
        s->prev_amplification_factor[c] = 1.0; /* unity gain until history exists */

        s->gain_history_original[c] = cqueue_create(s->filter_size);
        s->gain_history_minimum[c] = cqueue_create(s->filter_size);
        s->gain_history_smoothed[c] = cqueue_create(s->filter_size);

        if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
            !s->gain_history_smoothed[c])
            return AVERROR(ENOMEM);
    }

    precalculate_fade_factors(s->fade_factors, s->frame_len);
    init_gaussian_filter(s);

    s->channels = inlink->channels;
    /* Look-ahead depth: frames buffered before output starts. */
    s->delay = s->filter_size;

    return 0;
}
  271. static inline double fade(double prev, double next, int pos,
  272. double *fade_factors[2])
  273. {
  274. return fade_factors[0][pos] * prev + fade_factors[1][pos] * next;
  275. }
  276. static inline double pow2(const double value)
  277. {
  278. return value * value;
  279. }
  280. static inline double bound(const double threshold, const double val)
  281. {
  282. const double CONST = 0.8862269254527580136490837416705725913987747280611935; //sqrt(PI) / 2.0
  283. return erf(CONST * (val / threshold)) * threshold;
  284. }
  285. static double find_peak_magnitude(AVFrame *frame, int channel)
  286. {
  287. double max = DBL_EPSILON;
  288. int c, i;
  289. if (channel == -1) {
  290. for (c = 0; c < av_frame_get_channels(frame); c++) {
  291. double *data_ptr = (double *)frame->extended_data[c];
  292. for (i = 0; i < frame->nb_samples; i++)
  293. max = FFMAX(max, fabs(data_ptr[i]));
  294. }
  295. } else {
  296. double *data_ptr = (double *)frame->extended_data[channel];
  297. for (i = 0; i < frame->nb_samples; i++)
  298. max = FFMAX(max, fabs(data_ptr[i]));
  299. }
  300. return max;
  301. }
  302. static double compute_frame_rms(AVFrame *frame, int channel)
  303. {
  304. double rms_value = 0.0;
  305. int c, i;
  306. if (channel == -1) {
  307. for (c = 0; c < av_frame_get_channels(frame); c++) {
  308. const double *data_ptr = (double *)frame->extended_data[c];
  309. for (i = 0; i < frame->nb_samples; i++) {
  310. rms_value += pow2(data_ptr[i]);
  311. }
  312. }
  313. rms_value /= frame->nb_samples * av_frame_get_channels(frame);
  314. } else {
  315. const double *data_ptr = (double *)frame->extended_data[channel];
  316. for (i = 0; i < frame->nb_samples; i++) {
  317. rms_value += pow2(data_ptr[i]);
  318. }
  319. rms_value /= frame->nb_samples;
  320. }
  321. return FFMAX(sqrt(rms_value), DBL_EPSILON);
  322. }
  323. static double get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
  324. int channel)
  325. {
  326. const double maximum_gain = s->peak_value / find_peak_magnitude(frame, channel);
  327. const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
  328. return bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));
  329. }
  330. static double minimum_filter(cqueue *q)
  331. {
  332. double min = DBL_MAX;
  333. int i;
  334. for (i = 0; i < cqueue_size(q); i++) {
  335. min = FFMIN(min, cqueue_peek(q, i));
  336. }
  337. return min;
  338. }
  339. static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q)
  340. {
  341. double result = 0.0;
  342. int i;
  343. for (i = 0; i < cqueue_size(q); i++) {
  344. result += cqueue_peek(q, i) * s->weights[i];
  345. }
  346. return result;
  347. }
/**
 * Push this frame's gain factor through the three-stage history pipeline
 * for one channel: raw gains -> minimum filter -> Gaussian smoothing.
 * Once a history queue reaches filter_size entries, its filtered result is
 * forwarded to the next stage and the oldest entry is dropped.
 */
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
                                double current_gain_factor)
{
    /* First call for this channel: pre-fill half a window so the centered
     * filters have left-side context. alt_boundary_mode seeds with the
     * current gain instead of unity. */
    if (cqueue_empty(s->gain_history_original[channel]) ||
        cqueue_empty(s->gain_history_minimum[channel])) {
        const int pre_fill_size = s->filter_size / 2;
        s->prev_amplification_factor[channel] = s->alt_boundary_mode ? current_gain_factor : 1.0;
        while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
            cqueue_enqueue(s->gain_history_original[channel], s->alt_boundary_mode ? current_gain_factor : 1.0);
        }
        while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
            cqueue_enqueue(s->gain_history_minimum[channel], s->alt_boundary_mode ? current_gain_factor : 1.0);
        }
    }
    cqueue_enqueue(s->gain_history_original[channel], current_gain_factor);
    /* Full raw window: take its minimum (avoids clipping between frames)
     * and slide the window forward by one. */
    while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
        double minimum;
        av_assert0(cqueue_size(s->gain_history_original[channel]) == s->filter_size);
        minimum = minimum_filter(s->gain_history_original[channel]);
        cqueue_enqueue(s->gain_history_minimum[channel], minimum);
        cqueue_pop(s->gain_history_original[channel]);
    }
    /* Full minimum window: smooth it with the Gaussian and emit one
     * ready-to-apply gain into the smoothed queue. */
    while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
        double smoothed;
        av_assert0(cqueue_size(s->gain_history_minimum[channel]) == s->filter_size);
        smoothed = gaussian_filter(s, s->gain_history_minimum[channel]);
        cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
        cqueue_pop(s->gain_history_minimum[channel]);
    }
}
  378. static inline double update_value(double new, double old, double aggressiveness)
  379. {
  380. av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
  381. return aggressiveness * new + (1.0 - aggressiveness) * old;
  382. }
/**
 * Estimate and remove the DC offset of each channel in place.
 * The per-channel offset is exponentially smoothed across frames
 * (aggressiveness 0.1) and cross-faded within the frame to avoid steps.
 */
static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    const double diff = 1.0 / frame->nb_samples; /* 1/N, for the mean */
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_average_value = 0.0;
        double prev_value;

        for (i = 0; i < frame->nb_samples; i++)
            current_average_value += dst_ptr[i] * diff;

        /* No history on the first frame: use this frame's mean directly. */
        prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
        s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);

        /* Subtract the offset, fading from the old estimate to the new one. */
        for (i = 0; i < frame->nb_samples; i++) {
            dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, s->fade_factors);
        }
    }
}
/**
 * Invert bound(): find the threshold t such that bound(t, 1.0) ~= threshold.
 * Because bound() compresses values below its threshold too, the raw
 * threshold must be pre-expanded so a full-scale sample maps to exactly the
 * desired level. Solved by a halving-step search; the inner loop's
 * `t + step > t` test stops once the step vanishes in double precision.
 * Inputs at or outside (0, 1) are returned unchanged.
 */
static double setup_compress_thresh(double threshold)
{
    if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
        double current_threshold = threshold;
        double step_size = 1.0;

        while (step_size > DBL_EPSILON) {
            /* Grow while the expanded threshold still maps below the target. */
            while ((current_threshold + step_size > current_threshold) &&
                   (bound(current_threshold + step_size, 1.0) <= threshold)) {
                current_threshold += step_size;
            }
            step_size /= 2.0;
        }

        return current_threshold;
    } else {
        return threshold;
    }
}
/**
 * Sample standard deviation of the frame, assuming the mean is zero
 * (reasonable for audio, especially after DC correction).
 * channel == -1 pools all channels; otherwise only the given one.
 * Uses the n-1 (Bessel-corrected) denominator; clamped to >= DBL_EPSILON.
 */
static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
                                    AVFrame *frame, int channel)
{
    double variance = 0.0;
    int i, c;

    if (channel == -1) {
        for (c = 0; c < s->channels; c++) {
            const double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                variance += pow2(data_ptr[i]); // Assume that MEAN is *zero*
            }
        }
        variance /= (s->channels * frame->nb_samples) - 1;
    } else {
        const double *data_ptr = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++) {
            variance += pow2(data_ptr[i]); // Assume that MEAN is *zero*
        }
        variance /= frame->nb_samples - 1;
    }

    return FFMAX(sqrt(variance), DBL_EPSILON);
}
/**
 * Soft-knee input compression applied in place before normalization.
 * The threshold is compress_factor standard deviations (capped at 1.0),
 * exponentially smoothed across frames (aggressiveness 1/3), pre-expanded
 * by setup_compress_thresh(), and cross-faded within the frame.
 * Coupled mode uses one threshold for all channels; otherwise per channel.
 */
static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    if (s->channels_coupled) {
        /* One shared threshold derived from the pooled std-dev; only
         * compress_threshold[0] is used in this mode. */
        const double standard_deviation = compute_frame_std_dev(s, frame, -1);
        const double current_threshold = FFMIN(1.0, s->compress_factor * standard_deviation);

        const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
        double prev_actual_thresh, curr_actual_thresh;
        s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));

        prev_actual_thresh = setup_compress_thresh(prev_value);
        curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);

        for (c = 0; c < s->channels; c++) {
            double *const dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                /* Fade between last frame's threshold and this frame's. */
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
                /* Saturate magnitude, keep the sign. */
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    } else {
        for (c = 0; c < s->channels; c++) {
            const double standard_deviation = compute_frame_std_dev(s, frame, c);
            const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));

            const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
            double prev_actual_thresh, curr_actual_thresh;
            double *dst_ptr;
            s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);

            prev_actual_thresh = setup_compress_thresh(prev_value);
            curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);

            dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    }
}
  477. static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
  478. {
  479. if (s->dc_correction) {
  480. perform_dc_correction(s, frame);
  481. }
  482. if (s->compress_factor > DBL_EPSILON) {
  483. perform_compression(s, frame);
  484. }
  485. if (s->channels_coupled) {
  486. const double current_gain_factor = get_max_local_gain(s, frame, -1);
  487. int c;
  488. for (c = 0; c < s->channels; c++)
  489. update_gain_history(s, c, current_gain_factor);
  490. } else {
  491. int c;
  492. for (c = 0; c < s->channels; c++)
  493. update_gain_history(s, c, get_max_local_gain(s, frame, c));
  494. }
  495. }
/**
 * Apply the next smoothed gain to a delayed frame, in place.
 * The gain is cross-faded from the previous frame's factor to this one's
 * over the frame, and samples are hard-clipped at +/- peak_value.
 */
static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_amplification_factor;

        /* Consume one ready gain from the smoothed history. */
        cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);

        for (i = 0; i < frame->nb_samples; i++) {
            const double amplification_factor = fade(s->prev_amplification_factor[c],
                                                     current_amplification_factor, i,
                                                     s->fade_factors);

            dst_ptr[i] *= amplification_factor;
            /* Safety clip: smoothing should prevent this in practice. */
            if (fabs(dst_ptr[i]) > s->peak_value)
                dst_ptr[i] = copysign(s->peak_value, dst_ptr[i]);
        }

        s->prev_amplification_factor[c] = current_amplification_factor;
    }
}
/**
 * Input callback. The filter works with a look-ahead delay: the new frame
 * is analyzed and queued, while the oldest queued frame — if a smoothed
 * gain is already available for it — is amplified and sent downstream.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int ret = 0;

    if (!cqueue_empty(s->gain_history_smoothed[0])) {
        /* Oldest delayed frame is ready: apply its gain and emit it. */
        AVFrame *out = ff_bufqueue_get(&s->queue);

        amplify_frame(s, out);
        ret = ff_filter_frame(outlink, out);
    }

    analyze_frame(s, in);
    /* Ownership of `in` passes to the delay queue. */
    ff_bufqueue_add(ctx, &s->queue, in);

    return ret;
}
/**
 * At EOF, feed one synthetic frame through filter_frame() so the frames
 * still sitting in the look-ahead queue get amplified and emitted.
 * The synthetic samples approximate the established levels (or a
 * near-silent alternating pattern with DC offset when dc_correction is on)
 * so the gain histories wind down smoothly. Decrements s->delay.
 */
static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
                        AVFilterLink *outlink)
{
    AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
    int c, i;

    if (!out)
        return AVERROR(ENOMEM);

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)out->extended_data[c];

        for (i = 0; i < out->nb_samples; i++) {
            /* Near-zero in alt boundary mode; otherwise the configured
             * target level (RMS target if set, else the peak target). */
            dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
            if (s->dc_correction) {
                dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1; /* zero-mean alternation */
                dst_ptr[i] += s->dc_correction_value[c];
            }
        }
    }

    s->delay--;
    return filter_frame(inlink, out);
}
  549. static int request_frame(AVFilterLink *outlink)
  550. {
  551. AVFilterContext *ctx = outlink->src;
  552. DynamicAudioNormalizerContext *s = ctx->priv;
  553. int ret = 0;
  554. ret = ff_request_frame(ctx->inputs[0]);
  555. if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay)
  556. ret = flush_buffer(s, ctx->inputs[0], outlink);
  557. return ret;
  558. }
static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .config_props   = config_input,
        /* Frames are modified in place (DC correction, compression,
         * amplification), so request writable input. */
        .needs_writable = 1,
    },
    { NULL }
};
static const AVFilterPad avfilter_af_dynaudnorm_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        /* Drives EOF flushing of the look-ahead queue. */
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter registration entry. */
AVFilter ff_af_dynaudnorm = {
    .name          = "dynaudnorm",
    .description   = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
    .query_formats = query_formats,
    .priv_size     = sizeof(DynamicAudioNormalizerContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = avfilter_af_dynaudnorm_inputs,
    .outputs       = avfilter_af_dynaudnorm_outputs,
    .priv_class    = &dynaudnorm_class,
};