FFmpeg: af_dynaudnorm.c
/*
 * Dynamic Audio Normalizer
 * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Dynamic Audio Normalizer
 */

#include <float.h>

#include "libavutil/avassert.h"
#include "libavutil/opt.h"

#define MIN_FILTER_SIZE 3
#define MAX_FILTER_SIZE 301

#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)
#include "bufferqueue.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"

typedef struct local_gain {
    double max_gain;
    double threshold;
} local_gain;

typedef struct cqueue {
    double *elements;
    int size;
    int max_size;
    int nb_elements;
} cqueue;

typedef struct DynamicAudioNormalizerContext {
    const AVClass *class;

    struct FFBufQueue queue;

    int frame_len;
    int frame_len_msec;
    int filter_size;
    int dc_correction;
    int channels_coupled;
    int alt_boundary_mode;

    double peak_value;
    double max_amplification;
    double target_rms;
    double compress_factor;
    double threshold;
    double *prev_amplification_factor;
    double *dc_correction_value;
    double *compress_threshold;
    double *weights;

    int channels;
    int eof;
    int64_t pts;

    cqueue **gain_history_original;
    cqueue **gain_history_minimum;
    cqueue **gain_history_smoothed;
    cqueue **threshold_history;

    cqueue *is_enabled;
} DynamicAudioNormalizerContext;

#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption dynaudnorm_options[] = {
    { "framelen",    "set the frame length in msec",  OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "f",           "set the frame length in msec",  OFFSET(frame_len_msec),    AV_OPT_TYPE_INT,    {.i64 = 500},   10,  8000, FLAGS },
    { "gausssize",   "set the filter size",           OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "g",           "set the filter size",           OFFSET(filter_size),       AV_OPT_TYPE_INT,    {.i64 = 31},     3,   301, FLAGS },
    { "peak",        "set the peak value",            OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0,   1.0, FLAGS },
    { "p",           "set the peak value",            OFFSET(peak_value),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0,   1.0, FLAGS },
    { "maxgain",     "set the max amplification",     OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "m",           "set the max amplification",     OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "targetrms",   "set the target RMS",            OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "r",           "set the target RMS",            OFFSET(target_rms),        AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "coupling",    "set channel coupling",          OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},      0,     1, FLAGS },
    { "n",           "set channel coupling",          OFFSET(channels_coupled),  AV_OPT_TYPE_BOOL,   {.i64 = 1},      0,     1, FLAGS },
    { "correctdc",   "set DC correction",             OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "c",           "set DC correction",             OFFSET(dc_correction),     AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "altboundary", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "b",           "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL,   {.i64 = 0},      0,     1, FLAGS },
    { "compress",    "set the compress factor",       OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,  30.0, FLAGS },
    { "s",           "set the compress factor",       OFFSET(compress_factor),   AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,  30.0, FLAGS },
    { "threshold",   "set the threshold value",       OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { "t",           "set the threshold value",       OFFSET(threshold),         AV_OPT_TYPE_DOUBLE, {.dbl = 0.0},  0.0,   1.0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dynaudnorm);

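/*
 * Usage sketch: with the options declared above, the filter is typically
 * driven from the ffmpeg CLI, e.g. (the values shown are just the defaults):
 *
 *     ffmpeg -i in.wav -af dynaudnorm=f=500:g=31:p=0.95:m=10.0 out.wav
 *
 * i.e. 500 ms analysis frames, a 31-frame Gaussian smoothing window and a
 * peak target of 0.95 of full scale.
 */
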
static av_cold int init(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;

    if (!(s->filter_size & 1)) {
        av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size);
        s->filter_size |= 1;
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret = ff_set_common_all_channel_counts(ctx);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats_from_list(ctx, sample_fmts);
    if (ret < 0)
        return ret;

    return ff_set_common_all_samplerates(ctx);
}

static inline int frame_size(int sample_rate, int frame_len_msec)
{
    const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
    return frame_size + (frame_size % 2);
}

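/*
 * cqueue is a simple fixed-capacity FIFO of doubles: elements are appended
 * at the back with cqueue_enqueue() and removed from the front with
 * cqueue_dequeue()/cqueue_pop(). It is used below to hold the per-channel
 * gain and threshold histories over the smoothing window.
 */
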
static cqueue *cqueue_create(int size, int max_size)
{
    cqueue *q;

    if (max_size < size)
        return NULL;

    q = av_malloc(sizeof(cqueue));
    if (!q)
        return NULL;

    q->max_size = max_size;
    q->size = size;
    q->nb_elements = 0;

    q->elements = av_malloc_array(max_size, sizeof(double));
    if (!q->elements) {
        av_free(q);
        return NULL;
    }

    return q;
}

static void cqueue_free(cqueue *q)
{
    if (q)
        av_free(q->elements);
    av_free(q);
}

static int cqueue_size(cqueue *q)
{
    return q->nb_elements;
}

static int cqueue_empty(cqueue *q)
{
    return q->nb_elements <= 0;
}

static int cqueue_enqueue(cqueue *q, double element)
{
    av_assert2(q->nb_elements < q->max_size);

    q->elements[q->nb_elements] = element;
    q->nb_elements++;

    return 0;
}

static double cqueue_peek(cqueue *q, int index)
{
    av_assert2(index < q->nb_elements);
    return q->elements[index];
}

static int cqueue_dequeue(cqueue *q, double *element)
{
    av_assert2(q->nb_elements > 0);

    *element = q->elements[0];
    memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
    q->nb_elements--;

    return 0;
}

static int cqueue_pop(cqueue *q)
{
    av_assert2(q->nb_elements > 0);

    memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double));
    q->nb_elements--;

    return 0;
}

static void cqueue_resize(cqueue *q, int new_size)
{
    av_assert2(q->max_size >= new_size);
    av_assert2(MIN_FILTER_SIZE <= new_size);

    if (new_size > q->nb_elements) {
        const int side = (new_size - q->nb_elements) / 2;

        memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements);
        for (int i = 0; i < side; i++)
            q->elements[i] = q->elements[side];
        q->nb_elements = new_size - 1 - side;
    } else {
        int count = (q->size - new_size + 1) / 2;

        while (count-- > 0)
            cqueue_pop(q);
    }

    q->size = new_size;
}

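/*
 * The smoothing kernel below is a sampled, normalized Gaussian: with
 * offset = filter_size / 2 and sigma = ((filter_size / 2.0 - 1.0) / 3.0) + 1.0 / 3.0,
 * the weights are
 *
 *     w[i] = exp(-(i - offset)^2 / (2 * sigma^2)),   i = 0 .. filter_size - 1,
 *
 * scaled afterwards so that they sum to exactly 1.0.
 */
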
static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
{
    double total_weight = 0.0;
    const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
    double adjust;
    int i;

    // Pre-compute constants
    const int offset = s->filter_size / 2;
    const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
    const double c2 = 2.0 * sigma * sigma;

    // Compute weights
    for (i = 0; i < s->filter_size; i++) {
        const int x = i - offset;

        s->weights[i] = c1 * exp(-x * x / c2);
        total_weight += s->weights[i];
    }

    // Adjust weights
    adjust = 1.0 / total_weight;
    for (i = 0; i < s->filter_size; i++) {
        s->weights[i] *= adjust;
    }
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    av_freep(&s->prev_amplification_factor);
    av_freep(&s->dc_correction_value);
    av_freep(&s->compress_threshold);

    for (c = 0; c < s->channels; c++) {
        if (s->gain_history_original)
            cqueue_free(s->gain_history_original[c]);
        if (s->gain_history_minimum)
            cqueue_free(s->gain_history_minimum[c]);
        if (s->gain_history_smoothed)
            cqueue_free(s->gain_history_smoothed[c]);
        if (s->threshold_history)
            cqueue_free(s->threshold_history[c]);
    }

    av_freep(&s->gain_history_original);
    av_freep(&s->gain_history_minimum);
    av_freep(&s->gain_history_smoothed);
    av_freep(&s->threshold_history);

    cqueue_free(s->is_enabled);
    s->is_enabled = NULL;

    av_freep(&s->weights);

    ff_bufqueue_discard_all(&s->queue);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    uninit(ctx);

    s->channels = inlink->channels;
    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
    av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);

    s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
    s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
    s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
    s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
    s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
    s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
    s->threshold_history = av_calloc(inlink->channels, sizeof(*s->threshold_history));
    s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights));
    s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
    if (!s->prev_amplification_factor || !s->dc_correction_value ||
        !s->compress_threshold ||
        !s->gain_history_original || !s->gain_history_minimum ||
        !s->gain_history_smoothed || !s->threshold_history ||
        !s->is_enabled || !s->weights)
        return AVERROR(ENOMEM);

    for (c = 0; c < inlink->channels; c++) {
        s->prev_amplification_factor[c] = 1.0;

        s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_minimum[c]  = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->threshold_history[c]     = cqueue_create(s->filter_size, MAX_FILTER_SIZE);

        if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
            !s->gain_history_smoothed[c] || !s->threshold_history[c])
            return AVERROR(ENOMEM);
    }

    init_gaussian_filter(s);

    return 0;
}

static inline double fade(double prev, double next, int pos, int length)
{
    const double step_size = 1.0 / length;
    const double f0 = 1.0 - (step_size * (pos + 1.0));
    const double f1 = 1.0 - f0;
    return f0 * prev + f1 * next;
}

static inline double pow_2(const double value)
{
    return value * value;
}

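/*
 * bound() is a smooth limiter: bound(t, x) = t * erf((sqrt(PI) / 2) * x / t).
 * For |x| much smaller than t it returns approximately x (the erf slope at 0
 * is 2/sqrt(PI), which cancels the constant), and for large |x| it saturates
 * towards +/- t, so amplified samples are squeezed into [-t, t] without a
 * hard clip.
 */
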
static inline double bound(const double threshold, const double val)
{
    const double CONST = 0.8862269254527580136490837416705725913987747280611935; // sqrt(PI) / 2.0
    return erf(CONST * (val / threshold)) * threshold;
}

static double find_peak_magnitude(AVFrame *frame, int channel)
{
    double max = DBL_EPSILON;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++)
                max = FFMAX(max, fabs(data_ptr[i]));
        }
    } else {
        double *data_ptr = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++)
            max = FFMAX(max, fabs(data_ptr[i]));
    }

    return max;
}

static double compute_frame_rms(AVFrame *frame, int channel)
{
    double rms_value = 0.0;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            const double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                rms_value += pow_2(data_ptr[i]);
            }
        }

        rms_value /= frame->nb_samples * frame->channels;
    } else {
        const double *data_ptr = (double *)frame->extended_data[channel];
        for (i = 0; i < frame->nb_samples; i++) {
            rms_value += pow_2(data_ptr[i]);
        }

        rms_value /= frame->nb_samples;
    }

    return FFMAX(sqrt(rms_value), DBL_EPSILON);
}

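/*
 * The per-frame target gain is the smaller of the peak-based gain
 * (peak_value / peak_magnitude) and, if a target RMS is configured, the
 * RMS-based gain (target_rms / frame_rms); the result is then soft-limited
 * to max_amplification via bound() above.
 */
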
static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
                                     int channel)
{
    const double peak_magnitude = find_peak_magnitude(frame, channel);
    const double maximum_gain = s->peak_value / peak_magnitude;
    const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
    local_gain gain;

    gain.threshold = peak_magnitude > s->threshold;
    gain.max_gain = bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));

    return gain;
}

static double minimum_filter(cqueue *q)
{
    double min = DBL_MAX;
    int i;

    for (i = 0; i < cqueue_size(q); i++) {
        min = FFMIN(min, cqueue_peek(q, i));
    }

    return min;
}

static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
{
    double result = 0.0, tsum = 0.0;
    int i;

    for (i = 0; i < cqueue_size(q); i++) {
        tsum += cqueue_peek(tq, i) * s->weights[i];
        result += cqueue_peek(q, i) * s->weights[i] * cqueue_peek(tq, i);
    }

    if (tsum == 0.0)
        result = 1.0;

    return result;
}

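/*
 * Gain smoothing works in three stages per channel: the raw per-frame gain
 * is pushed into gain_history_original, a sliding minimum over that history
 * goes into gain_history_minimum, and a Gaussian-weighted average of the
 * minima (gaussian_filter() above) finally lands in gain_history_smoothed,
 * which is what amplify_frame() consumes. At the start of the stream the
 * histories are pre-filled so the very first frames get a usable gain.
 */
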
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
                                local_gain gain)
{
    if (cqueue_empty(s->gain_history_original[channel])) {
        const int pre_fill_size = s->filter_size / 2;
        const double initial_value = s->alt_boundary_mode ? gain.max_gain : s->peak_value;

        s->prev_amplification_factor[channel] = initial_value;

        while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
            cqueue_enqueue(s->gain_history_original[channel], initial_value);
            cqueue_enqueue(s->threshold_history[channel], gain.threshold);
        }
    }

    cqueue_enqueue(s->gain_history_original[channel], gain.max_gain);

    while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
        double minimum;

        if (cqueue_empty(s->gain_history_minimum[channel])) {
            const int pre_fill_size = s->filter_size / 2;
            double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
            int input = pre_fill_size;

            while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
                input++;
                initial_value = FFMIN(initial_value, cqueue_peek(s->gain_history_original[channel], input));
                cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
            }
        }

        minimum = minimum_filter(s->gain_history_original[channel]);

        cqueue_enqueue(s->gain_history_minimum[channel], minimum);

        cqueue_enqueue(s->threshold_history[channel], gain.threshold);

        cqueue_pop(s->gain_history_original[channel]);
    }

    while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
        double smoothed, limit;

        smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]);
        limit = cqueue_peek(s->gain_history_original[channel], 0);
        smoothed = FFMIN(smoothed, limit);

        cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);

        cqueue_pop(s->gain_history_minimum[channel]);
        cqueue_pop(s->threshold_history[channel]);
    }
}

static inline double update_value(double new, double old, double aggressiveness)
{
    av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));
    return aggressiveness * new + (1.0 - aggressiveness) * old;
}

static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    const double diff = 1.0 / frame->nb_samples;
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_average_value = 0.0;
        double prev_value;

        for (i = 0; i < frame->nb_samples; i++)
            current_average_value += dst_ptr[i] * diff;

        prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
        s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);

        for (i = 0; i < frame->nb_samples; i++) {
            dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples);
        }
    }
}

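/*
 * setup_compress_thresh() searches, by successive halving of the step size,
 * for roughly the largest threshold t whose bound(t, 1.0) does not exceed
 * the requested value, so that after the soft limiter a full-scale input
 * still maps to approximately the requested compression threshold.
 */
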
static double setup_compress_thresh(double threshold)
{
    if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
        double current_threshold = threshold;
        double step_size = 1.0;

        while (step_size > DBL_EPSILON) {
            while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
                    llrint(current_threshold * (UINT64_C(1) << 63))) &&
                   (bound(current_threshold + step_size, 1.0) <= threshold)) {
                current_threshold += step_size;
            }

            step_size /= 2.0;
        }

        return current_threshold;
    } else {
        return threshold;
    }
}

static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
                                    AVFrame *frame, int channel)
{
    double variance = 0.0;
    int i, c;

    if (channel == -1) {
        for (c = 0; c < s->channels; c++) {
            const double *data_ptr = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
            }
        }
        variance /= (s->channels * frame->nb_samples) - 1;
    } else {
        const double *data_ptr = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++) {
            variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
        }
        variance /= frame->nb_samples - 1;
    }

    return FFMAX(sqrt(variance), DBL_EPSILON);
}

static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    if (s->channels_coupled) {
        const double standard_deviation = compute_frame_std_dev(s, frame, -1);
        const double current_threshold = FFMIN(1.0, s->compress_factor * standard_deviation);

        const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
        double prev_actual_thresh, curr_actual_thresh;
        s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));

        prev_actual_thresh = setup_compress_thresh(prev_value);
        curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);

        for (c = 0; c < s->channels; c++) {
            double *const dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    } else {
        for (c = 0; c < s->channels; c++) {
            const double standard_deviation = compute_frame_std_dev(s, frame, c);
            const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));

            const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
            double prev_actual_thresh, curr_actual_thresh;
            double *dst_ptr;
            s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);

            prev_actual_thresh = setup_compress_thresh(prev_value);
            curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);

            dst_ptr = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
            }
        }
    }
}

static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    if (s->dc_correction) {
        perform_dc_correction(s, frame);
    }

    if (s->compress_factor > DBL_EPSILON) {
        perform_compression(s, frame);
    }

    if (s->channels_coupled) {
        const local_gain gain = get_max_local_gain(s, frame, -1);
        int c;

        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, gain);
    } else {
        int c;

        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, get_max_local_gain(s, frame, c));
    }
}

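/*
 * amplify_frame() applies the next smoothed gain factor to one frame,
 * cross-fading per sample from the previous frame's factor to the current
 * one via fade() so that the gain never jumps between frames.
 */
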
static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int enabled)
{
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_amplification_factor;

        cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);

        for (i = 0; i < frame->nb_samples && enabled; i++) {
            const double amplification_factor = fade(s->prev_amplification_factor[c],
                                                     current_amplification_factor, i,
                                                     frame->nb_samples);

            dst_ptr[i] *= amplification_factor;
        }

        s->prev_amplification_factor[c] = current_amplification_factor;
    }
}

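/*
 * filter_frame() first drains every queued frame for which a smoothed gain
 * is already available, then analyzes the new input frame and appends it to
 * the queue. Because the Gaussian window is centered, output lags input by
 * roughly filter_size / 2 frames until EOF flushing kicks in.
 */
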
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret = 1;

    while (((s->queue.available >= s->filter_size) ||
            (s->eof && s->queue.available)) &&
           !cqueue_empty(s->gain_history_smoothed[0])) {
        AVFrame *out = ff_bufqueue_get(&s->queue);
        double is_enabled;

        cqueue_dequeue(s->is_enabled, &is_enabled);

        amplify_frame(s, out, is_enabled > 0.);
        s->pts = out->pts + av_rescale_q(out->nb_samples, av_make_q(1, outlink->sample_rate),
                                         outlink->time_base);
        ret = ff_filter_frame(outlink, out);
    }

    av_frame_make_writable(in);
    analyze_frame(s, in);
    if (!s->eof) {
        ff_bufqueue_add(ctx, &s->queue, in);
        cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
    } else {
        av_frame_free(&in);
    }

    return ret;
}

static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
                        AVFilterLink *outlink)
{
    AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
    int c, i;

    if (!out)
        return AVERROR(ENOMEM);

    for (c = 0; c < s->channels; c++) {
        double *dst_ptr = (double *)out->extended_data[c];

        for (i = 0; i < out->nb_samples; i++) {
            dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
            if (s->dc_correction) {
                dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
                dst_ptr[i] += s->dc_correction_value[c];
            }
        }
    }

    return filter_frame(inlink, out);
}

static int flush(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    int ret = 0;

    if (!cqueue_empty(s->gain_history_smoothed[0])) {
        ret = flush_buffer(s, inlink, outlink);
    } else if (s->queue.available) {
        AVFrame *out = ff_bufqueue_get(&s->queue);

        s->pts = out->pts + av_rescale_q(out->nb_samples, av_make_q(1, outlink->sample_rate),
                                         outlink->time_base);
        ret = ff_filter_frame(outlink, out);
    }

    return ret;
}

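/*
 * activate() drives the filter in the pull model: it consumes exactly
 * frame_len samples per call and runs them through filter_frame(). After
 * EOF has been acknowledged, flush() keeps the pipeline moving by feeding
 * synthetic boundary frames (flush_buffer()) or emitting the frames still
 * sitting in the queue, before the EOF status is forwarded downstream.
 */
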
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof) {
        ret = ff_inlink_consume_samples(inlink, s->frame_len, s->frame_len, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret <= 0)
                return ret;
        }

        if (ff_inlink_check_available_samples(inlink, s->frame_len) > 0) {
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    if (s->eof && s->queue.available)
        return flush(outlink);

    if (s->eof && !s->queue.available) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int prev_filter_size = s->filter_size;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    s->filter_size |= 1;
    if (prev_filter_size != s->filter_size) {
        init_gaussian_filter(s);

        for (int c = 0; c < s->channels; c++) {
            cqueue_resize(s->gain_history_original[c], s->filter_size);
            cqueue_resize(s->gain_history_minimum[c], s->filter_size);
            cqueue_resize(s->threshold_history[c], s->filter_size);
        }
    }

    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);

    return 0;
}

static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};

static const AVFilterPad avfilter_af_dynaudnorm_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};

const AVFilter ff_af_dynaudnorm = {
    .name            = "dynaudnorm",
    .description     = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(DynamicAudioNormalizerContext),
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    FILTER_INPUTS(avfilter_af_dynaudnorm_inputs),
    FILTER_OUTPUTS(avfilter_af_dynaudnorm_outputs),
    .priv_class      = &dynaudnorm_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = process_command,
};
Definition: vorbis_enc_data.h:429