FFmpeg
af_loudnorm.c
1 /*
2  * Copyright (c) 2016 Kyle Swanson <k@ylo.ph>.
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /* http://k.ylo.ph/2016/04/04/loudnorm.html */
22 
23 #include "libavutil/opt.h"
24 #include "avfilter.h"
25 #include "filters.h"
26 #include "internal.h"
27 #include "audio.h"
28 #include "ebur128.h"
29 
30 enum FrameType {
31  FIRST_FRAME,
32  INNER_FRAME,
33  FINAL_FRAME,
34  LINEAR_MODE,
35  FRAME_NB
36 };
37 
38 enum LimiterState {
39  OUT,
40  ATTACK,
41  SUSTAIN,
42  RELEASE,
43  STATE_NB
44 };
45 
46 enum PrintFormat {
47  NONE,
48  JSON,
49  SUMMARY,
50  PF_NB
51 };
52 
53 typedef struct LoudNormContext {
54  const AVClass *class;
55  double target_i;
56  double target_lra;
57  double target_tp;
58  double measured_i;
59  double measured_lra;
60  double measured_tp;
61  double measured_thresh;
62  double offset;
63  int linear;
64  int dual_mono;
65  enum PrintFormat print_format;
66 
67  double *buf;
68  int buf_size;
69  int buf_index;
70  int prev_buf_index;
71 
72  double delta[30];
73  double weights[21];
74  double prev_delta;
75  int index;
76 
77  double gain_reduction[2];
78  double *limiter_buf;
79  double *prev_smp;
80  int limiter_buf_index;
81  int limiter_buf_size;
82  enum LimiterState limiter_state;
83  int peak_index;
84  int env_index;
85  int env_cnt;
86  int attack_length;
87  int release_length;
88 
89  int64_t pts[30];
90  enum FrameType frame_type;
91  int above_threshold;
92  int prev_nb_samples;
93  int channels;
94 
95  FFEBUR128State *r128_in;
96  FFEBUR128State *r128_out;
97 } LoudNormContext;
98 
99 #define OFFSET(x) offsetof(LoudNormContext, x)
100 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
101 
102 static const AVOption loudnorm_options[] = {
103  { "I", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
104  { "i", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
105  { "LRA", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 50., FLAGS },
106  { "lra", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 50., FLAGS },
107  { "TP", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
108  { "tp", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
109  { "measured_I", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
110  { "measured_i", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
111  { "measured_LRA", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
112  { "measured_lra", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
113  { "measured_TP", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
114  { "measured_tp", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
115  { "measured_thresh", "measured threshold of input file", OFFSET(measured_thresh), AV_OPT_TYPE_DOUBLE, {.dbl = -70.}, -99., 0., FLAGS },
116  { "offset", "set offset gain", OFFSET(offset), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 99., FLAGS },
117  { "linear", "normalize linearly if possible", OFFSET(linear), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
118  { "dual_mono", "treat mono input as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
119  { "print_format", "set print format for stats", OFFSET(print_format), AV_OPT_TYPE_INT, {.i64 = NONE}, NONE, PF_NB -1, FLAGS, "print_format" },
120  { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NONE}, 0, 0, FLAGS, "print_format" },
121  { "json", 0, 0, AV_OPT_TYPE_CONST, {.i64 = JSON}, 0, 0, FLAGS, "print_format" },
122  { "summary", 0, 0, AV_OPT_TYPE_CONST, {.i64 = SUMMARY}, 0, 0, FLAGS, "print_format" },
123  { NULL }
124 };
125 
126 AVFILTER_DEFINE_CLASS(loudnorm);
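/*
 * Usage sketch: these options are normally driven from the ffmpeg CLI in two
 * passes -- a measurement pass with print_format set, followed by a
 * normalization pass that feeds the printed values back in. The measured_*
 * numbers below are placeholders from a hypothetical first pass, not
 * reference values:
 *
 *   ffmpeg -i in.wav -af loudnorm=I=-16:TP=-1.5:LRA=11:print_format=json -f null -
 *   ffmpeg -i in.wav -af loudnorm=I=-16:TP=-1.5:LRA=11:measured_I=-23.1:measured_TP=-4.1:measured_LRA=6.9:measured_thresh=-33.3:linear=true out.wav
 */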
127 
128 static inline int frame_size(int sample_rate, int frame_len_msec)
129 {
130  const int frame_size = round((double)sample_rate * (frame_len_msec / 1000.0));
131  return frame_size + (frame_size % 2);
132 }
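/* For example, at 192 kHz (the only input rate accepted outside linear mode,
 * see query_formats() below) a 100 ms frame is 19200 samples and the 3000 ms
 * measurement window is 576000 samples; the "+ (frame_size % 2)" term keeps
 * the returned count even. */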
133 
134 static void init_gaussian_filter(LoudNormContext *s)
135 {
136  double total_weight = 0.0;
137  const double sigma = 3.5;
138  double adjust;
139  int i;
140 
141  const int offset = 21 / 2;
142  const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
143  const double c2 = 2.0 * pow(sigma, 2.0);
144 
145  for (i = 0; i < 21; i++) {
146  const int x = i - offset;
147  s->weights[i] = c1 * exp(-(pow(x, 2.0) / c2));
148  total_weight += s->weights[i];
149  }
150 
151  adjust = 1.0 / total_weight;
152  for (i = 0; i < 21; i++)
153  s->weights[i] *= adjust;
154 }
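/* The 21 weights computed above form a normalized Gaussian window:
 * w[i] = exp(-(i - 10)^2 / (2 * 3.5^2)) / (3.5 * sqrt(2 * pi)), rescaled so
 * that the weights sum to 1. */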
155 
156 static double gaussian_filter(LoudNormContext *s, int index)
157 {
158  double result = 0.;
159  int i;
160 
161  index = index - 10 > 0 ? index - 10 : index + 20;
162  for (i = 0; i < 21; i++)
163  result += s->delta[((index + i) < 30) ? (index + i) : (index + i - 30)] * s->weights[i];
164 
165  return result;
166 }
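/* gaussian_filter() smooths the 30-entry delta[] ring buffer with the 21-tap
 * window above, centered on the given index. In the INNER_FRAME path below it
 * is evaluated at index and index + 1, and the two results are linearly
 * interpolated across the 100 ms subframe. */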
167 
168 static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
169 {
170  int n, c, i, index;
171  double ceiling;
172  double *buf;
173 
174  *peak_delta = -1;
175  buf = s->limiter_buf;
176  ceiling = s->target_tp;
177 
178  index = s->limiter_buf_index + (offset * channels) + (1920 * channels);
179  if (index >= s->limiter_buf_size)
180  index -= s->limiter_buf_size;
181 
182  if (s->frame_type == FIRST_FRAME) {
183  for (c = 0; c < channels; c++)
184  s->prev_smp[c] = fabs(buf[index + c - channels]);
185  }
186 
187  for (n = 0; n < nb_samples; n++) {
188  for (c = 0; c < channels; c++) {
189  double this, next, max_peak;
190 
191  this = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
192  next = fabs(buf[(index + c + channels) < s->limiter_buf_size ? (index + c + channels) : (index + c + channels - s->limiter_buf_size)]);
193 
194  if ((s->prev_smp[c] <= this) && (next <= this) && (this > ceiling) && (n > 0)) {
195  int detected;
196 
197  detected = 1;
198  for (i = 2; i < 12; i++) {
199  next = fabs(buf[(index + c + (i * channels)) < s->limiter_buf_size ? (index + c + (i * channels)) : (index + c + (i * channels) - s->limiter_buf_size)]);
200  if (next > this) {
201  detected = 0;
202  break;
203  }
204  }
205 
206  if (!detected)
207  continue;
208 
209  for (c = 0; c < channels; c++) {
210  if (c == 0 || fabs(buf[index + c]) > max_peak)
211  max_peak = fabs(buf[index + c]);
212 
213  s->prev_smp[c] = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
214  }
215 
216  *peak_delta = n;
217  s->peak_index = index;
218  *peak_value = max_peak;
219  return;
220  }
221 
222  s->prev_smp[c] = this;
223  }
224 
225  index += channels;
226  if (index >= s->limiter_buf_size)
227  index -= s->limiter_buf_size;
228  }
229 }
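/* detect_peak() scans the limiter ring buffer, starting 1920 samples (10 ms at
 * 192 kHz) ahead of the current read position, for a local maximum above the
 * true-peak ceiling that is not exceeded within the following 10 samples; it
 * returns the peak's sample offset in *peak_delta and its magnitude in
 * *peak_value. */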
230 
231 static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
232 {
233  int n, c, index, peak_delta, smp_cnt;
234  double ceiling, peak_value;
235  double *buf;
236 
237  buf = s->limiter_buf;
238  ceiling = s->target_tp;
239  index = s->limiter_buf_index;
240  smp_cnt = 0;
241 
242  if (s->frame_type == FIRST_FRAME) {
243  double max;
244 
245  max = 0.;
246  for (n = 0; n < 1920; n++) {
247  for (c = 0; c < channels; c++) {
248  max = fabs(buf[c]) > max ? fabs(buf[c]) : max;
249  }
250  buf += channels;
251  }
252 
253  if (max > ceiling) {
254  s->gain_reduction[1] = ceiling / max;
255  s->limiter_state = SUSTAIN;
256  buf = s->limiter_buf;
257 
258  for (n = 0; n < 1920; n++) {
259  for (c = 0; c < channels; c++) {
260  double env;
261  env = s->gain_reduction[1];
262  buf[c] *= env;
263  }
264  buf += channels;
265  }
266  }
267 
268  buf = s->limiter_buf;
269  }
270 
271  do {
272 
273  switch(s->limiter_state) {
274  case OUT:
275  detect_peak(s, smp_cnt, nb_samples - smp_cnt, channels, &peak_delta, &peak_value);
276  if (peak_delta != -1) {
277  s->env_cnt = 0;
278  smp_cnt += (peak_delta - s->attack_length);
279  s->gain_reduction[0] = 1.;
280  s->gain_reduction[1] = ceiling / peak_value;
281  s->limiter_state = ATTACK;
282 
283  s->env_index = s->peak_index - (s->attack_length * channels);
284  if (s->env_index < 0)
285  s->env_index += s->limiter_buf_size;
286 
287  s->env_index += (s->env_cnt * channels);
288  if (s->env_index > s->limiter_buf_size)
289  s->env_index -= s->limiter_buf_size;
290 
291  } else {
292  smp_cnt = nb_samples;
293  }
294  break;
295 
296  case ATTACK:
297  for (; s->env_cnt < s->attack_length; s->env_cnt++) {
298  for (c = 0; c < channels; c++) {
299  double env;
300  env = s->gain_reduction[0] - ((double) s->env_cnt / (s->attack_length - 1) * (s->gain_reduction[0] - s->gain_reduction[1]));
301  buf[s->env_index + c] *= env;
302  }
303 
304  s->env_index += channels;
305  if (s->env_index >= s->limiter_buf_size)
306  s->env_index -= s->limiter_buf_size;
307 
308  smp_cnt++;
309  if (smp_cnt >= nb_samples) {
310  s->env_cnt++;
311  break;
312  }
313  }
314 
315  if (smp_cnt < nb_samples) {
316  s->env_cnt = 0;
317  s->attack_length = 1920;
318  s->limiter_state = SUSTAIN;
319  }
320  break;
321 
322  case SUSTAIN:
323  detect_peak(s, smp_cnt, nb_samples, channels, &peak_delta, &peak_value);
324  if (peak_delta == -1) {
325  s->limiter_state = RELEASE;
326  s->gain_reduction[0] = s->gain_reduction[1];
327  s->gain_reduction[1] = 1.;
328  s->env_cnt = 0;
329  break;
330  } else {
331  double gain_reduction;
332  gain_reduction = ceiling / peak_value;
333 
334  if (gain_reduction < s->gain_reduction[1]) {
335  s->limiter_state = ATTACK;
336 
337  s->attack_length = peak_delta;
338  if (s->attack_length <= 1)
339  s->attack_length = 2;
340 
341  s->gain_reduction[0] = s->gain_reduction[1];
342  s->gain_reduction[1] = gain_reduction;
343  s->env_cnt = 0;
344  break;
345  }
346 
347  for (s->env_cnt = 0; s->env_cnt < peak_delta; s->env_cnt++) {
348  for (c = 0; c < channels; c++) {
349  double env;
350  env = s->gain_reduction[1];
351  buf[s->env_index + c] *= env;
352  }
353 
354  s->env_index += channels;
355  if (s->env_index >= s->limiter_buf_size)
356  s->env_index -= s->limiter_buf_size;
357 
358  smp_cnt++;
359  if (smp_cnt >= nb_samples) {
360  s->env_cnt++;
361  break;
362  }
363  }
364  }
365  break;
366 
367  case RELEASE:
368  for (; s->env_cnt < s->release_length; s->env_cnt++) {
369  for (c = 0; c < channels; c++) {
370  double env;
371  env = s->gain_reduction[0] + (((double) s->env_cnt / (s->release_length - 1)) * (s->gain_reduction[1] - s->gain_reduction[0]));
372  buf[s->env_index + c] *= env;
373  }
374 
375  s->env_index += channels;
376  if (s->env_index >= s->limiter_buf_size)
377  s->env_index -= s->limiter_buf_size;
378 
379  smp_cnt++;
380  if (smp_cnt >= nb_samples) {
381  s->env_cnt++;
382  break;
383  }
384  }
385 
386  if (smp_cnt < nb_samples) {
387  s->env_cnt = 0;
388  s->limiter_state = OUT;
389  }
390 
391  break;
392  }
393 
394  } while (smp_cnt < nb_samples);
395 
396  for (n = 0; n < nb_samples; n++) {
397  for (c = 0; c < channels; c++) {
398  out[c] = buf[index + c];
399  if (fabs(out[c]) > ceiling) {
400  out[c] = ceiling * (out[c] < 0 ? -1 : 1);
401  }
402  }
403  out += channels;
404  index += channels;
405  if (index >= s->limiter_buf_size)
406  index -= s->limiter_buf_size;
407  }
408 }
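/* true_peak_limiter() is a look-ahead limiter implemented as a small state
 * machine (OUT -> ATTACK -> SUSTAIN -> RELEASE). gain_reduction[0] and [1]
 * hold the gain at the start and end of the current ramp: the attack ramps
 * down over attack_length samples (about 10 ms) ahead of a detected peak and
 * the release ramps back to unity over release_length samples (about 100 ms). */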
409 
410 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
411 {
412  AVFilterContext *ctx = inlink->dst;
413  LoudNormContext *s = ctx->priv;
414  AVFilterLink *outlink = ctx->outputs[0];
415  AVFrame *out;
416  const double *src;
417  double *dst;
418  double *buf;
419  double *limiter_buf;
420  int i, n, c, subframe_length, src_index;
421  double gain, gain_next, env_global, env_shortterm,
422  global, shortterm, lra, relative_threshold;
423 
424  if (av_frame_is_writable(in)) {
425  out = in;
426  } else {
427  out = ff_get_audio_buffer(outlink, in->nb_samples);
428  if (!out) {
429  av_frame_free(&in);
430  return AVERROR(ENOMEM);
431  }
432  av_frame_copy_props(out, in);
433  }
434 
435  out->pts = s->pts[0];
436  memmove(s->pts, &s->pts[1], (FF_ARRAY_ELEMS(s->pts) - 1) * sizeof(s->pts[0]));
437 
438  src = (const double *)in->data[0];
439  dst = (double *)out->data[0];
440  buf = s->buf;
441  limiter_buf = s->limiter_buf;
442 
443  ff_ebur128_add_frames_double(s->r128_in, src, in->nb_samples);
444 
445  if (s->frame_type == FIRST_FRAME && in->nb_samples < frame_size(inlink->sample_rate, 3000)) {
446  double offset, offset_tp, true_peak;
447 
448  ff_ebur128_loudness_global(s->r128_in, &global);
449  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
450  double tmp;
451  ff_ebur128_sample_peak(s->r128_in, c, &tmp);
452  if (c == 0 || tmp > true_peak)
453  true_peak = tmp;
454  }
455 
456  offset = pow(10., (s->target_i - global) / 20.);
457  offset_tp = true_peak * offset;
458  s->offset = offset_tp < s->target_tp ? offset : s->target_tp / true_peak;
459  s->frame_type = LINEAR_MODE;
460  }
461 
462  switch (s->frame_type) {
463  case FIRST_FRAME:
464  for (n = 0; n < in->nb_samples; n++) {
465  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
466  buf[s->buf_index + c] = src[c];
467  }
468  src += inlink->ch_layout.nb_channels;
469  s->buf_index += inlink->ch_layout.nb_channels;
470  }
471 
472  ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
473 
474  if (shortterm < s->measured_thresh) {
475  s->above_threshold = 0;
476  env_shortterm = shortterm <= -70. ? 0. : s->target_i - s->measured_i;
477  } else {
478  s->above_threshold = 1;
479  env_shortterm = shortterm <= -70. ? 0. : s->target_i - shortterm;
480  }
481 
482  for (n = 0; n < 30; n++)
483  s->delta[n] = pow(10., env_shortterm / 20.);
484  s->prev_delta = s->delta[s->index];
485 
486  s->buf_index =
487  s->limiter_buf_index = 0;
488 
489  for (n = 0; n < (s->limiter_buf_size / inlink->ch_layout.nb_channels); n++) {
490  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
491  limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * s->delta[s->index] * s->offset;
492  }
493  s->limiter_buf_index += inlink->ch_layout.nb_channels;
494  if (s->limiter_buf_index >= s->limiter_buf_size)
495  s->limiter_buf_index -= s->limiter_buf_size;
496 
497  s->buf_index += inlink->ch_layout.nb_channels;
498  }
499 
500  subframe_length = frame_size(inlink->sample_rate, 100);
501  true_peak_limiter(s, dst, subframe_length, inlink->ch_layout.nb_channels);
502  ff_ebur128_add_frames_double(s->r128_out, dst, subframe_length);
503 
504  out->nb_samples = subframe_length;
505 
506  s->frame_type = INNER_FRAME;
507  break;
508 
509  case INNER_FRAME:
510  gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
511  gain_next = gaussian_filter(s, s->index + 11 < 30 ? s->index + 11 : s->index + 11 - 30);
512 
513  for (n = 0; n < in->nb_samples; n++) {
514  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
515  buf[s->prev_buf_index + c] = src[c];
516  limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * (gain + (((double) n / in->nb_samples) * (gain_next - gain))) * s->offset;
517  }
518  src += inlink->ch_layout.nb_channels;
519 
520  s->limiter_buf_index += inlink->ch_layout.nb_channels;
521  if (s->limiter_buf_index >= s->limiter_buf_size)
522  s->limiter_buf_index -= s->limiter_buf_size;
523 
524  s->prev_buf_index += inlink->ch_layout.nb_channels;
525  if (s->prev_buf_index >= s->buf_size)
526  s->prev_buf_index -= s->buf_size;
527 
528  s->buf_index += inlink->ch_layout.nb_channels;
529  if (s->buf_index >= s->buf_size)
530  s->buf_index -= s->buf_size;
531  }
532 
533  subframe_length = (frame_size(inlink->sample_rate, 100) - in->nb_samples) * inlink->ch_layout.nb_channels;
534  s->limiter_buf_index = s->limiter_buf_index + subframe_length < s->limiter_buf_size ? s->limiter_buf_index + subframe_length : s->limiter_buf_index + subframe_length - s->limiter_buf_size;
535 
536  true_peak_limiter(s, dst, in->nb_samples, inlink->ch_layout.nb_channels);
537  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
538 
539  ff_ebur128_loudness_range(s->r128_in, &lra);
540  ff_ebur128_loudness_global(s->r128_in, &global);
541  ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
542  ff_ebur128_relative_threshold(s->r128_in, &relative_threshold);
543 
544  if (s->above_threshold == 0) {
545  double shortterm_out;
546 
547  if (shortterm > s->measured_thresh)
548  s->prev_delta *= 1.0058;
549 
550  ff_ebur128_loudness_shortterm(s->r128_out, &shortterm_out);
551  if (shortterm_out >= s->target_i)
552  s->above_threshold = 1;
553  }
554 
555  if (shortterm < relative_threshold || shortterm <= -70. || s->above_threshold == 0) {
556  s->delta[s->index] = s->prev_delta;
557  } else {
558  env_global = fabs(shortterm - global) < (s->target_lra / 2.) ? shortterm - global : (s->target_lra / 2.) * ((shortterm - global) < 0 ? -1 : 1);
559  env_shortterm = s->target_i - shortterm;
560  s->delta[s->index] = pow(10., (env_global + env_shortterm) / 20.);
561  }
562 
563  s->prev_delta = s->delta[s->index];
564  s->index++;
565  if (s->index >= 30)
566  s->index -= 30;
567  s->prev_nb_samples = in->nb_samples;
568  break;
569 
570  case FINAL_FRAME:
571  gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
572  s->limiter_buf_index = 0;
573  src_index = 0;
574 
575  for (n = 0; n < s->limiter_buf_size / inlink->ch_layout.nb_channels; n++) {
576  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
577  s->limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
578  }
579  src_index += inlink->ch_layout.nb_channels;
580 
581  s->limiter_buf_index += inlink->ch_layout.nb_channels;
582  if (s->limiter_buf_index >= s->limiter_buf_size)
583  s->limiter_buf_index -= s->limiter_buf_size;
584  }
585 
586  subframe_length = frame_size(inlink->sample_rate, 100);
587  for (i = 0; i < in->nb_samples / subframe_length; i++) {
588  true_peak_limiter(s, dst, subframe_length, inlink->ch_layout.nb_channels);
589 
590  for (n = 0; n < subframe_length; n++) {
591  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
592  if (src_index < (in->nb_samples * inlink->ch_layout.nb_channels)) {
593  limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
594  } else {
595  limiter_buf[s->limiter_buf_index + c] = 0.;
596  }
597  }
598 
599  if (src_index < (in->nb_samples * inlink->ch_layout.nb_channels))
600  src_index += inlink->ch_layout.nb_channels;
601 
602  s->limiter_buf_index += inlink->ch_layout.nb_channels;
603  if (s->limiter_buf_index >= s->limiter_buf_size)
604  s->limiter_buf_index -= s->limiter_buf_size;
605  }
606 
607  dst += (subframe_length * inlink->ch_layout.nb_channels);
608  }
609 
610  dst = (double *)out->data[0];
611  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
612  break;
613 
614  case LINEAR_MODE:
615  for (n = 0; n < in->nb_samples; n++) {
616  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
617  dst[c] = src[c] * s->offset;
618  }
619  src += inlink->ch_layout.nb_channels;
620  dst += inlink->ch_layout.nb_channels;
621  }
622 
623  dst = (double *)out->data[0];
624  ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
625  break;
626  }
627 
628  if (in != out)
629  av_frame_free(&in);
630  return ff_filter_frame(outlink, out);
631 }
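/* filter_frame() therefore runs in one of four modes: FIRST_FRAME buffers the
 * initial 3 s and primes the limiter, INNER_FRAME applies the smoothed dynamic
 * gain, FINAL_FRAME drains the buffered tail at EOF, and LINEAR_MODE applies a
 * single constant gain. */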
632 
633 static int flush_frame(AVFilterLink *outlink)
634 {
635  AVFilterContext *ctx = outlink->src;
636  AVFilterLink *inlink = ctx->inputs[0];
637  LoudNormContext *s = ctx->priv;
638  int ret = 0;
639 
640  if (s->frame_type == INNER_FRAME) {
641  double *src;
642  double *buf;
643  int nb_samples, n, c, offset;
644  AVFrame *frame;
645 
646  nb_samples = (s->buf_size / inlink->ch_layout.nb_channels) - s->prev_nb_samples;
647  nb_samples -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples);
648 
649  frame = ff_get_audio_buffer(outlink, nb_samples);
650  if (!frame)
651  return AVERROR(ENOMEM);
652  frame->nb_samples = nb_samples;
653 
654  buf = s->buf;
655  src = (double *)frame->data[0];
656 
657  offset = ((s->limiter_buf_size / inlink->ch_layout.nb_channels) - s->prev_nb_samples) * inlink->ch_layout.nb_channels;
658  offset -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples) * inlink->ch_layout.nb_channels;
659  s->buf_index = s->buf_index - offset < 0 ? s->buf_index - offset + s->buf_size : s->buf_index - offset;
660 
661  for (n = 0; n < nb_samples; n++) {
662  for (c = 0; c < inlink->ch_layout.nb_channels; c++) {
663  src[c] = buf[s->buf_index + c];
664  }
665  src += inlink->ch_layout.nb_channels;
666  s->buf_index += inlink->ch_layout.nb_channels;
667  if (s->buf_index >= s->buf_size)
668  s->buf_index -= s->buf_size;
669  }
670 
671  s->frame_type = FINAL_FRAME;
672  ret = filter_frame(inlink, frame);
673  }
674  return ret;
675 }
676 
677 static int activate(AVFilterContext *ctx)
678 {
679  AVFilterLink *inlink = ctx->inputs[0];
680  AVFilterLink *outlink = ctx->outputs[0];
681  LoudNormContext *s = ctx->priv;
682  AVFrame *in = NULL;
683  int ret = 0, status;
684  int64_t pts;
685 
686  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
687 
688  if (s->frame_type != LINEAR_MODE) {
689  int nb_samples;
690 
691  if (s->frame_type == FIRST_FRAME) {
692  nb_samples = frame_size(inlink->sample_rate, 3000);
693  } else {
694  nb_samples = frame_size(inlink->sample_rate, 100);
695  }
696 
697  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
698  } else {
699  ret = ff_inlink_consume_frame(inlink, &in);
700  }
701 
702  if (ret < 0)
703  return ret;
704  if (ret > 0) {
705  if (s->frame_type == FIRST_FRAME) {
706  const int nb_samples = frame_size(inlink->sample_rate, 100);
707 
708  for (int i = 0; i < FF_ARRAY_ELEMS(s->pts); i++)
709  s->pts[i] = in->pts + i * nb_samples;
710  } else if (s->frame_type == LINEAR_MODE) {
711  s->pts[0] = in->pts;
712  } else {
713  s->pts[FF_ARRAY_ELEMS(s->pts) - 1] = in->pts;
714  }
715  ret = filter_frame(inlink, in);
716  }
717  if (ret < 0)
718  return ret;
719 
720  if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
721  ff_outlink_set_status(outlink, status, pts);
722  return flush_frame(outlink);
723  }
724 
725  FF_FILTER_FORWARD_WANTED(outlink, inlink);
726 
727  return FFERROR_NOT_READY;
728 }
729 
730 static int query_formats(AVFilterContext *ctx)
731 {
732  LoudNormContext *s = ctx->priv;
733  AVFilterFormats *formats = NULL;
734  static const int input_srate[] = {192000, -1};
735  int ret = ff_set_common_all_channel_counts(ctx);
736  if (ret < 0)
737  return ret;
738 
739  ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBL);
740  if (ret)
741  return ret;
742  ret = ff_set_common_formats(ctx, formats);
743  if (ret)
744  return ret;
745 
746  if (s->frame_type != LINEAR_MODE) {
747  formats = ff_make_format_list(input_srate);
748  } else {
749  formats = ff_all_samplerates();
750  }
751 
752  return ff_set_common_samplerates(ctx, formats);
753 }
754 
755 static int config_input(AVFilterLink *inlink)
756 {
757  AVFilterContext *ctx = inlink->dst;
758  LoudNormContext *s = ctx->priv;
759 
760  s->r128_in = ff_ebur128_init(inlink->ch_layout.nb_channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
761  if (!s->r128_in)
762  return AVERROR(ENOMEM);
763 
764  s->r128_out = ff_ebur128_init(inlink->ch_layout.nb_channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
765  if (!s->r128_out)
766  return AVERROR(ENOMEM);
767 
768  if (inlink->ch_layout.nb_channels == 1 && s->dual_mono) {
769  ff_ebur128_set_channel(s->r128_in, 0, FF_EBUR128_DUAL_MONO);
770  ff_ebur128_set_channel(s->r128_out, 0, FF_EBUR128_DUAL_MONO);
771  }
772 
773  s->buf_size = frame_size(inlink->sample_rate, 3000) * inlink->ch_layout.nb_channels;
774  s->buf = av_malloc_array(s->buf_size, sizeof(*s->buf));
775  if (!s->buf)
776  return AVERROR(ENOMEM);
777 
778  s->limiter_buf_size = frame_size(inlink->sample_rate, 210) * inlink->ch_layout.nb_channels;
779  s->limiter_buf = av_malloc_array(s->buf_size, sizeof(*s->limiter_buf));
780  if (!s->limiter_buf)
781  return AVERROR(ENOMEM);
782 
783  s->prev_smp = av_malloc_array(inlink->ch_layout.nb_channels, sizeof(*s->prev_smp));
784  if (!s->prev_smp)
785  return AVERROR(ENOMEM);
786 
787  init_gaussian_filter(s);
788 
789  s->buf_index =
790  s->prev_buf_index =
791  s->limiter_buf_index = 0;
792  s->channels = inlink->ch_layout.nb_channels;
793  s->index = 1;
794  s->limiter_state = OUT;
795  s->offset = pow(10., s->offset / 20.);
796  s->target_tp = pow(10., s->target_tp / 20.);
797  s->attack_length = frame_size(inlink->sample_rate, 10);
798  s->release_length = frame_size(inlink->sample_rate, 100);
799 
800  return 0;
801 }
802 
803 static av_cold int init(AVFilterContext *ctx)
804 {
805  LoudNormContext *s = ctx->priv;
806  s->frame_type = FIRST_FRAME;
807 
808  if (s->linear) {
809  double offset, offset_tp;
810  offset = s->target_i - s->measured_i;
811  offset_tp = s->measured_tp + offset;
812 
813  if (s->measured_tp != 99 && s->measured_thresh != -70 && s->measured_lra != 0 && s->measured_i != 0) {
814  if ((offset_tp <= s->target_tp) && (s->measured_lra <= s->target_lra)) {
815  s->frame_type = LINEAR_MODE;
816  s->offset = offset;
817  }
818  }
819  }
820 
821  return 0;
822 }
823 
824 static av_cold void uninit(AVFilterContext *ctx)
825 {
826  LoudNormContext *s = ctx->priv;
827  double i_in, i_out, lra_in, lra_out, thresh_in, thresh_out, tp_in, tp_out;
828  int c;
829 
830  if (!s->r128_in || !s->r128_out)
831  goto end;
832 
833  ff_ebur128_loudness_range(s->r128_in, &lra_in);
834  ff_ebur128_loudness_global(s->r128_in, &i_in);
835  ff_ebur128_relative_threshold(s->r128_in, &thresh_in);
836  for (c = 0; c < s->channels; c++) {
837  double tmp;
838  ff_ebur128_sample_peak(s->r128_in, c, &tmp);
839  if ((c == 0) || (tmp > tp_in))
840  tp_in = tmp;
841  }
842 
843  ff_ebur128_loudness_range(s->r128_out, &lra_out);
844  ff_ebur128_loudness_global(s->r128_out, &i_out);
845  ff_ebur128_relative_threshold(s->r128_out, &thresh_out);
846  for (c = 0; c < s->channels; c++) {
847  double tmp;
848  ff_ebur128_sample_peak(s->r128_out, c, &tmp);
849  if ((c == 0) || (tmp > tp_out))
850  tp_out = tmp;
851  }
852 
853  switch(s->print_format) {
854  case NONE:
855  break;
856 
857  case JSON:
858  av_log(ctx, AV_LOG_INFO,
859  "\n{\n"
860  "\t\"input_i\" : \"%.2f\",\n"
861  "\t\"input_tp\" : \"%.2f\",\n"
862  "\t\"input_lra\" : \"%.2f\",\n"
863  "\t\"input_thresh\" : \"%.2f\",\n"
864  "\t\"output_i\" : \"%.2f\",\n"
865  "\t\"output_tp\" : \"%+.2f\",\n"
866  "\t\"output_lra\" : \"%.2f\",\n"
867  "\t\"output_thresh\" : \"%.2f\",\n"
868  "\t\"normalization_type\" : \"%s\",\n"
869  "\t\"target_offset\" : \"%.2f\"\n"
870  "}\n",
871  i_in,
872  20. * log10(tp_in),
873  lra_in,
874  thresh_in,
875  i_out,
876  20. * log10(tp_out),
877  lra_out,
878  thresh_out,
879  s->frame_type == LINEAR_MODE ? "linear" : "dynamic",
880  s->target_i - i_out
881  );
882  break;
883 
884  case SUMMARY:
885  av_log(ctx, AV_LOG_INFO,
886  "\n"
887  "Input Integrated: %+6.1f LUFS\n"
888  "Input True Peak: %+6.1f dBTP\n"
889  "Input LRA: %6.1f LU\n"
890  "Input Threshold: %+6.1f LUFS\n"
891  "\n"
892  "Output Integrated: %+6.1f LUFS\n"
893  "Output True Peak: %+6.1f dBTP\n"
894  "Output LRA: %6.1f LU\n"
895  "Output Threshold: %+6.1f LUFS\n"
896  "\n"
897  "Normalization Type: %s\n"
898  "Target Offset: %+6.1f LU\n",
899  i_in,
900  20. * log10(tp_in),
901  lra_in,
902  thresh_in,
903  i_out,
904  20. * log10(tp_out),
905  lra_out,
906  thresh_out,
907  s->frame_type == LINEAR_MODE ? "Linear" : "Dynamic",
908  s->target_i - i_out
909  );
910  break;
911  }
912 
913 end:
914  if (s->r128_in)
915  ff_ebur128_destroy(&s->r128_in);
916  if (s->r128_out)
917  ff_ebur128_destroy(&s->r128_out);
918  av_freep(&s->limiter_buf);
919  av_freep(&s->prev_smp);
920  av_freep(&s->buf);
921 }
922 
923 static const AVFilterPad avfilter_af_loudnorm_inputs[] = {
924  {
925  .name = "default",
926  .type = AVMEDIA_TYPE_AUDIO,
927  .config_props = config_input,
928  },
929 };
930 
931 static const AVFilterPad avfilter_af_loudnorm_outputs[] = {
932  {
933  .name = "default",
934  .type = AVMEDIA_TYPE_AUDIO,
935  },
936 };
937 
938 const AVFilter ff_af_loudnorm = {
939  .name = "loudnorm",
940  .description = NULL_IF_CONFIG_SMALL("EBU R128 loudness normalization"),
941  .priv_size = sizeof(LoudNormContext),
942  .priv_class = &loudnorm_class,
943  .init = init,
944  .activate = activate,
945  .uninit = uninit,
946  FILTER_INPUTS(avfilter_af_loudnorm_inputs),
947  FILTER_OUTPUTS(avfilter_af_loudnorm_outputs),
948  FILTER_QUERY_FUNC(query_formats),
949 };