FFmpeg
af_loudnorm.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Kyle Swanson <k@ylo.ph>.
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /* http://k.ylo.ph/2016/04/04/loudnorm.html */
22 
23 #include "libavutil/opt.h"
24 #include "avfilter.h"
25 #include "internal.h"
26 #include "audio.h"
27 #include "ebur128.h"
28 
/* Position of the current frame within the stream; drives the main
 * processing state machine in filter_frame().
 * NOTE(review): the member lists of all three enums were elided by the
 * extraction; restored from the identifiers used throughout this file
 * (FIRST_FRAME/INNER_FRAME/FINAL_FRAME/LINEAR_MODE, OUT/ATTACK/SUSTAIN/
 * RELEASE, NONE/JSON/SUMMARY/PF_NB) — confirm order against upstream. */
enum FrameType {
    FIRST_FRAME,
    INNER_FRAME,
    FINAL_FRAME,
    LINEAR_MODE,
    FRAME_NB
};

/* True-peak limiter envelope state (see true_peak_limiter()). */
enum LimiterState {
    OUT,
    ATTACK,
    SUSTAIN,
    RELEASE,
    STATE_NB
};

/* Loudness statistics print format (the "print_format" option). */
enum PrintFormat {
    NONE,
    JSON,
    SUMMARY,
    PF_NB
};
51 
52 typedef struct LoudNormContext {
53  const AVClass *class;
54  double target_i;
55  double target_lra;
56  double target_tp;
57  double measured_i;
58  double measured_lra;
59  double measured_tp;
61  double offset;
62  int linear;
63  int dual_mono;
65 
66  double *buf;
67  int buf_size;
68  int buf_index;
70 
71  double delta[30];
72  double weights[21];
73  double prev_delta;
74  int index;
75 
76  double gain_reduction[2];
77  double *limiter_buf;
78  double *prev_smp;
83  int env_index;
84  int env_cnt;
87 
88  int64_t pts;
92  int channels;
93 
97 
#define OFFSET(x) offsetof(LoudNormContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Filter options. Uppercase/lowercase pairs are aliases for the same
 * field. Loudness values are in LUFS/LU, true peak in dBTP. */
static const AVOption loudnorm_options[] = {
    { "I", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
    { "i", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
    { "LRA", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
    { "lra", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
    { "TP", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
    { "tp", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
    { "measured_I", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
    { "measured_i", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
    { "measured_LRA", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
    { "measured_lra", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
    { "measured_TP", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
    { "measured_tp", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
    { "measured_thresh", "measured threshold of input file", OFFSET(measured_thresh), AV_OPT_TYPE_DOUBLE, {.dbl = -70.}, -99., 0., FLAGS },
    { "offset", "set offset gain", OFFSET(offset), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 99., FLAGS },
    { "linear", "normalize linearly if possible", OFFSET(linear), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "dual_mono", "treat mono input as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "print_format", "set print format for stats", OFFSET(print_format), AV_OPT_TYPE_INT, {.i64 = NONE}, NONE, PF_NB -1, FLAGS, "print_format" },
    { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NONE}, 0, 0, FLAGS, "print_format" },
    { "json", 0, 0, AV_OPT_TYPE_CONST, {.i64 = JSON}, 0, 0, FLAGS, "print_format" },
    { "summary", 0, 0, AV_OPT_TYPE_CONST, {.i64 = SUMMARY}, 0, 0, FLAGS, "print_format" },
    { NULL }
};

/* Defines loudnorm_class, referenced by ff_af_loudnorm.priv_class. */
AVFILTER_DEFINE_CLASS(loudnorm);
126 
127 static inline int frame_size(int sample_rate, int frame_len_msec)
128 {
129  const int frame_size = round((double)sample_rate * (frame_len_msec / 1000.0));
130  return frame_size + (frame_size % 2);
131 }
132 
134 {
135  double total_weight = 0.0;
136  const double sigma = 3.5;
137  double adjust;
138  int i;
139 
140  const int offset = 21 / 2;
141  const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
142  const double c2 = 2.0 * pow(sigma, 2.0);
143 
144  for (i = 0; i < 21; i++) {
145  const int x = i - offset;
146  s->weights[i] = c1 * exp(-(pow(x, 2.0) / c2));
147  total_weight += s->weights[i];
148  }
149 
150  adjust = 1.0 / total_weight;
151  for (i = 0; i < 21; i++)
152  s->weights[i] *= adjust;
153 }
154 
156 {
157  double result = 0.;
158  int i;
159 
160  index = index - 10 > 0 ? index - 10 : index + 20;
161  for (i = 0; i < 21; i++)
162  result += s->delta[((index + i) < 30) ? (index + i) : (index + i - 30)] * s->weights[i];
163 
164  return result;
165 }
166 
/* Find the next true-peak candidate above the target ceiling in the
 * limiter ring buffer.
 *
 * Scans nb_samples frames starting `offset` frames past the read point,
 * plus a fixed 1920-sample lookahead (10 ms at the 192 kHz rate this
 * filter pins its input to — see query_formats; TODO confirm). A sample
 * qualifies when it is a local maximum per channel — not exceeded by the
 * previous sample, the next sample, or any of the 10 samples after that —
 * and its magnitude exceeds the (linear) true-peak ceiling.
 *
 * Outputs: *peak_delta = frame offset of the peak (-1 if none found),
 * *peak_value = largest magnitude across channels at that instant,
 * s->peak_index = ring-buffer index of the peak. s->prev_smp[] carries
 * per-channel previous magnitudes across calls.
 */
static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
{
    int n, c, i, index;
    double ceiling;
    double *buf;

    *peak_delta = -1;
    buf = s->limiter_buf;
    ceiling = s->target_tp;

    index = s->limiter_buf_index + (offset * channels) + (1920 * channels);
    if (index >= s->limiter_buf_size)
        index -= s->limiter_buf_size;

    /* on the very first frame, seed prev_smp[] from the sample just
     * before the scan window (no history exists yet) */
    if (s->frame_type == FIRST_FRAME) {
        for (c = 0; c < channels; c++)
            s->prev_smp[c] = fabs(buf[index + c - channels]);
    }

    for (n = 0; n < nb_samples; n++) {
        for (c = 0; c < channels; c++) {
            double this, next, max_peak;

            this = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
            next = fabs(buf[(index + c + channels) < s->limiter_buf_size ? (index + c + channels) : (index + c + channels - s->limiter_buf_size)]);

            /* candidate: local maximum above the ceiling (skip n == 0,
             * where prev_smp may be stale) */
            if ((s->prev_smp[c] <= this) && (next <= this) && (this > ceiling) && (n > 0)) {
                int detected;

                /* reject if any of the 10 following samples on this
                 * channel rises above the candidate */
                detected = 1;
                for (i = 2; i < 12; i++) {
                    next = fabs(buf[(index + c + (i * channels)) < s->limiter_buf_size ? (index + c + (i * channels)) : (index + c + (i * channels) - s->limiter_buf_size)]);
                    if (next > this) {
                        detected = 0;
                        break;
                    }
                }

                if (!detected)
                    continue;

                /* confirmed peak: take the max magnitude across all
                 * channels at this instant and refresh prev_smp[].
                 * (This loop reuses `c`; that is harmless because the
                 * function returns immediately below. max_peak is always
                 * assigned on the c == 0 iteration before being read.) */
                for (c = 0; c < channels; c++) {
                    if (c == 0 || fabs(buf[index + c]) > max_peak)
                        max_peak = fabs(buf[index + c]);

                    s->prev_smp[c] = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
                }

                *peak_delta = n;
                s->peak_index = index;
                *peak_value = max_peak;
                return;
            }

            s->prev_smp[c] = this;
        }

        index += channels;
        if (index >= s->limiter_buf_size)
            index -= s->limiter_buf_size;
    }
}
229 
/* Look-ahead true-peak limiter over the limiter ring buffer.
 *
 * Emits nb_samples frames into `out`, applying a piecewise gain envelope
 * driven by a four-state machine: OUT (unity gain, scanning for peaks),
 * ATTACK (linear ramp from gain_reduction[0] down to gain_reduction[1]
 * over attack_length samples ahead of the peak), SUSTAIN (hold the
 * reduction while peaks persist), RELEASE (linear ramp back to unity
 * over release_length samples). A final hard clip to the ceiling acts
 * as a safety net on the copy-out.
 */
static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
{
    int n, c, index, peak_delta, smp_cnt;
    double ceiling, peak_value;
    double *buf;

    buf = s->limiter_buf;
    ceiling = s->target_tp;
    index = s->limiter_buf_index;
    smp_cnt = 0;

    /* first frame: if the initial 1920-sample lookahead region already
     * exceeds the ceiling, scale it down flat and start in SUSTAIN */
    if (s->frame_type == FIRST_FRAME) {
        double max;

        max = 0.;
        for (n = 0; n < 1920; n++) {
            for (c = 0; c < channels; c++) {
                max = fabs(buf[c]) > max ? fabs(buf[c]) : max;
            }
            buf += channels;
        }

        if (max > ceiling) {
            s->gain_reduction[1] = ceiling / max;
            s->limiter_state = SUSTAIN;
            buf = s->limiter_buf;

            for (n = 0; n < 1920; n++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[1];
                    buf[c] *= env;
                }
                buf += channels;
            }
        }

        buf = s->limiter_buf;
    }

    do {

        switch(s->limiter_state) {
        case OUT:
            /* unity gain: look for the next peak in what remains */
            detect_peak(s, smp_cnt, nb_samples - smp_cnt, channels, &peak_delta, &peak_value);
            if (peak_delta != -1) {
                s->env_cnt = 0;
                /* back up so the attack ramp finishes right at the peak */
                smp_cnt += (peak_delta - s->attack_length);
                s->gain_reduction[0] = 1.;
                s->gain_reduction[1] = ceiling / peak_value;
                s->limiter_state = ATTACK;

                s->env_index = s->peak_index - (s->attack_length * channels);
                if (s->env_index < 0)
                    s->env_index += s->limiter_buf_size;

                s->env_index += (s->env_cnt * channels);
                if (s->env_index > s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;

            } else {
                smp_cnt = nb_samples;
            }
            break;

        case ATTACK:
            /* ramp linearly from gain_reduction[0] to gain_reduction[1] */
            for (; s->env_cnt < s->attack_length; s->env_cnt++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[0] - ((double) s->env_cnt / (s->attack_length - 1) * (s->gain_reduction[0] - s->gain_reduction[1]));
                    buf[s->env_index + c] *= env;
                }

                s->env_index += channels;
                if (s->env_index >= s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;

                smp_cnt++;
                if (smp_cnt >= nb_samples) {
                    s->env_cnt++;
                    break;
                }
            }

            /* attack completed within this block: hold the reduction */
            if (smp_cnt < nb_samples) {
                s->env_cnt = 0;
                s->attack_length = 1920;
                s->limiter_state = SUSTAIN;
            }
            break;

        case SUSTAIN:
            detect_peak(s, smp_cnt, nb_samples, channels, &peak_delta, &peak_value);
            if (peak_delta == -1) {
                /* no further peaks: start releasing back to unity */
                s->limiter_state = RELEASE;
                s->gain_reduction[0] = s->gain_reduction[1];
                s->gain_reduction[1] = 1.;
                s->env_cnt = 0;
                break;
            } else {
                double gain_reduction;
                gain_reduction = ceiling / peak_value;

                /* a louder peak ahead needs a deeper reduction: re-attack */
                if (gain_reduction < s->gain_reduction[1]) {
                    s->limiter_state = ATTACK;

                    s->attack_length = peak_delta;
                    if (s->attack_length <= 1)
                        s->attack_length = 2;

                    s->gain_reduction[0] = s->gain_reduction[1];
                    /* NOTE(review): a statement was elided here by the
                     * extraction — the new target is never stored; likely
                     * `s->gain_reduction[1] = gain_reduction;`. Recover
                     * from upstream af_loudnorm.c. */
                    s->env_cnt = 0;
                    break;
                }

                /* hold the current reduction up to the detected peak */
                for (s->env_cnt = 0; s->env_cnt < peak_delta; s->env_cnt++) {
                    for (c = 0; c < channels; c++) {
                        double env;
                        env = s->gain_reduction[1];
                        buf[s->env_index + c] *= env;
                    }

                    s->env_index += channels;
                    if (s->env_index >= s->limiter_buf_size)
                        s->env_index -= s->limiter_buf_size;

                    smp_cnt++;
                    if (smp_cnt >= nb_samples) {
                        s->env_cnt++;
                        break;
                    }
                }
            }
            break;

        case RELEASE:
            /* ramp linearly from gain_reduction[0] back to [1] (unity) */
            for (; s->env_cnt < s->release_length; s->env_cnt++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[0] + (((double) s->env_cnt / (s->release_length - 1)) * (s->gain_reduction[1] - s->gain_reduction[0]));
                    buf[s->env_index + c] *= env;
                }

                s->env_index += channels;
                if (s->env_index >= s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;

                smp_cnt++;
                if (smp_cnt >= nb_samples) {
                    s->env_cnt++;
                    break;
                }
            }

            if (smp_cnt < nb_samples) {
                s->env_cnt = 0;
                s->limiter_state = OUT;
            }

            break;
        }

    } while (smp_cnt < nb_samples);

    /* copy out the processed window, hard-clipping to the ceiling as a
     * safety net */
    for (n = 0; n < nb_samples; n++) {
        for (c = 0; c < channels; c++) {
            out[c] = buf[index + c];
            if (fabs(out[c]) > ceiling) {
                out[c] = ceiling * (out[c] < 0 ? -1 : 1);
            }
        }
        out += channels;
        index += channels;
        if (index >= s->limiter_buf_size)
            index -= s->limiter_buf_size;
    }
}
408 
/* Main per-frame processing.
 * NOTE(review): the signature line was elided by the extraction; from the
 * call in request_frame() and the ff_filter_frame() return it is
 * `static int filter_frame(AVFilterLink *inlink, AVFrame *in)`. Several
 * interior statements were also elided — each gap is flagged below with
 * a NOTE(review); recover them from upstream af_loudnorm.c before use.
 *
 * Dispatches on s->frame_type: FIRST_FRAME buffers the initial 3 s
 * analysis window and seeds the gain deltas; INNER_FRAME applies the
 * Gaussian-smoothed dynamic gain plus the true-peak limiter; FINAL_FRAME
 * drains the remaining buffered audio; LINEAR_MODE applies a constant
 * linear offset only. */
{
    AVFilterContext *ctx = inlink->dst;
    LoudNormContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const double *src;
    double *dst;
    double *buf;
    double *limiter_buf;
    int i, n, c, subframe_length, src_index;
    double gain, gain_next, env_global, env_shortterm,
           global, shortterm, lra, relative_threshold;

    /* write in place when possible, otherwise allocate an output frame */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;

    out->pts = s->pts;
    src = (const double *)in->data[0];
    dst = (double *)out->data[0];
    buf = s->buf;
    limiter_buf = s->limiter_buf;

    /* NOTE(review): a statement was elided here — likely feeding the
     * input into the measurement state, e.g.
     * ff_ebur128_add_frames_double(s->r128_in, src, in->nb_samples);
     * confirm upstream. */

    /* stream shorter than the 3 s analysis window: fall back to a single
     * linear gain computed from the whole (short) input */
    if (s->frame_type == FIRST_FRAME && in->nb_samples < frame_size(inlink->sample_rate, 3000)) {
        double offset, offset_tp, true_peak;

        ff_ebur128_loudness_global(s->r128_in, &global);
        for (c = 0; c < inlink->channels; c++) {
            double tmp;
            ff_ebur128_sample_peak(s->r128_in, c, &tmp);
            if (c == 0 || tmp > true_peak)
                true_peak = tmp;
        }

        offset = pow(10., (s->target_i - global) / 20.);
        offset_tp = true_peak * offset;
        s->offset = offset_tp < s->target_tp ? offset : s->target_tp - true_peak;
        s->frame_type = LINEAR_MODE;
    }

    switch (s->frame_type) {
    case FIRST_FRAME:
        /* stash the whole 3 s window into the analysis buffer */
        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                buf[s->buf_index + c] = src[c];
            }
            src += inlink->channels;
            s->buf_index += inlink->channels;
        }

        ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);

        /* seed the delta ring from the first short-term measurement */
        if (shortterm < s->measured_thresh) {
            s->above_threshold = 0;
            env_shortterm = shortterm <= -70. ? 0. : s->target_i - s->measured_i;
        } else {
            s->above_threshold = 1;
            env_shortterm = shortterm <= -70. ? 0. : s->target_i - shortterm;
        }

        for (n = 0; n < 30; n++)
            s->delta[n] = pow(10., env_shortterm / 20.);
        s->prev_delta = s->delta[s->index];

        s->buf_index =
        s->limiter_buf_index = 0;

        /* prime the limiter ring with the gained start of the buffer */
        for (n = 0; n < (s->limiter_buf_size / inlink->channels); n++) {
            for (c = 0; c < inlink->channels; c++) {
                limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * s->delta[s->index] * s->offset;
            }
            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                /* NOTE(review): the wrap statement was elided here
                 * (likely `s->limiter_buf_index -= s->limiter_buf_size;`);
                 * as written the `if` wrongly governs the next line. */

            s->buf_index += inlink->channels;
        }

        subframe_length = frame_size(inlink->sample_rate, 100);
        true_peak_limiter(s, dst, subframe_length, inlink->channels);
        ff_ebur128_add_frames_double(s->r128_out, dst, subframe_length);

        /* from now on, consume 100 ms sub-frames */
        s->pts +=
        out->nb_samples =
        inlink->min_samples =
        inlink->max_samples =
        inlink->partial_buf_size = subframe_length;

        s->frame_type = INNER_FRAME;
        break;

    case INNER_FRAME:
        /* smoothed gain at this frame and the next, for interpolation */
        gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
        gain_next = gaussian_filter(s, s->index + 11 < 30 ? s->index + 11 : s->index + 11 - 30);

        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                buf[s->prev_buf_index + c] = src[c];
                limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * (gain + (((double) n / in->nb_samples) * (gain_next - gain))) * s->offset;
            }
            src += inlink->channels;

            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                /* NOTE(review): wrap statement elided here (see above). */

            s->prev_buf_index += inlink->channels;
            if (s->prev_buf_index >= s->buf_size)
                s->prev_buf_index -= s->buf_size;

            s->buf_index += inlink->channels;
            if (s->buf_index >= s->buf_size)
                s->buf_index -= s->buf_size;
        }

        subframe_length = (frame_size(inlink->sample_rate, 100) - in->nb_samples) * inlink->channels;
        s->limiter_buf_index = s->limiter_buf_index + subframe_length < s->limiter_buf_size ? s->limiter_buf_index + subframe_length : s->limiter_buf_index + subframe_length - s->limiter_buf_size;

        true_peak_limiter(s, dst, in->nb_samples, inlink->channels);
        /* NOTE(review): statement(s) elided here — likely
         * ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
         * confirm upstream. */

        ff_ebur128_loudness_global(s->r128_in, &global);
        ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
        ff_ebur128_relative_threshold(s->r128_in, &relative_threshold);

        /* below-threshold input: gently raise gain until output reaches
         * the target */
        if (s->above_threshold == 0) {
            double shortterm_out;

            if (shortterm > s->measured_thresh)
                s->prev_delta *= 1.0058;

            ff_ebur128_loudness_shortterm(s->r128_out, &shortterm_out);
            if (shortterm_out >= s->target_i)
                s->above_threshold = 1;
        }

        if (shortterm < relative_threshold || shortterm <= -70. || s->above_threshold == 0) {
            s->delta[s->index] = s->prev_delta;
        } else {
            /* clamp the global correction to +/- target_lra / 2 */
            env_global = fabs(shortterm - global) < (s->target_lra / 2.) ? shortterm - global : (s->target_lra / 2.) * ((shortterm - global) < 0 ? -1 : 1);
            env_shortterm = s->target_i - shortterm;
            s->delta[s->index] = pow(10., (env_global + env_shortterm) / 20.);
        }

        s->prev_delta = s->delta[s->index];
        s->index++;
        if (s->index >= 30)
            s->index -= 30;
        s->prev_nb_samples = in->nb_samples;
        s->pts += in->nb_samples;
        break;

    case FINAL_FRAME:
        gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
        s->limiter_buf_index = 0;
        src_index = 0;

        /* refill the limiter ring from the tail frame */
        for (n = 0; n < s->limiter_buf_size / inlink->channels; n++) {
            for (c = 0; c < inlink->channels; c++) {
                s->limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
            }
            src_index += inlink->channels;

            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                /* NOTE(review): wrap statement elided here (see above);
                 * as written this `if` has no body and will not parse. */
        }

        /* drain in 100 ms chunks, zero-padding past the end of input */
        subframe_length = frame_size(inlink->sample_rate, 100);
        for (i = 0; i < in->nb_samples / subframe_length; i++) {
            true_peak_limiter(s, dst, subframe_length, inlink->channels);

            for (n = 0; n < subframe_length; n++) {
                for (c = 0; c < inlink->channels; c++) {
                    if (src_index < (in->nb_samples * inlink->channels)) {
                        limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
                    } else {
                        limiter_buf[s->limiter_buf_index + c] = 0.;
                    }
                }

                if (src_index < (in->nb_samples * inlink->channels))
                    src_index += inlink->channels;

                s->limiter_buf_index += inlink->channels;
                if (s->limiter_buf_index >= s->limiter_buf_size)
                    /* NOTE(review): wrap statement elided here (see
                     * above); as written this `if` has no body. */
            }

            dst += (subframe_length * inlink->channels);
        }

        dst = (double *)out->data[0];
        /* NOTE(review): statement elided here — likely the r128_out
         * add_frames call for the drained audio; confirm upstream. */
        break;

    case LINEAR_MODE:
        /* constant linear gain, no limiter */
        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                dst[c] = src[c] * s->offset;
            }
            src += inlink->channels;
            dst += inlink->channels;
        }

        dst = (double *)out->data[0];
        /* NOTE(review): statement elided here — likely
         * ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
         * confirm upstream. */
        s->pts += in->nb_samples;
        break;
    }

    if (in != out)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
639 
640 static int request_frame(AVFilterLink *outlink)
641 {
642  int ret;
643  AVFilterContext *ctx = outlink->src;
644  AVFilterLink *inlink = ctx->inputs[0];
645  LoudNormContext *s = ctx->priv;
646 
647  ret = ff_request_frame(inlink);
648  if (ret == AVERROR_EOF && s->frame_type == INNER_FRAME) {
649  double *src;
650  double *buf;
651  int nb_samples, n, c, offset;
652  AVFrame *frame;
653 
654  nb_samples = (s->buf_size / inlink->channels) - s->prev_nb_samples;
655  nb_samples -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples);
656 
657  frame = ff_get_audio_buffer(outlink, nb_samples);
658  if (!frame)
659  return AVERROR(ENOMEM);
660  frame->nb_samples = nb_samples;
661 
662  buf = s->buf;
663  src = (double *)frame->data[0];
664 
665  offset = ((s->limiter_buf_size / inlink->channels) - s->prev_nb_samples) * inlink->channels;
666  offset -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples) * inlink->channels;
667  s->buf_index = s->buf_index - offset < 0 ? s->buf_index - offset + s->buf_size : s->buf_index - offset;
668 
669  for (n = 0; n < nb_samples; n++) {
670  for (c = 0; c < inlink->channels; c++) {
671  src[c] = buf[s->buf_index + c];
672  }
673  src += inlink->channels;
674  s->buf_index += inlink->channels;
675  if (s->buf_index >= s->buf_size)
676  s->buf_index -= s->buf_size;
677  }
678 
679  s->frame_type = FINAL_FRAME;
680  ret = filter_frame(inlink, frame);
681  }
682  return ret;
683 }
684 
/* Format negotiation: any channel count; and unless linear mode was
 * already decided at init, pin the sample rate to 192 kHz (the limiter's
 * look-ahead constants assume it).
 * NOTE(review): the signature line was elided by the extraction; by
 * libavfilter convention it is
 * `static int query_formats(AVFilterContext *ctx)` — confirm upstream. */
{
    LoudNormContext *s = ctx->priv;
    /* NOTE(review): declarations elided here — `layouts` and `formats`
     * are used below, so the missing lines are likely
     * `AVFilterFormats *formats;` and `AVFilterChannelLayouts *layouts;`. */
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const int input_srate[] = {192000, -1};
    static const enum AVSampleFormat sample_fmts[] = {
        /* NOTE(review): list members elided — presumably
         * AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE, matching the
         * double-precision processing in this file; confirm upstream. */
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    /* dynamic mode only: force the 192 kHz rate on both sides */
    if (s->frame_type != LINEAR_MODE) {
        formats = ff_make_format_list(input_srate);
        if (!formats)
            return AVERROR(ENOMEM);
        ret = ff_formats_ref(formats, &inlink->out_samplerates);
        if (ret < 0)
            return ret;
        ret = ff_formats_ref(formats, &outlink->in_samplerates);
        if (ret < 0)
            return ret;
    }

    return 0;
}
727 
/* Input link configuration: allocate measurement state and buffers,
 * convert dB options to linear, and set up limiter timing constants.
 * NOTE(review): the signature line was elided by the extraction; the
 * cross-reference data gives
 * `static int config_input(AVFilterLink *inlink)` (af_loudnorm.c:728). */
{
    AVFilterContext *ctx = inlink->dst;
    LoudNormContext *s = ctx->priv;

    /* NOTE(review): statement elided here — the creation of s->r128_in,
     * presumably via ff_ebur128_init(inlink->channels,
     * inlink->sample_rate, ...); confirm upstream. */
    if (!s->r128_in)
        return AVERROR(ENOMEM);

    /* NOTE(review): statement elided here — creation of s->r128_out. */
    if (!s->r128_out)
        return AVERROR(ENOMEM);

    if (inlink->channels == 1 && s->dual_mono) {
        /* NOTE(review): statements elided here — presumably
         * ff_ebur128_set_channel() calls marking the single channel as
         * dual-mono on both measurement states; confirm upstream. */
    }

    /* 3 s analysis window, interleaved doubles */
    s->buf_size = frame_size(inlink->sample_rate, 3000) * inlink->channels;
    s->buf = av_malloc_array(s->buf_size, sizeof(*s->buf));
    if (!s->buf)
        return AVERROR(ENOMEM);

    /* 210 ms limiter ring; note the allocation uses the larger buf_size
     * (over-allocation — appears harmless, but confirm upstream) */
    s->limiter_buf_size = frame_size(inlink->sample_rate, 210) * inlink->channels;
    s->limiter_buf = av_malloc_array(s->buf_size, sizeof(*s->limiter_buf));
    if (!s->limiter_buf)
        return AVERROR(ENOMEM);

    s->prev_smp = av_malloc_array(inlink->channels, sizeof(*s->prev_smp));
    if (!s->prev_smp)
        return AVERROR(ENOMEM);

    /* NOTE(review): statement elided here — likely init_gaussian_filter(s); */

    /* dynamic mode consumes the stream in fixed 3 s chunks initially */
    if (s->frame_type != LINEAR_MODE) {
        inlink->min_samples =
        inlink->max_samples =
        inlink->partial_buf_size = frame_size(inlink->sample_rate, 3000);
    }

    s->pts = AV_NOPTS_VALUE;
    s->buf_index =
    s->prev_buf_index =
    s->limiter_buf_index = 0;
    s->channels = inlink->channels;
    s->index = 1;
    s->limiter_state = OUT;
    /* convert dB options to linear factors */
    s->offset = pow(10., s->offset / 20.);
    s->target_tp = pow(10., s->target_tp / 20.);
    /* 10 ms attack, 100 ms release */
    s->attack_length = frame_size(inlink->sample_rate, 10);
    s->release_length = frame_size(inlink->sample_rate, 100);

    return 0;
}
782 
784 {
785  LoudNormContext *s = ctx->priv;
786  s->frame_type = FIRST_FRAME;
787 
788  if (s->linear) {
789  double offset, offset_tp;
790  offset = s->target_i - s->measured_i;
791  offset_tp = s->measured_tp + offset;
792 
793  if (s->measured_tp != 99 && s->measured_thresh != -70 && s->measured_lra != 0 && s->measured_i != 0) {
794  if ((offset_tp <= s->target_tp) && (s->measured_lra <= s->target_lra)) {
795  s->frame_type = LINEAR_MODE;
796  s->offset = offset;
797  }
798  }
799  }
800 
801  return 0;
802 }
803 
/* Teardown: print the requested loudness statistics, then free the
 * measurement state and buffers.
 * NOTE(review): the signature line was elided by the extraction; the
 * cross-reference data gives
 * `static av_cold void uninit(AVFilterContext *ctx)` (af_loudnorm.c:804).
 * Several interior statements were also elided — flagged below. */
{
    LoudNormContext *s = ctx->priv;
    double i_in, i_out, lra_in, lra_out, thresh_in, thresh_out, tp_in, tp_out;
    int c;

    /* nothing to report if measurement state never got allocated */
    if (!s->r128_in || !s->r128_out)
        goto end;

    ff_ebur128_loudness_range(s->r128_in, &lra_in);
    /* NOTE(review): statement elided here — i_in is printed below but
     * never assigned in the visible text; likely
     * ff_ebur128_loudness_global(s->r128_in, &i_in); */
    ff_ebur128_relative_threshold(s->r128_in, &thresh_in);
    for (c = 0; c < s->channels; c++) {
        double tmp;
        ff_ebur128_sample_peak(s->r128_in, c, &tmp);
        if ((c == 0) || (tmp > tp_in))
            tp_in = tmp;
    }

    ff_ebur128_loudness_range(s->r128_out, &lra_out);
    /* NOTE(review): statement elided here — likely the matching
     * ff_ebur128_loudness_global(s->r128_out, &i_out); */
    ff_ebur128_relative_threshold(s->r128_out, &thresh_out);
    for (c = 0; c < s->channels; c++) {
        double tmp;
        ff_ebur128_sample_peak(s->r128_out, c, &tmp);
        if ((c == 0) || (tmp > tp_out))
            tp_out = tmp;
    }

    switch(s->print_format) {
    case NONE:
        break;

    case JSON:
        av_log(ctx, AV_LOG_INFO,
               "\n{\n"
               "\t\"input_i\" : \"%.2f\",\n"
               "\t\"input_tp\" : \"%.2f\",\n"
               "\t\"input_lra\" : \"%.2f\",\n"
               "\t\"input_thresh\" : \"%.2f\",\n"
               "\t\"output_i\" : \"%.2f\",\n"
               "\t\"output_tp\" : \"%+.2f\",\n"
               "\t\"output_lra\" : \"%.2f\",\n"
               "\t\"output_thresh\" : \"%.2f\",\n"
               "\t\"normalization_type\" : \"%s\",\n"
               "\t\"target_offset\" : \"%.2f\"\n"
               "}\n",
               i_in,
               20. * log10(tp_in),
               lra_in,
               thresh_in,
               i_out,
               20. * log10(tp_out),
               lra_out,
               thresh_out,
               s->frame_type == LINEAR_MODE ? "linear" : "dynamic",
               s->target_i - i_out
        );
        break;

    case SUMMARY:
        av_log(ctx, AV_LOG_INFO,
               "\n"
               "Input Integrated: %+6.1f LUFS\n"
               "Input True Peak: %+6.1f dBTP\n"
               "Input LRA: %6.1f LU\n"
               "Input Threshold: %+6.1f LUFS\n"
               "\n"
               "Output Integrated: %+6.1f LUFS\n"
               "Output True Peak: %+6.1f dBTP\n"
               "Output LRA: %6.1f LU\n"
               "Output Threshold: %+6.1f LUFS\n"
               "\n"
               "Normalization Type: %s\n"
               "Target Offset: %+6.1f LU\n",
               i_in,
               20. * log10(tp_in),
               lra_in,
               thresh_in,
               i_out,
               20. * log10(tp_out),
               lra_out,
               thresh_out,
               s->frame_type == LINEAR_MODE ? "Linear" : "Dynamic",
               s->target_i - i_out
        );
        break;
    }

end:
    if (s->r128_in)
        /* NOTE(review): statement elided here — the cross-reference data
         * names ff_ebur128_destroy(); likely
         * ff_ebur128_destroy(&s->r128_in); as written this `if` wrongly
         * governs the next `if`. */
    if (s->r128_out)
        /* NOTE(review): statement elided here — likely
         * ff_ebur128_destroy(&s->r128_out); */
    av_freep(&s->limiter_buf);
    av_freep(&s->prev_smp);
    av_freep(&s->buf);
}
902 
904  {
905  .name = "default",
906  .type = AVMEDIA_TYPE_AUDIO,
907  .config_props = config_input,
908  .filter_frame = filter_frame,
909  },
910  { NULL }
911 };
912 
914  {
915  .name = "default",
916  .request_frame = request_frame,
917  .type = AVMEDIA_TYPE_AUDIO,
918  },
919  { NULL }
920 };
921 
923  .name = "loudnorm",
924  .description = NULL_IF_CONFIG_SMALL("EBU R128 loudness normalization"),
925  .priv_size = sizeof(LoudNormContext),
926  .priv_class = &loudnorm_class,
928  .init = init,
929  .uninit = uninit,
930  .inputs = avfilter_af_loudnorm_inputs,
931  .outputs = avfilter_af_loudnorm_outputs,
932 };
#define NULL
Definition: coverity.c:32
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates...
Definition: formats.c:581
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
AVOption.
Definition: opt.h:246
double weights[21]
Definition: af_loudnorm.c:72
Main libavfilter public API header.
int ff_ebur128_loudness_global(FFEBUR128State *st, double *out)
Get global integrated loudness in LUFS.
Definition: ebur128.c:603
void ff_ebur128_destroy(FFEBUR128State **st)
Destroy library state.
Definition: ebur128.c:302
double * buf
Definition: af_loudnorm.c:66
double target_tp
Definition: af_loudnorm.c:56
can call ff_ebur128_loudness_global_* and ff_ebur128_relative_threshold
Definition: ebur128.h:89
AVFilter ff_af_loudnorm
Definition: af_loudnorm.c:922
a channel that is counted twice
Definition: ebur128.h:51
can call ff_ebur128_sample_peak
Definition: ebur128.h:93
static int config_input(AVFilterLink *inlink)
Definition: af_loudnorm.c:728
FFEBUR128State * r128_in
Definition: af_loudnorm.c:94
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
void ff_ebur128_add_frames_double(FFEBUR128State *st, const double *src, size_t frames)
See ebur128_add_frames_short.
AVFILTER_DEFINE_CLASS(loudnorm)
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
static const AVOption loudnorm_options[]
Definition: af_loudnorm.c:101
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
#define av_cold
Definition: attributes.h:88
double measured_tp
Definition: af_loudnorm.c:59
AVOptions.
int ff_ebur128_loudness_range(FFEBUR128State *st, double *out)
Get loudness range (LRA) of programme in LU.
Definition: ebur128.c:753
double measured_lra
Definition: af_loudnorm.c:58
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
Definition: af_loudnorm.c:167
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
static const uint64_t c1
Definition: murmur3.c:49
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define max(a, b)
Definition: cuda_runtime.h:33
channels
Definition: aptx.h:33
#define av_log(a,...)
enum PrintFormat print_format
Definition: af_loudnorm.c:64
A filter pad used for either input or output.
Definition: internal.h:54
#define src
Definition: vp8dsp.c:254
FrameType
G723.1 frame types.
Definition: g723_1.h:63
double target_lra
Definition: af_loudnorm.c:55
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:600
can call ff_ebur128_loudness_shortterm
Definition: ebur128.h:87
static int request_frame(AVFilterLink *outlink)
Definition: af_loudnorm.c:640
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
int ff_ebur128_sample_peak(FFEBUR128State *st, unsigned int channel_number, double *out)
Get maximum sample peak of selected channel in float format.
Definition: ebur128.c:758
int ff_ebur128_loudness_shortterm(FFEBUR128State *st, double *out)
Get short-term loudness (last 3s) in LUFS.
Definition: ebur128.c:645
static av_always_inline av_const double round(double x)
Definition: libm.h:444
double * prev_smp
Definition: af_loudnorm.c:78
can call ff_ebur128_loudness_range
Definition: ebur128.h:91
int8_t exp
Definition: eval.c:72
static const AVFilterPad avfilter_af_loudnorm_inputs[]
Definition: af_loudnorm.c:903
double measured_i
Definition: af_loudnorm.c:57
enum LimiterState limiter_state
Definition: af_loudnorm.c:81
double measured_thresh
Definition: af_loudnorm.c:60
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:470
double * limiter_buf
Definition: af_loudnorm.c:77
AVFormatContext * ctx
Definition: movenc.c:48
Contains information about the state of a loudness measurement.
Definition: ebur128.h:103
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
static double gaussian_filter(LoudNormContext *s, int index)
Definition: af_loudnorm.c:155
FFEBUR128State * ff_ebur128_init(unsigned int channels, unsigned long samplerate, unsigned long window, int mode)
Initialize library state.
Definition: ebur128.c:217
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
A list of supported channel layouts.
Definition: formats.h:85
sample_rate
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define OFFSET(x)
Definition: af_loudnorm.c:98
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
double prev_delta
Definition: af_loudnorm.c:73
int ff_ebur128_set_channel(FFEBUR128State *st, unsigned int channel_number, int value)
Set channel type.
Definition: ebur128.c:446
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:595
static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
Definition: af_loudnorm.c:230
enum FrameType frame_type
Definition: af_loudnorm.c:89
static av_cold int init(AVFilterContext *ctx)
Definition: af_loudnorm.c:783
double delta[30]
Definition: af_loudnorm.c:71
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_loudnorm.c:804
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
const char * name
Filter name.
Definition: avfilter.h:148
static int frame_size(int sample_rate, int frame_len_msec)
Definition: af_loudnorm.c:127
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
double gain_reduction[2]
Definition: af_loudnorm.c:76
static int query_formats(AVFilterContext *ctx)
Definition: af_loudnorm.c:685
static const uint64_t c2
Definition: murmur3.c:50
int ff_ebur128_relative_threshold(FFEBUR128State *st, double *out)
Get relative threshold in LUFS.
Definition: ebur128.c:587
static void init_gaussian_filter(LoudNormContext *s)
Definition: af_loudnorm.c:133
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:338
and forward the result(frame or status change) to the corresponding input.If nothing is possible
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:731
FILE * out
Definition: movenc.c:54
static const AVFilterPad avfilter_af_loudnorm_outputs[]
Definition: af_loudnorm.c:913
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
LimiterState
Definition: af_loudnorm.c:37
#define av_malloc_array(a, b)
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:407
formats
Definition: signature.h:48
FFEBUR128State * r128_out
Definition: af_loudnorm.c:95
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:440
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_loudnorm.c:409
#define FLAGS
Definition: af_loudnorm.c:99
PrintFormat
Definition: af_loudnorm.c:45
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:366
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
libebur128 - a library for loudness measurement according to the EBU R128 standard.
static uint8_t tmp[11]
Definition: aes_ctr.c:26