af_alimiter.c
/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lookahead limiter filter
 */

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct MetaItem {
    int64_t pts;
    int nb_samples;
} MetaItem;

typedef struct AudioLimiterContext {
    const AVClass *class;

    double limit;
    double attack;
    double release;
    double att;
    double level_in;
    double level_out;
    int auto_release;
    int auto_level;
    double asc;
    int asc_c;
    int asc_pos;
    double asc_coeff;

    double *buffer;
    int buffer_size;
    int pos;
    int *nextpos;
    double *nextdelta;

    int in_trim;
    int out_pad;
    int64_t next_in_pts;
    int64_t next_out_pts;
    int latency;

    AVFifo *fifo;

    double delta;
    int nextiter;
    int nextlen;
    int asc_changed;
} AudioLimiterContext;

#define OFFSET(x) offsetof(AudioLimiterContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption alimiter_options[] = {
    { "level_in",  "set input level",  OFFSET(level_in),     AV_OPT_TYPE_DOUBLE, {.dbl=1},   .015625,   64, AF },
    { "level_out", "set output level", OFFSET(level_out),    AV_OPT_TYPE_DOUBLE, {.dbl=1},   .015625,   64, AF },
    { "limit",     "set limit",        OFFSET(limit),        AV_OPT_TYPE_DOUBLE, {.dbl=1},    0.0625,    1, AF },
    { "attack",    "set attack",       OFFSET(attack),       AV_OPT_TYPE_DOUBLE, {.dbl=5},       0.1,   80, AF },
    { "release",   "set release",      OFFSET(release),      AV_OPT_TYPE_DOUBLE, {.dbl=50},        1, 8000, AF },
    { "asc",       "enable asc",       OFFSET(auto_release), AV_OPT_TYPE_BOOL,   {.i64=0},         0,    1, AF },
    { "asc_level", "set asc level",    OFFSET(asc_coeff),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5},       0,    1, AF },
    { "level",     "auto level",       OFFSET(auto_level),   AV_OPT_TYPE_BOOL,   {.i64=1},         0,    1, AF },
    { "latency",   "compensate delay", OFFSET(latency),      AV_OPT_TYPE_BOOL,   {.i64=0},         0,    1, AF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(alimiter);

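/*
 * Illustrative use of the options above from the command line (example only,
 * not part of the original source):
 *
 *     ffmpeg -i in.wav -af "alimiter=limit=0.8:attack=5:release=50:asc=1" out.wav
 */
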
static av_cold int init(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    s->attack   /= 1000.;
    s->release  /= 1000.;
    s->att       = 1.;
    s->asc_pos   = -1;
    s->asc_coeff = pow(0.5, s->asc_coeff - 0.5) * 2 * -1;

    return 0;
}

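/*
 * Helper for the release phase: returns the per-sample increment applied to
 * the attenuation factor so that it recovers from "patt" back towards 1.0
 * over "release" seconds. When ASC is enabled, the slope may be reduced
 * based on the accumulated over-limit peaks (asc/asc_c), but never below one
 * tenth of the plain release slope.
 */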
static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate,
                         double peak, double limit, double patt, int asc)
{
    double rdelta = (1.0 - patt) / (sample_rate * release);

    if (asc && s->auto_release && s->asc_c > 0) {
        double a_att = limit / (s->asc_coeff * s->asc) * (double)s->asc_c;

        if (a_att > patt) {
            double delta = FFMAX((a_att - patt) / (sample_rate * release), rdelta / 10);

            if (delta < rdelta)
                rdelta = delta;
        }
    }

    return rdelta;
}

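/*
 * Process one input frame. Incoming samples are scaled by level_in and
 * written into the circular lookahead buffer; the samples written one attack
 * period earlier are read back, multiplied by the current attenuation "att"
 * and emitted. When a peak above the limit enters the buffer, a gain ramp
 * (s->delta) is scheduled so that "att" reaches limit/peak by the time that
 * peak reaches the output side; pending ramps are kept in the
 * nextpos/nextdelta queue. Emitted samples are clipped to +/-limit and
 * scaled by level and level_out.
 */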
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *src = (const double *)in->data[0];
    const int channels = inlink->ch_layout.nb_channels;
    const int buffer_size = s->buffer_size;
    double *dst, *buffer = s->buffer;
    const double release = s->release;
    const double limit = s->limit;
    double *nextdelta = s->nextdelta;
    double level = s->auto_level ? 1 / limit : 1;
    const double level_out = s->level_out;
    const double level_in = s->level_in;
    int *nextpos = s->nextpos;
    AVFrame *out;
    double *buf;
    int n, c, i;
    int new_out_samples;
    int64_t out_duration;
    int64_t in_duration;
    int64_t in_pts;
    MetaItem meta;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        double peak = 0;

        for (c = 0; c < channels; c++) {
            double sample = src[c] * level_in;

            buffer[s->pos + c] = sample;
            peak = FFMAX(peak, fabs(sample));
        }

        if (s->auto_release && peak > limit) {
            s->asc += peak;
            s->asc_c++;
        }

        if (peak > limit) {
            double patt = FFMIN(limit / peak, 1.);
            double rdelta = get_rdelta(s, release, inlink->sample_rate,
                                       peak, limit, patt, 0);
            double delta = (limit / peak - s->att) / buffer_size * channels;
            int found = 0;

            if (delta < s->delta) {
                s->delta = delta;
                nextpos[0] = s->pos;
                nextpos[1] = -1;
                nextdelta[0] = rdelta;
                s->nextlen = 1;
                s->nextiter = 0;
            } else {
                for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) {
                    int j = i % buffer_size;
                    double ppeak = 0, pdelta;

                    if (nextpos[j] >= 0)
                        for (c = 0; c < channels; c++) {
                            ppeak = FFMAX(ppeak, fabs(buffer[nextpos[j] + c]));
                        }
                    pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels);
                    if (pdelta < nextdelta[j]) {
                        nextdelta[j] = pdelta;
                        found = 1;
                        break;
                    }
                }
                if (found) {
                    s->nextlen = i - s->nextiter + 1;
                    nextpos[(s->nextiter + s->nextlen) % buffer_size] = s->pos;
                    nextdelta[(s->nextiter + s->nextlen) % buffer_size] = rdelta;
                    nextpos[(s->nextiter + s->nextlen + 1) % buffer_size] = -1;
                    s->nextlen++;
                }
            }
        }

        buf = &s->buffer[(s->pos + channels) % buffer_size];
        peak = 0;
        for (c = 0; c < channels; c++) {
            double sample = buf[c];

            peak = FFMAX(peak, fabs(sample));
        }

        if (s->pos == s->asc_pos && !s->asc_changed)
            s->asc_pos = -1;

        if (s->auto_release && s->asc_pos == -1 && peak > limit) {
            s->asc -= peak;
            s->asc_c--;
        }

        s->att += s->delta;

        for (c = 0; c < channels; c++)
            dst[c] = buf[c] * s->att;

        if ((s->pos + channels) % buffer_size == nextpos[s->nextiter]) {
            if (s->auto_release) {
                s->delta = get_rdelta(s, release, inlink->sample_rate,
                                      peak, limit, s->att, 1);
                if (s->nextlen > 1) {
                    double ppeak = 0, pdelta;
                    int pnextpos = nextpos[(s->nextiter + 1) % buffer_size];

                    for (c = 0; c < channels; c++) {
                        ppeak = FFMAX(ppeak, fabs(buffer[pnextpos + c]));
                    }
                    pdelta = (limit / ppeak - s->att) /
                             (((buffer_size + pnextpos -
                               ((s->pos + channels) % buffer_size)) %
                               buffer_size) / channels);
                    if (pdelta < s->delta)
                        s->delta = pdelta;
                }
            } else {
                s->delta = nextdelta[s->nextiter];
                s->att = limit / peak;
            }

            s->nextlen -= 1;
            nextpos[s->nextiter] = -1;
            s->nextiter = (s->nextiter + 1) % buffer_size;
        }

        if (s->att > 1.) {
            s->att = 1.;
            s->delta = 0.;
            s->nextiter = 0;
            s->nextlen = 0;
            nextpos[0] = -1;
        }

        if (s->att <= 0.) {
            s->att = 0.0000000000001;
            s->delta = (1.0 - s->att) / (inlink->sample_rate * release);
        }

        if (s->att != 1. && (1. - s->att) < 0.0000000000001)
            s->att = 1.;

        if (s->delta != 0. && fabs(s->delta) < 0.00000000000001)
            s->delta = 0.;

        for (c = 0; c < channels; c++)
            dst[c] = av_clipd(dst[c], -limit, limit) * level * level_out;

        s->pos = (s->pos + channels) % buffer_size;
        src += channels;
        dst += channels;
    }

    in_duration = av_rescale_q(in->nb_samples, inlink->time_base, av_make_q(1, in->sample_rate));
    in_pts = in->pts;
    meta = (MetaItem){ in->pts, in->nb_samples };
    av_fifo_write(s->fifo, &meta, 1);
    if (in != out)
        av_frame_free(&in);

    new_out_samples = out->nb_samples;
    if (s->in_trim > 0) {
        int trim = FFMIN(new_out_samples, s->in_trim);
        new_out_samples -= trim;
        s->in_trim -= trim;
    }

    if (new_out_samples <= 0) {
        av_frame_free(&out);
        return 0;
    } else if (new_out_samples < out->nb_samples) {
        int offset = out->nb_samples - new_out_samples;
        memmove(out->extended_data[0], out->extended_data[0] + sizeof(double) * offset * out->ch_layout.nb_channels,
                sizeof(double) * new_out_samples * out->ch_layout.nb_channels);
        out->nb_samples = new_out_samples;
        s->in_trim = 0;
    }

    av_fifo_read(s->fifo, &meta, 1);

    out_duration = av_rescale_q(out->nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
    in_duration = av_rescale_q(meta.nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
    in_pts = meta.pts;

    if (s->next_out_pts != AV_NOPTS_VALUE && out->pts != s->next_out_pts &&
        s->next_in_pts != AV_NOPTS_VALUE && in_pts == s->next_in_pts) {
        out->pts = s->next_out_pts;
    } else {
        out->pts = in_pts;
    }
    s->next_in_pts = in_pts + in_duration;
    s->next_out_pts = out->pts + out_duration;

    return ff_filter_frame(outlink, out);
}

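/*
 * When latency compensation is enabled, EOF on the input does not end the
 * stream immediately: extra frames are pushed through filter_frame() until
 * the out_pad samples still held in the lookahead buffer have been flushed.
 */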
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioLimiterContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->out_pad > 0) {
        AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(1024, s->out_pad));
        if (!frame)
            return AVERROR(ENOMEM);

        s->out_pad -= frame->nb_samples;
        frame->pts = s->next_in_pts;
        return filter_frame(ctx->inputs[0], frame);
    }
    return ret;
}

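/*
 * Allocate the lookahead state. The working buffers are sized for roughly
 * 100 ms of audio, while buffer_size (sample_rate * attack * channels,
 * rounded down to a whole number of frames) defines the span actually used
 * for lookahead. With the "latency" option the corresponding number of
 * frames is trimmed from the start of the output and re-added at EOF so
 * that overall timing is preserved.
 */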
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    int obuffer_size;

    obuffer_size = inlink->sample_rate * inlink->ch_layout.nb_channels * 100 / 1000. + inlink->ch_layout.nb_channels;
    if (obuffer_size < inlink->ch_layout.nb_channels)
        return AVERROR(EINVAL);

    s->buffer = av_calloc(obuffer_size, sizeof(*s->buffer));
    s->nextdelta = av_calloc(obuffer_size, sizeof(*s->nextdelta));
    s->nextpos = av_malloc_array(obuffer_size, sizeof(*s->nextpos));
    if (!s->buffer || !s->nextdelta || !s->nextpos)
        return AVERROR(ENOMEM);

    memset(s->nextpos, -1, obuffer_size * sizeof(*s->nextpos));
    s->buffer_size = inlink->sample_rate * s->attack * inlink->ch_layout.nb_channels;
    s->buffer_size -= s->buffer_size % inlink->ch_layout.nb_channels;
    if (s->latency)
        s->in_trim = s->out_pad = s->buffer_size / inlink->ch_layout.nb_channels - 1;
    s->next_out_pts = AV_NOPTS_VALUE;
    s->next_in_pts = AV_NOPTS_VALUE;

    s->fifo = av_fifo_alloc2(8, sizeof(MetaItem), AV_FIFO_FLAG_AUTO_GROW);
    if (!s->fifo) {
        return AVERROR(ENOMEM);
    }

    if (s->buffer_size <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Attack is too small.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    av_freep(&s->buffer);
    av_freep(&s->nextdelta);
    av_freep(&s->nextpos);

    av_fifo_freep2(&s->fifo);
}

static const AVFilterPad alimiter_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad alimiter_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
};

const AVFilter ff_af_alimiter = {
    .name            = "alimiter",
    .description     = NULL_IF_CONFIG_SMALL("Audio lookahead limiter."),
    .priv_size       = sizeof(AudioLimiterContext),
    .priv_class      = &alimiter_class,
    .init            = init,
    .uninit          = uninit,
    FILTER_INPUTS(alimiter_inputs),
    FILTER_OUTPUTS(alimiter_outputs),
    FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBL),
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
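
Beyond the command line, the filter can also be driven through the public libavfilter API. The sketch below is a minimal, hypothetical example and is not part of af_alimiter.c: the graph layout, the option string and the runtime "limit" command are assumptions made purely for illustration. It exercises the pieces defined above, namely alimiter_options for the initial settings and ff_filter_process_command() for runtime changes, which is reachable because the options carry AV_OPT_FLAG_RUNTIME_PARAM.

#include <libavfilter/avfilter.h>

/* Build abuffer -> alimiter -> abuffersink and change "limit" at runtime
 * (illustrative sketch, not part of the original source). */
static int run_alimiter_example(void)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *src = NULL, *lim = NULL, *sink = NULL;
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);

    /* Source declared as double-precision audio, matching
     * FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_DBL) above. */
    ret = avfilter_graph_create_filter(&src, avfilter_get_by_name("abuffer"), "in",
                                       "sample_rate=48000:sample_fmt=dbl:channel_layout=stereo",
                                       NULL, graph);
    if (ret < 0)
        goto end;

    /* Initial options use the names from alimiter_options. */
    ret = avfilter_graph_create_filter(&lim, avfilter_get_by_name("alimiter"), "lim",
                                       "limit=0.8:attack=5:release=50:asc=1", NULL, graph);
    if (ret < 0)
        goto end;

    ret = avfilter_graph_create_filter(&sink, avfilter_get_by_name("abuffersink"), "out",
                                       NULL, NULL, graph);
    if (ret < 0)
        goto end;

    if ((ret = avfilter_link(src, 0, lim, 0)) < 0 ||
        (ret = avfilter_link(lim, 0, sink, 0)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto end;

    /* Runtime change; routed to ff_filter_process_command() because the
     * option carries AV_OPT_FLAG_RUNTIME_PARAM. */
    ret = avfilter_process_command(lim, "limit", "0.5", NULL, 0, 0);

end:
    avfilter_graph_free(&graph);
    return ret;
}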