FFmpeg
af_alimiter.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Lookahead limiter filter
25  */
26 
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
36 
/* Per-input-frame bookkeeping queued in the FIFO so that output frames can
 * be re-stamped after latency compensation trims samples. */
typedef struct MetaItem {
    int64_t pts;        // presentation timestamp of the input frame
    int nb_samples;     // number of samples the input frame carried
} MetaItem;
41 
42 typedef struct AudioLimiterContext {
43  const AVClass *class;
44 
45  double limit;
46  double attack;
47  double release;
48  double att;
49  double level_in;
50  double level_out;
53  double asc;
54  int asc_c;
55  int asc_pos;
56  double asc_coeff;
57 
58  double *buffer;
60  int pos;
61  int *nextpos;
62  double *nextdelta;
63 
64  int in_trim;
65  int out_pad;
66  int64_t next_in_pts;
67  int64_t next_out_pts;
68  int latency;
69 
71 
72  double delta;
73  int nextiter;
74  int nextlen;
77 
78 #define OFFSET(x) offsetof(AudioLimiterContext, x)
79 #define AF AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
80 
/* User options; all are runtime-adjustable (AV_OPT_FLAG_RUNTIME_PARAM). */
static const AVOption alimiter_options[] = {
    { "level_in",  "set input level",  OFFSET(level_in),     AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625,   64, AF },
    { "level_out", "set output level", OFFSET(level_out),    AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625,   64, AF },
    { "limit",     "set limit",        OFFSET(limit),        AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0625,    1, AF },
    { "attack",    "set attack",       OFFSET(attack),       AV_OPT_TYPE_DOUBLE, {.dbl=5},    0.1,   80, AF },
    { "release",   "set release",      OFFSET(release),      AV_OPT_TYPE_DOUBLE, {.dbl=50},     1, 8000, AF },
    { "asc",       "enable asc",       OFFSET(auto_release), AV_OPT_TYPE_BOOL,   {.i64=0},      0,    1, AF },
    { "asc_level", "set asc level",    OFFSET(asc_coeff),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5},    0,    1, AF },
    { "level",     "auto level",       OFFSET(auto_level),   AV_OPT_TYPE_BOOL,   {.i64=1},      0,    1, AF },
    { "latency",   "compensate delay", OFFSET(latency),      AV_OPT_TYPE_BOOL,   {.i64=0},      0,    1, AF },
    { NULL }
};
93 
94 AVFILTER_DEFINE_CLASS(alimiter);
95 
97 {
98  AudioLimiterContext *s = ctx->priv;
99 
100  s->attack /= 1000.;
101  s->release /= 1000.;
102  s->att = 1.;
103  s->asc_pos = -1;
104  s->asc_coeff = pow(0.5, s->asc_coeff - 0.5) * 2 * -1;
105 
106  return 0;
107 }
108 
109 static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate,
110  double peak, double limit, double patt, int asc)
111 {
112  double rdelta = (1.0 - patt) / (sample_rate * release);
113 
114  if (asc && s->auto_release && s->asc_c > 0) {
115  double a_att = limit / (s->asc_coeff * s->asc) * (double)s->asc_c;
116 
117  if (a_att > patt) {
118  double delta = FFMAX((a_att - patt) / (sample_rate * release), rdelta / 10);
119 
120  if (delta < rdelta)
121  rdelta = delta;
122  }
123  }
124 
125  return rdelta;
126 }
127 
129 {
130  AVFilterContext *ctx = inlink->dst;
131  AudioLimiterContext *s = ctx->priv;
132  AVFilterLink *outlink = ctx->outputs[0];
133  const double *src = (const double *)in->data[0];
134  const int channels = inlink->ch_layout.nb_channels;
135  const int buffer_size = s->buffer_size;
136  double *dst, *buffer = s->buffer;
137  const double release = s->release;
138  const double limit = s->limit;
139  double *nextdelta = s->nextdelta;
140  double level = s->auto_level ? 1 / limit : 1;
141  const double level_out = s->level_out;
142  const double level_in = s->level_in;
143  int *nextpos = s->nextpos;
144  AVFrame *out;
145  double *buf;
146  int n, c, i;
147  int new_out_samples;
148  int64_t out_duration;
149  int64_t in_duration;
150  int64_t in_pts;
151  MetaItem meta;
152 
153  if (av_frame_is_writable(in)) {
154  out = in;
155  } else {
156  out = ff_get_audio_buffer(outlink, in->nb_samples);
157  if (!out) {
158  av_frame_free(&in);
159  return AVERROR(ENOMEM);
160  }
162  }
163  dst = (double *)out->data[0];
164 
165  for (n = 0; n < in->nb_samples; n++) {
166  double peak = 0;
167 
168  for (c = 0; c < channels; c++) {
169  double sample = src[c] * level_in;
170 
171  buffer[s->pos + c] = sample;
172  peak = FFMAX(peak, fabs(sample));
173  }
174 
175  if (s->auto_release && peak > limit) {
176  s->asc += peak;
177  s->asc_c++;
178  }
179 
180  if (peak > limit) {
181  double patt = FFMIN(limit / peak, 1.);
182  double rdelta = get_rdelta(s, release, inlink->sample_rate,
183  peak, limit, patt, 0);
184  double delta = (limit / peak - s->att) / buffer_size * channels;
185  int found = 0;
186 
187  if (delta < s->delta) {
188  s->delta = delta;
189  nextpos[0] = s->pos;
190  nextpos[1] = -1;
191  nextdelta[0] = rdelta;
192  s->nextlen = 1;
193  s->nextiter= 0;
194  } else {
195  for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) {
196  int j = i % buffer_size;
197  double ppeak = 0, pdelta;
198 
199  for (c = 0; c < channels; c++) {
200  ppeak = FFMAX(ppeak, fabs(buffer[nextpos[j] + c]));
201  }
202  pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels);
203  if (pdelta < nextdelta[j]) {
204  nextdelta[j] = pdelta;
205  found = 1;
206  break;
207  }
208  }
209  if (found) {
210  s->nextlen = i - s->nextiter + 1;
211  nextpos[(s->nextiter + s->nextlen) % buffer_size] = s->pos;
212  nextdelta[(s->nextiter + s->nextlen) % buffer_size] = rdelta;
213  nextpos[(s->nextiter + s->nextlen + 1) % buffer_size] = -1;
214  s->nextlen++;
215  }
216  }
217  }
218 
219  buf = &s->buffer[(s->pos + channels) % buffer_size];
220  peak = 0;
221  for (c = 0; c < channels; c++) {
222  double sample = buf[c];
223 
224  peak = FFMAX(peak, fabs(sample));
225  }
226 
227  if (s->pos == s->asc_pos && !s->asc_changed)
228  s->asc_pos = -1;
229 
230  if (s->auto_release && s->asc_pos == -1 && peak > limit) {
231  s->asc -= peak;
232  s->asc_c--;
233  }
234 
235  s->att += s->delta;
236 
237  for (c = 0; c < channels; c++)
238  dst[c] = buf[c] * s->att;
239 
240  if ((s->pos + channels) % buffer_size == nextpos[s->nextiter]) {
241  if (s->auto_release) {
242  s->delta = get_rdelta(s, release, inlink->sample_rate,
243  peak, limit, s->att, 1);
244  if (s->nextlen > 1) {
245  double ppeak = 0, pdelta;
246  int pnextpos = nextpos[(s->nextiter + 1) % buffer_size];
247 
248  for (c = 0; c < channels; c++) {
249  ppeak = FFMAX(ppeak, fabs(buffer[pnextpos + c]));
250  }
251  pdelta = (limit / ppeak - s->att) /
252  (((buffer_size + pnextpos -
253  ((s->pos + channels) % buffer_size)) %
254  buffer_size) / channels);
255  if (pdelta < s->delta)
256  s->delta = pdelta;
257  }
258  } else {
259  s->delta = nextdelta[s->nextiter];
260  s->att = limit / peak;
261  }
262 
263  s->nextlen -= 1;
264  nextpos[s->nextiter] = -1;
265  s->nextiter = (s->nextiter + 1) % buffer_size;
266  }
267 
268  if (s->att > 1.) {
269  s->att = 1.;
270  s->delta = 0.;
271  s->nextiter = 0;
272  s->nextlen = 0;
273  nextpos[0] = -1;
274  }
275 
276  if (s->att <= 0.) {
277  s->att = 0.0000000000001;
278  s->delta = (1.0 - s->att) / (inlink->sample_rate * release);
279  }
280 
281  if (s->att != 1. && (1. - s->att) < 0.0000000000001)
282  s->att = 1.;
283 
284  if (s->delta != 0. && fabs(s->delta) < 0.00000000000001)
285  s->delta = 0.;
286 
287  for (c = 0; c < channels; c++)
288  dst[c] = av_clipd(dst[c], -limit, limit) * level * level_out;
289 
290  s->pos = (s->pos + channels) % buffer_size;
291  src += channels;
292  dst += channels;
293  }
294 
295  in_duration = av_rescale_q(in->nb_samples, inlink->time_base, av_make_q(1, in->sample_rate));
296  in_pts = in->pts;
297  meta = (MetaItem){ in->pts, in->nb_samples };
298  av_fifo_write(s->fifo, &meta, 1);
299  if (in != out)
300  av_frame_free(&in);
301 
302  new_out_samples = out->nb_samples;
303  if (s->in_trim > 0) {
304  int trim = FFMIN(new_out_samples, s->in_trim);
305  new_out_samples -= trim;
306  s->in_trim -= trim;
307  }
308 
309  if (new_out_samples <= 0) {
310  av_frame_free(&out);
311  return 0;
312  } else if (new_out_samples < out->nb_samples) {
313  int offset = out->nb_samples - new_out_samples;
314  memmove(out->extended_data[0], out->extended_data[0] + sizeof(double) * offset * out->ch_layout.nb_channels,
315  sizeof(double) * new_out_samples * out->ch_layout.nb_channels);
316  out->nb_samples = new_out_samples;
317  s->in_trim = 0;
318  }
319 
320  av_fifo_read(s->fifo, &meta, 1);
321 
322  out_duration = av_rescale_q(out->nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
323  in_duration = av_rescale_q(meta.nb_samples, inlink->time_base, av_make_q(1, out->sample_rate));
324  in_pts = meta.pts;
325 
326  if (s->next_out_pts != AV_NOPTS_VALUE && out->pts != s->next_out_pts &&
327  s->next_in_pts != AV_NOPTS_VALUE && in_pts == s->next_in_pts) {
328  out->pts = s->next_out_pts;
329  } else {
330  out->pts = in_pts;
331  }
332  s->next_in_pts = in_pts + in_duration;
333  s->next_out_pts = out->pts + out_duration;
334 
335  return ff_filter_frame(outlink, out);
336 }
337 
338 static int request_frame(AVFilterLink* outlink)
339 {
340  AVFilterContext *ctx = outlink->src;
342  int ret;
343 
344  ret = ff_request_frame(ctx->inputs[0]);
345 
346  if (ret == AVERROR_EOF && s->out_pad > 0) {
347  AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(1024, s->out_pad));
348  if (!frame)
349  return AVERROR(ENOMEM);
350 
351  s->out_pad -= frame->nb_samples;
352  frame->pts = s->next_in_pts;
353  return filter_frame(ctx->inputs[0], frame);
354  }
355  return ret;
356 }
357 
359 {
360  AVFilterContext *ctx = inlink->dst;
361  AudioLimiterContext *s = ctx->priv;
362  int obuffer_size;
363 
364  obuffer_size = inlink->sample_rate * inlink->ch_layout.nb_channels * 100 / 1000. + inlink->ch_layout.nb_channels;
365  if (obuffer_size < inlink->ch_layout.nb_channels)
366  return AVERROR(EINVAL);
367 
368  s->buffer = av_calloc(obuffer_size, sizeof(*s->buffer));
369  s->nextdelta = av_calloc(obuffer_size, sizeof(*s->nextdelta));
370  s->nextpos = av_malloc_array(obuffer_size, sizeof(*s->nextpos));
371  if (!s->buffer || !s->nextdelta || !s->nextpos)
372  return AVERROR(ENOMEM);
373 
374  memset(s->nextpos, -1, obuffer_size * sizeof(*s->nextpos));
375  s->buffer_size = inlink->sample_rate * s->attack * inlink->ch_layout.nb_channels;
376  s->buffer_size -= s->buffer_size % inlink->ch_layout.nb_channels;
377  if (s->latency)
378  s->in_trim = s->out_pad = s->buffer_size / inlink->ch_layout.nb_channels - 1;
379  s->next_out_pts = AV_NOPTS_VALUE;
380  s->next_in_pts = AV_NOPTS_VALUE;
381 
382  s->fifo = av_fifo_alloc2(8, sizeof(MetaItem), AV_FIFO_FLAG_AUTO_GROW);
383  if (!s->fifo) {
384  return AVERROR(ENOMEM);
385  }
386 
387  if (s->buffer_size <= 0) {
388  av_log(ctx, AV_LOG_ERROR, "Attack is too small.\n");
389  return AVERROR(EINVAL);
390  }
391 
392  return 0;
393 }
394 
396 {
397  AudioLimiterContext *s = ctx->priv;
398 
399  av_freep(&s->buffer);
400  av_freep(&s->nextdelta);
401  av_freep(&s->nextpos);
402 
403  av_fifo_freep2(&s->fifo);
404 }
405 
/* Single audio input; config_props runs once the sample format is known. */
static const AVFilterPad alimiter_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};
414 
/* Single audio output; request_frame flushes the lookahead delay at EOF. */
static const AVFilterPad alimiter_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
};
422 
424  .name = "alimiter",
425  .description = NULL_IF_CONFIG_SMALL("Audio lookahead limiter."),
426  .priv_size = sizeof(AudioLimiterContext),
427  .priv_class = &alimiter_class,
428  .init = init,
429  .uninit = uninit,
433  .process_command = ff_filter_process_command,
435 };
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:100
alimiter_options
static const AVOption alimiter_options[]
Definition: af_alimiter.c:81
AudioLimiterContext::next_out_pts
int64_t next_out_pts
Definition: af_alimiter.c:67
level
uint8_t level
Definition: svq3.c:204
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
out
FILE * out
Definition: movenc.c:54
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_alimiter.c:96
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1009
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AudioLimiterContext::asc_c
int asc_c
Definition: af_alimiter.c:54
AudioLimiterContext::attack
double attack
Definition: af_alimiter.c:46
FILTER_SINGLE_SAMPLEFMT
#define FILTER_SINGLE_SAMPLEFMT(sample_fmt_)
Definition: internal.h:183
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:432
alimiter_inputs
static const AVFilterPad alimiter_inputs[]
Definition: af_alimiter.c:406
AVOption
AVOption.
Definition: opt.h:251
alimiter_outputs
static const AVFilterPad alimiter_outputs[]
Definition: af_alimiter.c:415
ff_request_frame
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:410
AudioLimiterContext::auto_level
int auto_level
Definition: af_alimiter.c:52
ff_af_alimiter
const AVFilter ff_af_alimiter
Definition: af_alimiter.c:423
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:175
AudioLimiterContext::pos
int pos
Definition: af_alimiter.c:60
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
formats.h
fifo.h
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
AudioLimiterContext::asc_coeff
double asc_coeff
Definition: af_alimiter.c:56
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
s
#define s(width, name)
Definition: cbs_vp9.c:256
AudioLimiterContext::fifo
AVFifo * fifo
Definition: af_alimiter.c:70
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_alimiter.c:395
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:31
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:190
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_alimiter.c:128
if
if(ret)
Definition: filter_design.txt:179
AudioLimiterContext::level_in
double level_in
Definition: af_alimiter.c:49
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:603
AudioLimiterContext
Definition: af_alimiter.c:42
MetaItem::pts
int64_t pts
Definition: af_alimiter.c:38
AudioLimiterContext::nextlen
int nextlen
Definition: af_alimiter.c:74
AudioLimiterContext::out_pad
int out_pad
Definition: af_alimiter.c:65
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AudioLimiterContext::asc
double asc
Definition: af_alimiter.c:53
AVFifo
Definition: fifo.c:35
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:502
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AudioLimiterContext::buffer
double * buffer
Definition: af_alimiter.c:58
sample
#define sample
Definition: flacdsp_template.c:44
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:533
AF
#define AF
Definition: af_alimiter.c:79
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:873
AudioLimiterContext::release
double release
Definition: af_alimiter.c:47
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
internal.h
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:152
AudioLimiterContext::delta
double delta
Definition: af_alimiter.c:72
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:405
AudioLimiterContext::in_trim
int in_trim
Definition: af_alimiter.c:64
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AudioLimiterContext::buffer_size
int buffer_size
Definition: af_alimiter.c:59
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
common.h
delta
float delta
Definition: vorbis_enc_data.h:430
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AudioLimiterContext::nextpos
int * nextpos
Definition: af_alimiter.c:61
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
limit
static double limit(double x)
Definition: vf_pseudocolor.c:130
AVFilter
Filter definition.
Definition: avfilter.h:171
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
request_frame
static int request_frame(AVFilterLink *outlink)
Definition: af_alimiter.c:338
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AudioLimiterContext::nextiter
int nextiter
Definition: af_alimiter.c:73
OFFSET
#define OFFSET(x)
Definition: af_alimiter.c:78
channel_layout.h
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
patt
static const int8_t patt[4]
Definition: vf_noise.c:67
avfilter.h
AudioLimiterContext::asc_pos
int asc_pos
Definition: af_alimiter.c:55
MetaItem
Definition: af_alimiter.c:37
AudioLimiterContext::level_out
double level_out
Definition: af_alimiter.c:50
get_rdelta
static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate, double peak, double limit, double patt, int asc)
Definition: af_alimiter.c:109
AVFilterContext
An instance of a filter.
Definition: avfilter.h:415
MetaItem::nb_samples
int nb_samples
Definition: af_alimiter.c:39
AudioLimiterContext::auto_release
int auto_release
Definition: af_alimiter.c:51
audio.h
AudioLimiterContext::asc_changed
int asc_changed
Definition: af_alimiter.c:75
AudioLimiterContext::att
double att
Definition: af_alimiter.c:48
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(alimiter)
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AudioLimiterContext::limit
double limit
Definition: af_alimiter.c:45
AudioLimiterContext::latency
int latency
Definition: af_alimiter.c:68
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AudioLimiterContext::next_in_pts
int64_t next_in_pts
Definition: af_alimiter.c:66
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_alimiter.c:358
AV_SAMPLE_FMT_DBL
@ AV_SAMPLE_FMT_DBL
double
Definition: samplefmt.h:61
AudioLimiterContext::nextdelta
double * nextdelta
Definition: af_alimiter.c:62
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
av_clipd
av_clipd
Definition: af_crystalizer.c:132