FFmpeg
af_compand.c
1 /*
2  * Copyright (c) 1999 Chris Bagwell
3  * Copyright (c) 1999 Nick Bailey
4  * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
5  * Copyright (c) 2013 Paul B Mahol
6  * Copyright (c) 2014 Andrew Kelley
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * audio compand filter
28  */
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/ffmath.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/samplefmt.h"
35 #include "audio.h"
36 #include "avfilter.h"
37 #include "internal.h"
38 
39 typedef struct ChanParam {
40  double attack;
41  double decay;
42  double volume;
43 } ChanParam;
44 
45 typedef struct CompandSegment {
46  double x, y;
47  double a, b;
48 } CompandSegment;
49 
50 typedef struct CompandContext {
51  const AVClass *class;
52  int nb_segments;
53  char *attacks, *decays, *points;
54  CompandSegment *segments;
55  ChanParam *channels;
56  double in_min_lin;
57  double out_min_lin;
58  double curve_dB;
59  double gain_dB;
60  double initial_volume;
61  double delay;
62  AVFrame *delay_frame;
63  int delay_samples;
64  int delay_count;
65  int delay_index;
66  int64_t pts;
67 
68  int (*compand)(AVFilterContext *ctx, AVFrame *frame);
69 } CompandContext;
70 
71 #define OFFSET(x) offsetof(CompandContext, x)
72 #define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
73 
74 static const AVOption compand_options[] = {
75  { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
76  { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
77  { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
78  { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
79  { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
80  { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
81  { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
82  { NULL }
83 };
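/* Example usage (illustrative values only; the options above map directly
 * onto the filter's option string, with times in seconds and levels in dB):
 *
 *   ffmpeg -i in.wav -af "compand=attacks=0.3:decays=0.8:points=-70/-70|-60/-20|1/0:gain=2" out.wav
 *
 * The points list separates input/output pairs with '|' or spaces, matching
 * count_items() and the "%lf/%lf" parsing in config_output() below. */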
84 
85 AVFILTER_DEFINE_CLASS(compand);
86 
87 static av_cold int init(AVFilterContext *ctx)
88 {
89  CompandContext *s = ctx->priv;
90  s->pts = AV_NOPTS_VALUE;
91  return 0;
92 }
93 
94 static av_cold void uninit(AVFilterContext *ctx)
95 {
96  CompandContext *s = ctx->priv;
97 
98  av_freep(&s->channels);
99  av_freep(&s->segments);
100  av_frame_free(&s->delay_frame);
101 }
102 
103 static int query_formats(AVFilterContext *ctx)
104 {
105  static const enum AVSampleFormat sample_fmts[] = {
106  AV_SAMPLE_FMT_DBLP,
107  AV_SAMPLE_FMT_NONE
108  };
109  int ret = ff_set_common_all_channel_counts(ctx);
110  if (ret < 0)
111  return ret;
112 
113  ret = ff_set_common_formats_from_list(ctx, sample_fmts);
114  if (ret < 0)
115  return ret;
116 
117  return ff_set_common_all_samplerates(ctx);
118 }
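/* Only planar doubles (AV_SAMPLE_FMT_DBLP) are negotiated here, so the
 * processing loops below can cast extended_data[chan] to double * and treat
 * each channel as an independent plane. */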
119 
120 static void count_items(char *item_str, int *nb_items)
121 {
122  char *p;
123 
124  *nb_items = 1;
125  for (p = item_str; *p; p++) {
126  if (*p == ' ' || *p == '|')
127  (*nb_items)++;
128  }
129 }
130 
131 static void update_volume(ChanParam *cp, double in)
132 {
133  double delta = in - cp->volume;
134 
135  if (delta > 0.0)
136  cp->volume += delta * cp->attack;
137  else
138  cp->volume += delta * cp->decay;
139 }
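/* cp->attack and cp->decay are not the user-supplied times but per-sample
 * smoothing factors derived in config_output() as 1 - exp(-1 / (rate * t))
 * (or 1.0 for very short times): rising levels follow the attack factor,
 * falling levels the decay factor, i.e. a one-pole envelope follower.
 * For example, at 48 kHz an attack of 0.3 s gives roughly
 * 1 - exp(-1/14400) ~= 6.9e-5 per sample (illustrative numbers only). */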
140 
141 static double get_volume(CompandContext *s, double in_lin)
142 {
143  CompandSegment *cs;
144  double in_log, out_log;
145  int i;
146 
147  if (in_lin < s->in_min_lin)
148  return s->out_min_lin;
149 
150  in_log = log(in_lin);
151 
152  for (i = 1; i < s->nb_segments; i++)
153  if (in_log <= s->segments[i].x)
154  break;
155  cs = &s->segments[i - 1];
156  in_log -= cs->x;
157  out_log = cs->y + in_log * (cs->a * in_log + cs->b);
158 
159  return exp(out_log);
160 }
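/* The transfer function is evaluated in natural-log units: each
 * CompandSegment holds a quadratic out = y + b*t + a*t*t with t = in_log - x.
 * Straight segments have a == 0; the soft-knee pieces built in
 * config_output() use a non-zero 'a' to round the corners. */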
161 
162 static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
163 {
164  CompandContext *s = ctx->priv;
165  AVFilterLink *inlink = ctx->inputs[0];
166  const int channels = inlink->channels;
167  const int nb_samples = frame->nb_samples;
168  AVFrame *out_frame;
169  int chan, i;
170  int err;
171 
172  if (av_frame_is_writable(frame)) {
173  out_frame = frame;
174  } else {
175  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
176  if (!out_frame) {
177  av_frame_free(&frame);
178  return AVERROR(ENOMEM);
179  }
180  err = av_frame_copy_props(out_frame, frame);
181  if (err < 0) {
182  av_frame_free(&out_frame);
183  av_frame_free(&frame);
184  return err;
185  }
186  }
187 
188  for (chan = 0; chan < channels; chan++) {
189  const double *src = (double *)frame->extended_data[chan];
190  double *dst = (double *)out_frame->extended_data[chan];
191  ChanParam *cp = &s->channels[chan];
192 
193  for (i = 0; i < nb_samples; i++) {
194  update_volume(cp, fabs(src[i]));
195 
196  dst[i] = src[i] * get_volume(s, cp->volume);
197  }
198  }
199 
200  if (frame != out_frame)
201  av_frame_free(&frame);
202 
203  return ff_filter_frame(ctx->outputs[0], out_frame);
204 }
205 
206 #define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
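/* MOD() wraps the circular delay-buffer index without a division; this is
 * only valid because the index is advanced one element at a time and so can
 * never reach twice s->delay_samples. */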
207 
208 static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
209 {
210  CompandContext *s = ctx->priv;
211  AVFilterLink *inlink = ctx->inputs[0];
212  const int channels = inlink->channels;
213  const int nb_samples = frame->nb_samples;
214  int chan, i, av_uninit(dindex), oindex, av_uninit(count);
215  AVFrame *out_frame = NULL;
216  int err;
217 
218  if (s->pts == AV_NOPTS_VALUE) {
219  s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
220  }
221 
222  av_assert1(channels > 0); /* would corrupt delay_count and delay_index */
223 
224  for (chan = 0; chan < channels; chan++) {
225  AVFrame *delay_frame = s->delay_frame;
226  const double *src = (double *)frame->extended_data[chan];
227  double *dbuf = (double *)delay_frame->extended_data[chan];
228  ChanParam *cp = &s->channels[chan];
229  double *dst;
230 
231  count = s->delay_count;
232  dindex = s->delay_index;
233  for (i = 0, oindex = 0; i < nb_samples; i++) {
234  const double in = src[i];
235  update_volume(cp, fabs(in));
236 
237  if (count >= s->delay_samples) {
238  if (!out_frame) {
239  out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
240  if (!out_frame) {
241  av_frame_free(&frame);
242  return AVERROR(ENOMEM);
243  }
244  err = av_frame_copy_props(out_frame, frame);
245  if (err < 0) {
246  av_frame_free(&out_frame);
247  av_frame_free(&frame);
248  return err;
249  }
250  out_frame->pts = s->pts;
251  s->pts += av_rescale_q(nb_samples - i,
252  (AVRational){ 1, inlink->sample_rate },
253  inlink->time_base);
254  }
255 
256  dst = (double *)out_frame->extended_data[chan];
257  dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
258  } else {
259  count++;
260  }
261 
262  dbuf[dindex] = in;
263  dindex = MOD(dindex + 1, s->delay_samples);
264  }
265  }
266 
267  s->delay_count = count;
268  s->delay_index = dindex;
269 
270  av_frame_free(&frame);
271 
272  if (out_frame) {
273  err = ff_filter_frame(ctx->outputs[0], out_frame);
274  return err;
275  }
276 
277  return 0;
278 }
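/* Delayed path: every input sample is stored in delay_frame and the sample
 * actually emitted is the one buffered s->delay_samples earlier, so the
 * envelope follower effectively looks ahead by the 'delay' option. Output is
 * withheld or shortened until the buffer has filled once, and s->pts is
 * advanced with av_rescale_q() to match the samples actually sent. */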
279 
280 static int compand_drain(AVFilterLink *outlink)
281 {
282  AVFilterContext *ctx = outlink->src;
283  CompandContext *s = ctx->priv;
284  const int channels = outlink->channels;
285  AVFrame *frame = NULL;
286  int chan, i, dindex;
287 
288  /* 2048 is to limit output frame size during drain */
289  frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
290  if (!frame)
291  return AVERROR(ENOMEM);
292  frame->pts = s->pts;
293  s->pts += av_rescale_q(frame->nb_samples,
294  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
295 
296  av_assert0(channels > 0);
297  for (chan = 0; chan < channels; chan++) {
298  AVFrame *delay_frame = s->delay_frame;
299  double *dbuf = (double *)delay_frame->extended_data[chan];
300  double *dst = (double *)frame->extended_data[chan];
301  ChanParam *cp = &s->channels[chan];
302 
303  dindex = s->delay_index;
304  for (i = 0; i < frame->nb_samples; i++) {
305  dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
306  dindex = MOD(dindex + 1, s->delay_samples);
307  }
308  }
309  s->delay_count -= frame->nb_samples;
310  s->delay_index = dindex;
311 
312  return ff_filter_frame(outlink, frame);
313 }
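/* Called from request_frame() at EOF to flush the samples still held in
 * delay_frame, emitting at most 2048 samples per invocation. */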
314 
315 static int config_output(AVFilterLink *outlink)
316 {
317  AVFilterContext *ctx = outlink->src;
318  CompandContext *s = ctx->priv;
319  const int sample_rate = outlink->sample_rate;
320  double radius = s->curve_dB * M_LN10 / 20.0;
321  char *p, *saveptr = NULL;
322  const int channels = outlink->channels;
323  int nb_attacks, nb_decays, nb_points;
324  int new_nb_items, num;
325  int i;
326  int err;
327 
328 
329  count_items(s->attacks, &nb_attacks);
330  count_items(s->decays, &nb_decays);
331  count_items(s->points, &nb_points);
332 
333  if (channels <= 0) {
334  av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
335  return AVERROR(EINVAL);
336  }
337 
338  if (nb_attacks > channels || nb_decays > channels) {
339  av_log(ctx, AV_LOG_WARNING,
340  "Number of attacks/decays bigger than number of channels. Ignoring rest of entries.\n");
341  nb_attacks = FFMIN(nb_attacks, channels);
342  nb_decays = FFMIN(nb_decays, channels);
343  }
344 
345  uninit(ctx);
346 
347  s->channels = av_calloc(channels, sizeof(*s->channels));
348  s->nb_segments = (nb_points + 4) * 2;
349  s->segments = av_calloc(s->nb_segments, sizeof(*s->segments));
350 
351  if (!s->channels || !s->segments) {
352  uninit(ctx);
353  return AVERROR(ENOMEM);
354  }
355 
356  p = s->attacks;
357  for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
358  char *tstr = av_strtok(p, " |", &saveptr);
359  if (!tstr) {
360  uninit(ctx);
361  return AVERROR(EINVAL);
362  }
363  p = NULL;
364  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
365  if (s->channels[i].attack < 0) {
366  uninit(ctx);
367  return AVERROR(EINVAL);
368  }
369  }
370  nb_attacks = new_nb_items;
371 
372  p = s->decays;
373  for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
374  char *tstr = av_strtok(p, " |", &saveptr);
375  if (!tstr) {
376  uninit(ctx);
377  return AVERROR(EINVAL);
378  }
379  p = NULL;
380  new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
381  if (s->channels[i].decay < 0) {
382  uninit(ctx);
383  return AVERROR(EINVAL);
384  }
385  }
386  nb_decays = new_nb_items;
387 
388  if (nb_attacks != nb_decays) {
389  av_log(ctx, AV_LOG_ERROR,
390  "Number of attacks %d differs from number of decays %d.\n",
391  nb_attacks, nb_decays);
392  uninit(ctx);
393  return AVERROR(EINVAL);
394  }
395 
396  for (i = nb_decays; i < channels; i++) {
397  s->channels[i].attack = s->channels[nb_decays - 1].attack;
398  s->channels[i].decay = s->channels[nb_decays - 1].decay;
399  }
400 
401 #define S(x) s->segments[2 * ((x) + 1)]
402  p = s->points;
403  for (i = 0, new_nb_items = 0; i < nb_points; i++) {
404  char *tstr = av_strtok(p, " |", &saveptr);
405  p = NULL;
406  if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
407  av_log(ctx, AV_LOG_ERROR,
408  "Invalid and/or missing input/output value.\n");
409  uninit(ctx);
410  return AVERROR(EINVAL);
411  }
412  if (i && S(i - 1).x > S(i).x) {
413  av_log(ctx, AV_LOG_ERROR,
414  "Transfer function input values must be increasing.\n");
415  uninit(ctx);
416  return AVERROR(EINVAL);
417  }
418  S(i).y -= S(i).x;
419  av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
420  new_nb_items++;
421  }
422  num = new_nb_items;
423 
424  /* Add 0,0 if necessary */
425  if (num == 0 || S(num - 1).x)
426  num++;
427 
428 #undef S
429 #define S(x) s->segments[2 * (x)]
430  /* Add a tail off segment at the start */
431  S(0).x = S(1).x - 2 * s->curve_dB;
432  S(0).y = S(1).y;
433  num++;
434 
435  /* Join adjacent colinear segments */
436  for (i = 2; i < num; i++) {
437  double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
438  double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
439  int j;
440 
441  if (fabs(g1 - g2))
442  continue;
443  num--;
444  for (j = --i; j < num; j++)
445  S(j) = S(j + 1);
446  }
447 
448  for (i = 0; i < s->nb_segments; i += 2) {
449  s->segments[i].y += s->gain_dB;
450  s->segments[i].x *= M_LN10 / 20;
451  s->segments[i].y *= M_LN10 / 20;
452  }
453 
454 #define L(x) s->segments[i - (x)]
455  for (i = 4; i < s->nb_segments; i += 2) {
456  double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;
457 
458  L(4).a = 0;
459  L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
460 
461  L(2).a = 0;
462  L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);
463 
464  theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
465  len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
466  r = FFMIN(radius, len);
467  L(3).x = L(2).x - r * cos(theta);
468  L(3).y = L(2).y - r * sin(theta);
469 
470  theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
471  len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
472  r = FFMIN(radius, len / 2);
473  x = L(2).x + r * cos(theta);
474  y = L(2).y + r * sin(theta);
475 
476  cx = (L(3).x + L(2).x + x) / 3;
477  cy = (L(3).y + L(2).y + y) / 3;
478 
479  L(2).x = x;
480  L(2).y = y;
481 
482  in1 = cx - L(3).x;
483  out1 = cy - L(3).y;
484  in2 = L(2).x - L(3).x;
485  out2 = L(2).y - L(3).y;
486  L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
487  L(3).b = out1 / in1 - L(3).a * in1;
488  }
489  L(3).x = 0;
490  L(3).y = L(2).y;
491 
492  s->in_min_lin = exp(s->segments[1].x);
493  s->out_min_lin = exp(s->segments[1].y);
494 
495  for (i = 0; i < channels; i++) {
496  ChanParam *cp = &s->channels[i];
497 
498  if (cp->attack > 1.0 / sample_rate)
499  cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
500  else
501  cp->attack = 1.0;
502  if (cp->decay > 1.0 / sample_rate)
503  cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
504  else
505  cp->decay = 1.0;
506  cp->volume = ff_exp10(s->initial_volume / 20);
507  }
508 
509  s->delay_samples = s->delay * sample_rate;
510  if (s->delay_samples <= 0) {
511  s->compand = compand_nodelay;
512  return 0;
513  }
514 
515  s->delay_frame = av_frame_alloc();
516  if (!s->delay_frame) {
517  uninit(ctx);
518  return AVERROR(ENOMEM);
519  }
520 
521  s->delay_frame->format = outlink->format;
522  s->delay_frame->nb_samples = s->delay_samples;
523  s->delay_frame->channel_layout = outlink->channel_layout;
524 
525  err = av_frame_get_buffer(s->delay_frame, 0);
526  if (err)
527  return err;
528 
529  s->compand = compand_delay;
530  return 0;
531 }
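/* config_output() parses the attack/decay/points strings, shifts the curve
 * by gain_dB, converts dB values to natural-log units (dB * M_LN10 / 20),
 * fits quadratic soft-knee segments around each corner limited by the radius
 * derived from curve_dB, precomputes the per-channel smoothing factors and
 * initial volume, and selects compand_nodelay() or compand_delay() depending
 * on whether the delay amounts to at least one sample. */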
532 
533 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
534 {
535  AVFilterContext *ctx = inlink->dst;
536  CompandContext *s = ctx->priv;
537 
538  return s->compand(ctx, frame);
539 }
540 
541 static int request_frame(AVFilterLink *outlink)
542 {
543  AVFilterContext *ctx = outlink->src;
544  CompandContext *s = ctx->priv;
545  int ret = 0;
546 
547  ret = ff_request_frame(ctx->inputs[0]);
548 
549  if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
550  ret = compand_drain(outlink);
551 
552  return ret;
553 }
554 
555 static const AVFilterPad compand_inputs[] = {
556  {
557  .name = "default",
558  .type = AVMEDIA_TYPE_AUDIO,
559  .filter_frame = filter_frame,
560  },
561 };
562 
563 static const AVFilterPad compand_outputs[] = {
564  {
565  .name = "default",
566  .request_frame = request_frame,
567  .config_props = config_output,
568  .type = AVMEDIA_TYPE_AUDIO,
569  },
570 };
571 
572 
573 const AVFilter ff_af_compand = {
574  .name = "compand",
575  .description = NULL_IF_CONFIG_SMALL(
576  "Compress or expand audio dynamic range."),
577  .query_formats = query_formats,
578  .priv_size = sizeof(CompandContext),
579  .priv_class = &compand_class,
580  .init = init,
581  .uninit = uninit,
582  FILTER_INPUTS(compand_inputs),
583  FILTER_OUTPUTS(compand_outputs),
584 };