af_compand.c
/*
 * Copyright (c) 1999 Chris Bagwell
 * Copyright (c) 1999 Nick Bailey
 * Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 * Copyright (c) 2014 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio compand filter
 */

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct ChanParam {
    double attack;
    double decay;
    double volume;
} ChanParam;

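/*
 * One piece of the transfer function, expressed in the natural-log domain.
 * (x, y) is the point where the segment starts and (a, b) are its quadratic
 * and linear coefficients; get_volume() below evaluates
 * out = y + dx * (a * dx + b) with dx = log(input) - x.
 */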
typedef struct CompandSegment {
    double x, y;
    double a, b;
} CompandSegment;

typedef struct CompandContext {
    const AVClass *class;
    int nb_segments;
    char *attacks, *decays, *points;
    CompandSegment *segments;
    ChanParam *channels;
    double in_min_lin;
    double out_min_lin;
    double curve_dB;
    double gain_dB;
    double initial_volume;
    double delay;
    AVFrame *delay_frame;
    int delay_samples;
    int delay_count;
    int delay_index;
    int64_t pts;

    int (*compand)(AVFilterContext *ctx, AVFrame *frame);
} CompandContext;

#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(compand);
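
/*
 * Illustrative use (not part of this file): the options above map to an
 * ffmpeg filtergraph string such as
 *     compand=attacks=0.3:decays=0.8:points=-70/-70|-60/-20|1/0:gain=5:delay=0.1
 * where "points" is the dB-in/dB-out transfer curve and a non-zero "delay"
 * selects the look-ahead path implemented by compand_delay() below.
 */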

static av_cold int init(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;
    s->pts = AV_NOPTS_VALUE;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    av_freep(&s->channels);
    av_freep(&s->segments);
    av_frame_free(&s->delay_frame);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static void count_items(char *item_str, int *nb_items)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == ' ' || *p == '|')
            (*nb_items)++;
    }
}

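/*
 * Track the per-channel volume estimate: move it towards the instantaneous
 * input magnitude using the attack coefficient when the signal is rising and
 * the decay coefficient when it is falling (both precomputed in
 * config_output() as one-pole smoothing factors).
 */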
static void update_volume(ChanParam *cp, double in)
{
    double delta = in - cp->volume;

    if (delta > 0.0)
        cp->volume += delta * cp->attack;
    else
        cp->volume += delta * cp->decay;
}

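/*
 * Map a linear volume estimate through the transfer function: find the
 * segment whose start lies at or below log(in_lin), evaluate its quadratic
 * in the log domain and convert the result back with exp().
 */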
static double get_volume(CompandContext *s, double in_lin)
{
    CompandSegment *cs;
    double in_log, out_log;
    int i;

    if (in_lin < s->in_min_lin)
        return s->out_min_lin;

    in_log = log(in_lin);

    for (i = 1; i < s->nb_segments; i++)
        if (in_log <= s->segments[i].x)
            break;
    cs = &s->segments[i - 1];
    in_log -= cs->x;
    out_log = cs->y + in_log * (cs->a * in_log + cs->b);

    return exp(out_log);
}

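/*
 * Zero-delay path (delay option is 0): each output sample is the input
 * sample scaled by the gain derived from the running volume estimate of its
 * channel. The frame is processed in place when it is writable.
 */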
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const double *src = (double *)frame->extended_data[chan];
        double *dst = (double *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            update_volume(cp, fabs(src[i]));

            dst[i] = src[i] * get_volume(s, cp->volume);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}

#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

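/*
 * Delayed path: dbuf is a per-channel ring buffer of delay_samples entries.
 * Each incoming sample updates the volume estimate immediately, but the
 * sample that is scaled and emitted is the one written delay_samples
 * earlier, so gain changes lead the audio by the configured delay.
 */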
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, av_uninit(dindex), oindex, av_uninit(count);
    AVFrame *out_frame = NULL;
    int err;

    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    av_assert1(channels > 0); /* would corrupt delay_count and delay_index */

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const double *src = (double *)frame->extended_data[chan];
        double *dbuf = (double *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        double *dst;

        count  = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const double in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                            (AVRational){ 1, inlink->sample_rate },
                            inlink->time_base);
                }

                dst = (double *)out_frame->extended_data[chan];
                dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
            } else {
                count++;
            }

            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }

    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);

    if (out_frame) {
        err = ff_filter_frame(ctx->outputs[0], out_frame);
        return err;
    }

    return 0;
}

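/*
 * At EOF the ring buffers still hold delay_count unemitted samples per
 * channel; drain them in chunks of at most 2048 samples, scaled by the last
 * volume estimate of each channel.
 */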
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = outlink->channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    av_assert0(channels > 0);
    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        double *dbuf = (double *)delay_frame->extended_data[chan];
        double *dst = (double *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}

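/*
 * Parse the attacks/decays/points option strings, build the dB transfer
 * curve as a table of segments (straight pieces joined by soft-knee corners
 * of radius curve_dB), apply the output gain, convert the table to the
 * natural-log domain, and precompute the per-channel attack/decay smoothing
 * coefficients and initial volume.
 */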
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    double radius = s->curve_dB * M_LN10 / 20.0;
    char *p, *saveptr = NULL;
    const int channels = outlink->channels;
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks/decays bigger than number of channels.\n");
        return AVERROR(EINVAL);
    }

    uninit(ctx);

    s->channels = av_mallocz_array(channels, sizeof(*s->channels));
    s->nb_segments = (nb_points + 4) * 2;
    s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_attacks = new_nb_items;

    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
               "Number of attacks %d differs from number of decays %d.\n",
               nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

    for (i = nb_decays; i < channels; i++) {
        s->channels[i].attack = s->channels[nb_decays - 1].attack;
        s->channels[i].decay = s->channels[nb_decays - 1].decay;
    }

#define S(x) s->segments[2 * ((x) + 1)]
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        p = NULL;
        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                   "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        if (fabs(g1 - g2))
            continue;
        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1 = cx - L(3).x;
        out1 = cy - L(3).y;
        in2 = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    L(3).x = 0;
    L(3).y = L(2).y;

    s->in_min_lin = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);

    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = ff_exp10(s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format         = outlink->format;
    s->delay_frame->nb_samples     = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 32);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CompandContext *s = ctx->priv;

    return s->compand(ctx, frame);
}

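/*
 * Forward the request upstream; once the input reaches EOF, flush whatever
 * is still buffered in the delay line via compand_drain().
 */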
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    int ret = 0;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
        ret = compand_drain(outlink);

    return ret;
}

static const AVFilterPad compand_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad compand_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};


AVFilter ff_af_compand = {
    .name           = "compand",
    .description    = NULL_IF_CONFIG_SMALL(
            "Compress or expand audio dynamic range."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(CompandContext),
    .priv_class     = &compand_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = compand_inputs,
    .outputs        = compand_outputs,
};