FFmpeg
af_afade.c
1 /*
2  * Copyright (c) 2013-2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * fade audio filter
24  */
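/*
 * Illustrative usage (added note, not part of the upstream source): the
 * options defined further down map onto filtergraph strings such as
 *
 *   ffmpeg -i in.wav -af "afade=t=in:st=0:d=5" out.wav
 *   ffmpeg -i in.wav -af "afade=t=out:st=25:d=5:curve=squ" out.wav
 *   ffmpeg -i a.wav -i b.wav -filter_complex "acrossfade=d=3:c1=exp:c2=exp" out.wav
 *
 * Option names and defaults come from afade_options / acrossfade_options below.
 */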
25 
26 #include "config_components.h"
27 
28 #include "libavutil/opt.h"
29 #include "audio.h"
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "internal.h"
33 
34 typedef struct AudioFadeContext {
35  const AVClass *class;
36  int type;
37  int curve, curve2;
38  int64_t nb_samples;
39  int64_t start_sample;
40  int64_t duration;
41  int64_t start_time;
42  double silence;
43  double unity;
44  int overlap;
45  int cf0_eof;
46  int crossfade_is_over;
47  int64_t pts;
48 
49  void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
50  int nb_samples, int channels, int direction,
51  int64_t start, int64_t range, int curve,
52  double silence, double unity);
53  void (*scale_samples)(uint8_t **dst, uint8_t * const *src,
54  int nb_samples, int channels, double unity);
55  void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
56  uint8_t * const *cf1,
57  int nb_samples, int channels,
58  int curve0, int curve1);
59 } AudioFadeContext;
60 
61 enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, NB_CURVES };
62 
63 #define OFFSET(x) offsetof(AudioFadeContext, x)
64 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
65 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
66 
67 static const enum AVSampleFormat sample_fmts[] = {
68     AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
69     AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
70     AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
71     AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
72     AV_SAMPLE_FMT_NONE
73 };
74 
75 static double fade_gain(int curve, int64_t index, int64_t range, double silence, double unity)
76 {
77 #define CUBE(a) ((a)*(a)*(a))
78  double gain;
79 
80  gain = av_clipd(1.0 * index / range, 0, 1.0);
81 
82  switch (curve) {
83  case QSIN:
84  gain = sin(gain * M_PI / 2.0);
85  break;
86  case IQSIN:
87  /* 0.6... = 2 / M_PI */
88  gain = 0.6366197723675814 * asin(gain);
89  break;
90  case ESIN:
91  gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
92  break;
93  case HSIN:
94  gain = (1.0 - cos(gain * M_PI)) / 2.0;
95  break;
96  case IHSIN:
97  /* 0.3... = 1 / M_PI */
98  gain = 0.3183098861837907 * acos(1 - 2 * gain);
99  break;
100  case EXP:
101  /* -11.5... = 5*ln(0.1) */
102  gain = exp(-11.512925464970227 * (1 - gain));
103  break;
104  case LOG:
105  gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
106  break;
107  case PAR:
108  gain = 1 - sqrt(1 - gain);
109  break;
110  case IPAR:
111  gain = (1 - (1 - gain) * (1 - gain));
112  break;
113  case QUA:
114  gain *= gain;
115  break;
116  case CUB:
117  gain = CUBE(gain);
118  break;
119  case SQU:
120  gain = sqrt(gain);
121  break;
122  case CBR:
123  gain = cbrt(gain);
124  break;
125  case DESE:
126  gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
127  break;
128  case DESI:
129  gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
130  break;
131  case LOSI: {
132  const double a = 1. / (1. - 0.787) - 1;
133  double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
134  double B = 1. / (1.0 + exp(a));
135  double C = 1. / (1.0 + exp(0-a));
136  gain = (A - B) / (C - B);
137  }
138  break;
139  case SINC:
140  gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
141  break;
142  case ISINC:
143  gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
144  break;
145  case NONE:
146  gain = 1.0;
147  break;
148  }
149 
150  return silence + (unity - silence) * gain;
151 }
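/*
 * Worked example (added note): with curve == TRI (the default, which has no
 * switch case above and therefore keeps the clipped linear ramp), index = 300,
 * range = 1000, silence = 0.0 and unity = 1.0, the ramp is 300/1000 = 0.3 and
 * the returned gain is 0.0 + (1.0 - 0.0) * 0.3 = 0.3.  A non-zero 'silence'
 * or non-unit 'unity' simply rescales the curve into [silence, unity].
 */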
152 
153 #define FADE_PLANAR(name, type) \
154 static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
155  int nb_samples, int channels, int dir, \
156  int64_t start, int64_t range,int curve,\
157  double silence, double unity) \
158 { \
159  int i, c; \
160  \
161  for (i = 0; i < nb_samples; i++) { \
162  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
163  for (c = 0; c < channels; c++) { \
164  type *d = (type *)dst[c]; \
165  const type *s = (type *)src[c]; \
166  \
167  d[i] = s[i] * gain; \
168  } \
169  } \
170 }
171 
172 #define FADE(name, type) \
173 static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
174  int nb_samples, int channels, int dir, \
175  int64_t start, int64_t range, int curve, \
176  double silence, double unity) \
177 { \
178  type *d = (type *)dst[0]; \
179  const type *s = (type *)src[0]; \
180  int i, c, k = 0; \
181  \
182  for (i = 0; i < nb_samples; i++) { \
183  double gain = fade_gain(curve, start + i * dir,range,silence,unity);\
184  for (c = 0; c < channels; c++, k++) \
185  d[k] = s[k] * gain; \
186  } \
187 }
188 
189 FADE_PLANAR(dbl, double)
190 FADE_PLANAR(flt, float)
191 FADE_PLANAR(s16, int16_t)
192 FADE_PLANAR(s32, int32_t)
193 
194 FADE(dbl, double)
195 FADE(flt, float)
196 FADE(s16, int16_t)
197 FADE(s32, int32_t)
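/*
 * Added note: the FADE/FADE_PLANAR kernels evaluate fade_gain() once per
 * sample frame; dir = 1 makes the index ramp up from 'start' (fade-in) while
 * dir = -1 makes it ramp down (fade-out), matching how filter_frame() below
 * computes 'start' for each direction.  The *_PLANAR variants address one
 * plane per channel, the packed variants address interleaved samples.
 */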
198 
199 #define SCALE_PLANAR(name, type) \
200 static void scale_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
201  int nb_samples, int channels, \
202  double gain) \
203 { \
204  int i, c; \
205  \
206  for (i = 0; i < nb_samples; i++) { \
207  for (c = 0; c < channels; c++) { \
208  type *d = (type *)dst[c]; \
209  const type *s = (type *)src[c]; \
210  \
211  d[i] = s[i] * gain; \
212  } \
213  } \
214 }
215 
216 #define SCALE(name, type) \
217 static void scale_samples_## name (uint8_t **dst, uint8_t * const *src, \
218  int nb_samples, int channels, double gain)\
219 { \
220  type *d = (type *)dst[0]; \
221  const type *s = (type *)src[0]; \
222  int i, c, k = 0; \
223  \
224  for (i = 0; i < nb_samples; i++) { \
225  for (c = 0; c < channels; c++, k++) \
226  d[k] = s[k] * gain; \
227  } \
228 }
229 
230 SCALE_PLANAR(dbl, double)
231 SCALE_PLANAR(flt, float)
232 SCALE_PLANAR(s16, int16_t)
233 SCALE_PLANAR(s32, int32_t)
234 
235 SCALE(dbl, double)
236 SCALE(flt, float)
237 SCALE(s16, int16_t)
238 SCALE(s32, int32_t)
239 
240 static int config_output(AVFilterLink *outlink)
241 {
242  AVFilterContext *ctx = outlink->src;
243  AudioFadeContext *s = ctx->priv;
244 
245  switch (outlink->format) {
246  case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl;
247  s->scale_samples = scale_samples_dbl;
248  break;
249  case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp;
250  s->scale_samples = scale_samples_dblp;
251  break;
252  case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt;
253  s->scale_samples = scale_samples_flt;
254  break;
255  case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp;
256  s->scale_samples = scale_samples_fltp;
257  break;
258  case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16;
259  s->scale_samples = scale_samples_s16;
260  break;
261  case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p;
262  s->scale_samples = scale_samples_s16p;
263  break;
264  case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32;
265  s->scale_samples = scale_samples_s32;
266  break;
267  case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p;
268  s->scale_samples = scale_samples_s32p;
269  break;
270  }
271 
272  if (s->duration)
273  s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
274  s->duration = 0;
275  if (s->start_time)
276  s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
277  s->start_time = 0;
278 
279  return 0;
280 }
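/*
 * Added note: a 'duration' or 'start_time' given in AV_TIME_BASE units
 * (microseconds) is converted to samples at the output rate here; e.g. a
 * 5 second fade at 44100 Hz becomes av_rescale(5000000, 44100, 1000000)
 * = 220500 samples.
 */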
281 
282 #if CONFIG_AFADE_FILTER
283 
284 static const AVOption afade_options[] = {
285  { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
286  { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
287  { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, TFLAGS, "type" },
288  { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, TFLAGS, "type" },
289  { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
290  { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
291  { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
292  { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
293  { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
294  { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
295  { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
296  { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
297  { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
298  { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
299  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, TFLAGS, "curve" },
300  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, TFLAGS, "curve" },
301  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, TFLAGS, "curve" },
302  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, TFLAGS, "curve" },
303  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, TFLAGS, "curve" },
304  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, TFLAGS, "curve" },
305  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, TFLAGS, "curve" },
306  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, TFLAGS, "curve" },
307  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, TFLAGS, "curve" },
308  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, TFLAGS, "curve" },
309  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, TFLAGS, "curve" },
310  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, TFLAGS, "curve" },
311  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, TFLAGS, "curve" },
312  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, TFLAGS, "curve" },
313  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, TFLAGS, "curve" },
314  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, TFLAGS, "curve" },
315  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, TFLAGS, "curve" },
316  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, TFLAGS, "curve" },
317  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, TFLAGS, "curve" },
318  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, TFLAGS, "curve" },
319  { "silence", "set the silence gain", OFFSET(silence), AV_OPT_TYPE_DOUBLE, {.dbl = 0 }, 0, 1, TFLAGS },
320  { "unity", "set the unity gain", OFFSET(unity), AV_OPT_TYPE_DOUBLE, {.dbl = 1 }, 0, 1, TFLAGS },
321  { NULL }
322 };
323 
324 AVFILTER_DEFINE_CLASS(afade);
325 
326 static av_cold int init(AVFilterContext *ctx)
327 {
328  AudioFadeContext *s = ctx->priv;
329 
330  if (INT64_MAX - s->nb_samples < s->start_sample)
331  return AVERROR(EINVAL);
332 
333  return 0;
334 }
335 
336 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
337 {
338  AudioFadeContext *s = inlink->dst->priv;
339  AVFilterLink *outlink = inlink->dst->outputs[0];
340  int nb_samples = buf->nb_samples;
341  AVFrame *out_buf;
342  int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
343 
344  if (s->unity == 1.0 &&
345  ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
346  ( s->type && (cur_sample + nb_samples < s->start_sample))))
347  return ff_filter_frame(outlink, buf);
348 
349  if (av_frame_is_writable(buf)) {
350  out_buf = buf;
351  } else {
352  out_buf = ff_get_audio_buffer(outlink, nb_samples);
353  if (!out_buf)
354  return AVERROR(ENOMEM);
355  av_frame_copy_props(out_buf, buf);
356  }
357 
358  if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
359  ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
360  if (s->silence == 0.) {
361  av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
362  out_buf->ch_layout.nb_channels, out_buf->format);
363  } else {
364  s->scale_samples(out_buf->extended_data, buf->extended_data,
365  nb_samples, buf->ch_layout.nb_channels,
366  s->silence);
367  }
368  } else if (( s->type && (cur_sample + nb_samples < s->start_sample)) ||
369  (!s->type && (s->start_sample + s->nb_samples < cur_sample))) {
370  s->scale_samples(out_buf->extended_data, buf->extended_data,
371  nb_samples, buf->ch_layout.nb_channels,
372  s->unity);
373  } else {
374  int64_t start;
375 
376  if (!s->type)
377  start = cur_sample - s->start_sample;
378  else
379  start = s->start_sample + s->nb_samples - cur_sample;
380 
381  s->fade_samples(out_buf->extended_data, buf->extended_data,
382  nb_samples, buf->ch_layout.nb_channels,
383  s->type ? -1 : 1, start,
384  s->nb_samples, s->curve, s->silence, s->unity);
385  }
386 
387  if (buf != out_buf)
388  av_frame_free(&buf);
389 
390  return ff_filter_frame(outlink, out_buf);
391 }
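/*
 * Added note: filter_frame() classifies each frame against the window
 * [start_sample, start_sample + nb_samples): frames entirely in the silent
 * part of the fade are zeroed (or scaled by 'silence'), frames entirely in
 * the full-level part are scaled by 'unity' (or passed through untouched when
 * unity == 1.0), and only frames overlapping the window go through
 * fade_samples().
 */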
392 
393 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
394  char *res, int res_len, int flags)
395 {
396  int ret;
397 
398  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
399  if (ret < 0)
400  return ret;
401 
402  return config_output(ctx->outputs[0]);
403 }
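/*
 * Added note: because the afade options use TFLAGS (which includes
 * AV_OPT_FLAG_RUNTIME_PARAM), they can be changed while the graph runs and
 * process_command() re-runs config_output() to pick the change up.  A rough
 * sketch using the public API (the instance name "afade" is illustrative):
 *
 *   // switch the curve of the filter instance named "afade" at runtime
 *   avfilter_graph_send_command(graph, "afade", "curve", "squ", NULL, 0, 0);
 */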
404 
405 static const AVFilterPad avfilter_af_afade_inputs[] = {
406  {
407  .name = "default",
408  .type = AVMEDIA_TYPE_AUDIO,
409  .filter_frame = filter_frame,
410  },
411 };
412 
413 static const AVFilterPad avfilter_af_afade_outputs[] = {
414  {
415  .name = "default",
416  .type = AVMEDIA_TYPE_AUDIO,
417  .config_props = config_output,
418  },
419 };
420 
421 const AVFilter ff_af_afade = {
422  .name = "afade",
423  .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
424  .priv_size = sizeof(AudioFadeContext),
425  .init = init,
426  FILTER_INPUTS(avfilter_af_afade_inputs),
427  FILTER_OUTPUTS(avfilter_af_afade_outputs),
428     FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
429     .priv_class = &afade_class,
430     .process_command = process_command,
431     .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
432 };
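/*
 * Programmatic sketch (added, assumptions marked): an afade instance can be
 * created through the filtergraph API roughly as follows, where 'graph' is an
 * already-allocated AVFilterGraph and "fade" is an arbitrary instance name:
 *
 *   const AVFilter *af = avfilter_get_by_name("afade");
 *   AVFilterContext *fctx = NULL;
 *   int err = avfilter_graph_create_filter(&fctx, af, "fade",
 *                                          "t=out:st=25:d=5:curve=squ",
 *                                          NULL, graph);
 */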
433 
434 #endif /* CONFIG_AFADE_FILTER */
435 
436 #if CONFIG_ACROSSFADE_FILTER
437 
438 static const AVOption acrossfade_options[] = {
439  { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
440  { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
441  { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
442  { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
443  { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
444  { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
445  { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
446  { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
447  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, FLAGS, "curve" },
448  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
449  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
450  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
451  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
452  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
453  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
454  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
455  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
456  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
457  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
458  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
459  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, "curve" },
460  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
461  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
462  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, "curve" },
463  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, "curve" },
464  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
465  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, FLAGS, "curve" },
466  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, FLAGS, "curve" },
467  { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
468  { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
469  { NULL }
470 };
471 
472 AVFILTER_DEFINE_CLASS(acrossfade);
473 
474 #define CROSSFADE_PLANAR(name, type) \
475 static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
476  uint8_t * const *cf1, \
477  int nb_samples, int channels, \
478  int curve0, int curve1) \
479 { \
480  int i, c; \
481  \
482  for (i = 0; i < nb_samples; i++) { \
483  double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples,0.,1.);\
484  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
485  for (c = 0; c < channels; c++) { \
486  type *d = (type *)dst[c]; \
487  const type *s0 = (type *)cf0[c]; \
488  const type *s1 = (type *)cf1[c]; \
489  \
490  d[i] = s0[i] * gain0 + s1[i] * gain1; \
491  } \
492  } \
493 }
494 
495 #define CROSSFADE(name, type) \
496 static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
497  uint8_t * const *cf1, \
498  int nb_samples, int channels, \
499  int curve0, int curve1) \
500 { \
501  type *d = (type *)dst[0]; \
502  const type *s0 = (type *)cf0[0]; \
503  const type *s1 = (type *)cf1[0]; \
504  int i, c, k = 0; \
505  \
506  for (i = 0; i < nb_samples; i++) { \
507  double gain0 = fade_gain(curve0, nb_samples - 1-i,nb_samples,0.,1.);\
508  double gain1 = fade_gain(curve1, i, nb_samples, 0., 1.); \
509  for (c = 0; c < channels; c++, k++) \
510  d[k] = s0[k] * gain0 + s1[k] * gain1; \
511  } \
512 }
513 
514 CROSSFADE_PLANAR(dbl, double)
515 CROSSFADE_PLANAR(flt, float)
516 CROSSFADE_PLANAR(s16, int16_t)
517 CROSSFADE_PLANAR(s32, int32_t)
518 
519 CROSSFADE(dbl, double)
520 CROSSFADE(flt, float)
521 CROSSFADE(s16, int16_t)
522 CROSSFADE(s32, int32_t)
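/*
 * Added note: while crossfading, stream 0 uses fade_gain(curve0,
 * nb_samples - 1 - i, nb_samples) (a falling gain) and stream 1 uses
 * fade_gain(curve1, i, nb_samples) (a rising gain).  With 'tri' the two gains
 * sum to roughly 1 (constant amplitude); with 'qsin' their squares sum to
 * roughly 1 (approximately constant power).
 */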
523 
524 static int activate(AVFilterContext *ctx)
525 {
526  AudioFadeContext *s = ctx->priv;
527  AVFilterLink *outlink = ctx->outputs[0];
528  AVFrame *in = NULL, *out, *cf[2] = { NULL };
529  int ret = 0, nb_samples, status;
530  int64_t pts;
531 
532     FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
533 
534  if (s->crossfade_is_over) {
535  ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
536  if (ret > 0) {
537  in->pts = s->pts;
538  s->pts += av_rescale_q(in->nb_samples,
539  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
540  return ff_filter_frame(outlink, in);
541  } else if (ret < 0) {
542  return ret;
543  } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
544  ff_outlink_set_status(ctx->outputs[0], status, pts);
545  return 0;
546  } else if (!ret) {
547  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
548  ff_inlink_request_frame(ctx->inputs[1]);
549  return 0;
550  }
551  }
552  }
553 
554  nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
555  if (nb_samples > s->nb_samples) {
556  nb_samples -= s->nb_samples;
557  ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
558  if (ret < 0)
559  return ret;
560  in->pts = s->pts;
561  s->pts += av_rescale_q(in->nb_samples,
562  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
563  return ff_filter_frame(outlink, in);
564  } else if (s->cf0_eof && nb_samples >= s->nb_samples &&
565  ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) {
566  if (s->overlap) {
567  out = ff_get_audio_buffer(outlink, s->nb_samples);
568  if (!out)
569  return AVERROR(ENOMEM);
570 
571  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
572  if (ret < 0) {
573  av_frame_free(&out);
574  return ret;
575  }
576 
577  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
578  if (ret < 0) {
579  av_frame_free(&out);
580  return ret;
581  }
582 
583  s->crossfade_samples(out->extended_data, cf[0]->extended_data,
584  cf[1]->extended_data,
585  s->nb_samples, out->ch_layout.nb_channels,
586  s->curve, s->curve2);
587  out->pts = s->pts;
588  s->pts += av_rescale_q(s->nb_samples,
589  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
590  s->crossfade_is_over = 1;
591  av_frame_free(&cf[0]);
592  av_frame_free(&cf[1]);
593  return ff_filter_frame(outlink, out);
594  } else {
595  out = ff_get_audio_buffer(outlink, s->nb_samples);
596  if (!out)
597  return AVERROR(ENOMEM);
598 
599  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
600  if (ret < 0) {
601  av_frame_free(&out);
602  return ret;
603  }
604 
605  s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
606  outlink->ch_layout.nb_channels, -1, s->nb_samples - 1, s->nb_samples, s->curve, 0., 1.);
607  out->pts = s->pts;
608  s->pts += av_rescale_q(s->nb_samples,
609  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
610  av_frame_free(&cf[0]);
611  ret = ff_filter_frame(outlink, out);
612  if (ret < 0)
613  return ret;
614 
615  out = ff_get_audio_buffer(outlink, s->nb_samples);
616  if (!out)
617  return AVERROR(ENOMEM);
618 
619  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
620  if (ret < 0) {
621  av_frame_free(&out);
622  return ret;
623  }
624 
625  s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
626  outlink->ch_layout.nb_channels, 1, 0, s->nb_samples, s->curve2, 0., 1.);
627  out->pts = s->pts;
628  s->pts += av_rescale_q(s->nb_samples,
629  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
630  s->crossfade_is_over = 1;
631  av_frame_free(&cf[1]);
632  return ff_filter_frame(outlink, out);
633  }
634  } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
635  if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) {
636  s->cf0_eof = 1;
637  }
638  if (ff_outlink_get_status(ctx->inputs[1])) {
639             ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
640             return 0;
641  }
642  if (!s->cf0_eof)
643  ff_inlink_request_frame(ctx->inputs[0]);
644  else
645  ff_inlink_request_frame(ctx->inputs[1]);
646  return 0;
647  }
648 
649  return ret;
650 }
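/*
 * Added note: activate() passes the first input through unchanged while more
 * than nb_samples are queued on it; once input 0 hits EOF and both inputs
 * have at least nb_samples queued, it either mixes the two blocks with
 * crossfade_samples() (overlap=1) or emits a fade-out of input 0 followed by
 * a fade-in of input 1, sets crossfade_is_over, and from then on forwards
 * input 1 with retimed pts.
 */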
651 
652 static int acrossfade_config_output(AVFilterLink *outlink)
653 {
654  AVFilterContext *ctx = outlink->src;
655  AudioFadeContext *s = ctx->priv;
656 
657  outlink->time_base = ctx->inputs[0]->time_base;
658 
659  switch (outlink->format) {
660  case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
661  case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
662  case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
663  case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
664  case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
665  case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
666  case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
667  case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
668  }
669 
670  config_output(outlink);
671 
672  return 0;
673 }
674 
675 static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
676  {
677  .name = "crossfade0",
678  .type = AVMEDIA_TYPE_AUDIO,
679  },
680  {
681  .name = "crossfade1",
682  .type = AVMEDIA_TYPE_AUDIO,
683  },
684 };
685 
686 static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
687  {
688  .name = "default",
689  .type = AVMEDIA_TYPE_AUDIO,
690  .config_props = acrossfade_config_output,
691  },
692 };
693 
694 const AVFilter ff_af_acrossfade = {
695  .name = "acrossfade",
696  .description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
697  .priv_size = sizeof(AudioFadeContext),
698  .activate = activate,
699  .priv_class = &acrossfade_class,
700  FILTER_INPUTS(avfilter_af_acrossfade_inputs),
701  FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
702     FILTER_SAMPLEFMTS_ARRAY(sample_fmts),
703 };
704 
705 #endif /* CONFIG_ACROSSFADE_FILTER */