FFmpeg
af_afade.c
1 /*
2  * Copyright (c) 2013-2015 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * fade audio filter
24  */
25 
26 #include "libavutil/opt.h"
27 #include "audio.h"
28 #include "avfilter.h"
29 #include "filters.h"
30 #include "internal.h"
31 
32 typedef struct AudioFadeContext {
33  const AVClass *class;
34  int type;
35  int curve, curve2;
36  int64_t nb_samples;
37  int64_t start_sample;
38  int64_t duration;
39  int64_t start_time;
40  int overlap;
41  int cf0_eof;
42  int crossfade_is_over;
43  int64_t pts;
44 
45  void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
46  int nb_samples, int channels, int direction,
47  int64_t start, int64_t range, int curve);
48  void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
49  uint8_t * const *cf1,
50  int nb_samples, int channels,
51  int curve0, int curve1);
52 } AudioFadeContext;
53 
54 enum CurveType { NONE = -1, TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, SINC, ISINC, NB_CURVES };
55 
56 #define OFFSET(x) offsetof(AudioFadeContext, x)
57 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
58 #define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
59 
60 static int query_formats(AVFilterContext *ctx)
61 {
62  static const enum AVSampleFormat sample_fmts[] = {
63  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
64  AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
65  AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
66  AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
67  AV_SAMPLE_FMT_NONE
68  };
69  int ret = ff_set_common_formats_from_list(ctx, sample_fmts);
70  if (ret < 0)
71  return ret;
72 
73  ret = ff_set_common_all_channel_counts(ctx);
74  if (ret < 0)
75  return ret;
76 
77  return ff_set_common_all_samplerates(ctx);
78 }
79 
80 static double fade_gain(int curve, int64_t index, int64_t range)
81 {
82 #define CUBE(a) ((a)*(a)*(a))
83  double gain;
84 
85  gain = av_clipd(1.0 * index / range, 0, 1.0);
86 
87  switch (curve) {
88  case QSIN:
89  gain = sin(gain * M_PI / 2.0);
90  break;
91  case IQSIN:
92  /* 0.6... = 2 / M_PI */
93  gain = 0.6366197723675814 * asin(gain);
94  break;
95  case ESIN:
96  gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
97  break;
98  case HSIN:
99  gain = (1.0 - cos(gain * M_PI)) / 2.0;
100  break;
101  case IHSIN:
102  /* 0.3... = 1 / M_PI */
103  gain = 0.3183098861837907 * acos(1 - 2 * gain);
104  break;
105  case EXP:
106  /* -11.5... = 5*ln(0.1) */
107  gain = exp(-11.512925464970227 * (1 - gain));
108  break;
109  case LOG:
110  gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
111  break;
112  case PAR:
113  gain = 1 - sqrt(1 - gain);
114  break;
115  case IPAR:
116  gain = (1 - (1 - gain) * (1 - gain));
117  break;
118  case QUA:
119  gain *= gain;
120  break;
121  case CUB:
122  gain = CUBE(gain);
123  break;
124  case SQU:
125  gain = sqrt(gain);
126  break;
127  case CBR:
128  gain = cbrt(gain);
129  break;
130  case DESE:
131  gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
132  break;
133  case DESI:
134  gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
135  break;
136  case LOSI: {
137  const double a = 1. / (1. - 0.787) - 1;
138  double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
139  double B = 1. / (1.0 + exp(a));
140  double C = 1. / (1.0 + exp(0-a));
141  gain = (A - B) / (C - B);
142  }
143  break;
144  case SINC:
145  gain = gain >= 1.0 ? 1.0 : sin(M_PI * (1.0 - gain)) / (M_PI * (1.0 - gain));
146  break;
147  case ISINC:
148  gain = gain <= 0.0 ? 0.0 : 1.0 - sin(M_PI * gain) / (M_PI * gain);
149  break;
150  case NONE:
151  gain = 1.0;
152  break;
153  }
154 
155  return gain;
156 }
157 
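fade_gain() maps the normalized position index / range, clipped to [0, 1], onto a gain factor for the selected curve (linear for tri, sin(x * pi / 2) for qsin, and so on). A minimal standalone sketch, independent of FFmpeg and with a simplified clip in place of av_clipd() — demo_gain is a made-up helper, not part of the filter — reproducing the tri and qsin cases:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* demo_gain: simplified stand-in for fade_gain(), covering only the
 * linear (tri) and quarter-sine (qsin) curves. */
static double demo_gain(int quarter_sine, long index, long range)
{
    double g = (double)index / range;
    g = g < 0.0 ? 0.0 : (g > 1.0 ? 1.0 : g);   /* clip to [0,1], as av_clipd() does */
    return quarter_sine ? sin(g * M_PI / 2.0) : g;
}

int main(void)
{
    for (long i = 0; i <= 4; i++)
        printf("pos %.2f  tri %.3f  qsin %.3f\n",
               i / 4.0, demo_gain(0, i, 4), demo_gain(1, i, 4));
    return 0;
}

With index running from 0 to range the gain rises from 0 to 1; the fade kernels below evaluate it once per output sample, and fade-outs pass direction -1 so the index walks backwards.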
158 #define FADE_PLANAR(name, type) \
159 static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
160  int nb_samples, int channels, int dir, \
161  int64_t start, int64_t range, int curve) \
162 { \
163  int i, c; \
164  \
165  for (i = 0; i < nb_samples; i++) { \
166  double gain = fade_gain(curve, start + i * dir, range); \
167  for (c = 0; c < channels; c++) { \
168  type *d = (type *)dst[c]; \
169  const type *s = (type *)src[c]; \
170  \
171  d[i] = s[i] * gain; \
172  } \
173  } \
174 }
175 
176 #define FADE(name, type) \
177 static void fade_samples_## name (uint8_t **dst, uint8_t * const *src, \
178  int nb_samples, int channels, int dir, \
179  int64_t start, int64_t range, int curve) \
180 { \
181  type *d = (type *)dst[0]; \
182  const type *s = (type *)src[0]; \
183  int i, c, k = 0; \
184  \
185  for (i = 0; i < nb_samples; i++) { \
186  double gain = fade_gain(curve, start + i * dir, range); \
187  for (c = 0; c < channels; c++, k++) \
188  d[k] = s[k] * gain; \
189  } \
190 }
191 
192 FADE_PLANAR(dbl, double)
193 FADE_PLANAR(flt, float)
194 FADE_PLANAR(s16, int16_t)
195 FADE_PLANAR(s32, int32_t)
196 
197 FADE(dbl, double)
198 FADE(flt, float)
199 FADE(s16, int16_t)
200 FADE(s32, int32_t)
201 
202 static int config_output(AVFilterLink *outlink)
203 {
204  AVFilterContext *ctx = outlink->src;
205  AudioFadeContext *s = ctx->priv;
206 
207  switch (outlink->format) {
208  case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
209  case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
210  case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
211  case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
212  case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
213  case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
214  case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
215  case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
216  }
217 
218  if (s->duration)
219  s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
220  s->duration = 0;
221  if (s->start_time)
222  s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);
223  s->start_time = 0;
224 
225  return 0;
226 }
227 
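config_output() converts the duration/start_time options, which AV_OPT_TYPE_DURATION stores in AV_TIME_BASE (microsecond) units, into sample counts at the output sample rate via av_rescale(). A small self-contained sketch of that conversion (the values here are arbitrary examples):

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>       /* AV_TIME_BASE */
#include <libavutil/mathematics.h>  /* av_rescale() */

int main(void)
{
    int64_t duration_us = 2500000;  /* 2.5 seconds expressed in AV_TIME_BASE units */
    int sample_rate     = 48000;

    /* 2500000 * 48000 / 1000000 = 120000 samples */
    printf("%"PRId64" samples\n", av_rescale(duration_us, sample_rate, AV_TIME_BASE));
    return 0;
}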
228 #if CONFIG_AFADE_FILTER
229 
230 static const AVOption afade_options[] = {
231  { "type", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
232  { "t", "set the fade direction", OFFSET(type), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, TFLAGS, "type" },
233  { "in", "fade-in", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, TFLAGS, "type" },
234  { "out", "fade-out", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, TFLAGS, "type" },
235  { "start_sample", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
236  { "ss", "set number of first sample to start fading", OFFSET(start_sample), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
237  { "nb_samples", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
238  { "ns", "set number of samples for fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT64, {.i64 = 44100}, 1, INT64_MAX, TFLAGS },
239  { "start_time", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
240  { "st", "set time to start fading", OFFSET(start_time), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
241  { "duration", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
242  { "d", "set fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, INT64_MAX, TFLAGS },
243  { "curve", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
244  { "c", "set fade curve type", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, TFLAGS, "curve" },
245  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, TFLAGS, "curve" },
246  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, TFLAGS, "curve" },
247  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, TFLAGS, "curve" },
248  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, TFLAGS, "curve" },
249  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, TFLAGS, "curve" },
250  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, TFLAGS, "curve" },
251  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, TFLAGS, "curve" },
252  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, TFLAGS, "curve" },
253  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, TFLAGS, "curve" },
254  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, TFLAGS, "curve" },
255  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, TFLAGS, "curve" },
256  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, TFLAGS, "curve" },
257  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, TFLAGS, "curve" },
258  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, TFLAGS, "curve" },
259  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, TFLAGS, "curve" },
260  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, TFLAGS, "curve" },
261  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, TFLAGS, "curve" },
262  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, TFLAGS, "curve" },
263  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, TFLAGS, "curve" },
264  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, TFLAGS, "curve" },
265  { NULL }
266 };
267 
268 AVFILTER_DEFINE_CLASS(afade);
269 
270 static av_cold int init(AVFilterContext *ctx)
271 {
272  AudioFadeContext *s = ctx->priv;
273 
274  if (INT64_MAX - s->nb_samples < s->start_sample)
275  return AVERROR(EINVAL);
276 
277  return 0;
278 }
279 
280 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
281 {
282  AudioFadeContext *s = inlink->dst->priv;
283  AVFilterLink *outlink = inlink->dst->outputs[0];
284  int nb_samples = buf->nb_samples;
285  AVFrame *out_buf;
286  int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});
287 
288  if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
289  ( s->type && (cur_sample + nb_samples < s->start_sample)))
290  return ff_filter_frame(outlink, buf);
291 
292  if (av_frame_is_writable(buf)) {
293  out_buf = buf;
294  } else {
295  out_buf = ff_get_audio_buffer(outlink, nb_samples);
296  if (!out_buf)
297  return AVERROR(ENOMEM);
298  av_frame_copy_props(out_buf, buf);
299  }
300 
301  if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
302  ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
303  av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
304  out_buf->channels, out_buf->format);
305  } else {
306  int64_t start;
307 
308  if (!s->type)
309  start = cur_sample - s->start_sample;
310  else
311  start = s->start_sample + s->nb_samples - cur_sample;
312 
313  s->fade_samples(out_buf->extended_data, buf->extended_data,
314  nb_samples, buf->channels,
315  s->type ? -1 : 1, start,
316  s->nb_samples, s->curve);
317  }
318 
319  if (buf != out_buf)
320  av_frame_free(&buf);
321 
322  return ff_filter_frame(outlink, out_buf);
323 }
324 
325 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
326  char *res, int res_len, int flags)
327 {
328  int ret;
329 
330  ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
331  if (ret < 0)
332  return ret;
333 
334  return config_output(ctx->outputs[0]);
335 }
336 
337 static const AVFilterPad avfilter_af_afade_inputs[] = {
338  {
339  .name = "default",
340  .type = AVMEDIA_TYPE_AUDIO,
341  .filter_frame = filter_frame,
342  },
343 };
344 
345 static const AVFilterPad avfilter_af_afade_outputs[] = {
346  {
347  .name = "default",
348  .type = AVMEDIA_TYPE_AUDIO,
349  .config_props = config_output,
350  },
351 };
352 
353 const AVFilter ff_af_afade = {
354  .name = "afade",
355  .description = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
356  .query_formats = query_formats,
357  .priv_size = sizeof(AudioFadeContext),
358  .init = init,
359  FILTER_INPUTS(avfilter_af_afade_inputs),
360  FILTER_OUTPUTS(avfilter_af_afade_outputs),
361  .priv_class = &afade_class,
362  .process_command = process_command,
363  .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
364 };
365 
366 #endif /* CONFIG_AFADE_FILTER */
367 
368 #if CONFIG_ACROSSFADE_FILTER
369 
370 static const AVOption acrossfade_options[] = {
371  { "nb_samples", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
372  { "ns", "set number of samples for cross fade duration", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
373  { "duration", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
374  { "d", "set cross fade duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0 }, 0, 60000000, FLAGS },
375  { "overlap", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
376  { "o", "overlap 1st stream end with 2nd stream start", OFFSET(overlap), AV_OPT_TYPE_BOOL, {.i64 = 1 }, 0, 1, FLAGS },
377  { "curve1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
378  { "c1", "set fade curve type for 1st stream", OFFSET(curve), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
379  { "nofade", "no fade; keep audio as-is", 0, AV_OPT_TYPE_CONST, {.i64 = NONE }, 0, 0, FLAGS, "curve" },
380  { "tri", "linear slope", 0, AV_OPT_TYPE_CONST, {.i64 = TRI }, 0, 0, FLAGS, "curve" },
381  { "qsin", "quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
382  { "esin", "exponential sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
383  { "hsin", "half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
384  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64 = LOG }, 0, 0, FLAGS, "curve" },
385  { "ipar", "inverted parabola", 0, AV_OPT_TYPE_CONST, {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
386  { "qua", "quadratic", 0, AV_OPT_TYPE_CONST, {.i64 = QUA }, 0, 0, FLAGS, "curve" },
387  { "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
388  { "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
389  { "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
390  { "par", "parabola", 0, AV_OPT_TYPE_CONST, {.i64 = PAR }, 0, 0, FLAGS, "curve" },
391  { "exp", "exponential", 0, AV_OPT_TYPE_CONST, {.i64 = EXP }, 0, 0, FLAGS, "curve" },
392  { "iqsin", "inverted quarter of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
393  { "ihsin", "inverted half of sine wave", 0, AV_OPT_TYPE_CONST, {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
394  { "dese", "double-exponential seat", 0, AV_OPT_TYPE_CONST, {.i64 = DESE }, 0, 0, FLAGS, "curve" },
395  { "desi", "double-exponential sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = DESI }, 0, 0, FLAGS, "curve" },
396  { "losi", "logistic sigmoid", 0, AV_OPT_TYPE_CONST, {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
397  { "sinc", "sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = SINC }, 0, 0, FLAGS, "curve" },
398  { "isinc", "inverted sine cardinal function", 0, AV_OPT_TYPE_CONST, {.i64 = ISINC}, 0, 0, FLAGS, "curve" },
399  { "curve2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
400  { "c2", "set fade curve type for 2nd stream", OFFSET(curve2), AV_OPT_TYPE_INT, {.i64 = TRI }, NONE, NB_CURVES - 1, FLAGS, "curve" },
401  { NULL }
402 };
403 
404 AVFILTER_DEFINE_CLASS(acrossfade);
405 
406 #define CROSSFADE_PLANAR(name, type) \
407 static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
408  uint8_t * const *cf1, \
409  int nb_samples, int channels, \
410  int curve0, int curve1) \
411 { \
412  int i, c; \
413  \
414  for (i = 0; i < nb_samples; i++) { \
415  double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples); \
416  double gain1 = fade_gain(curve1, i, nb_samples); \
417  for (c = 0; c < channels; c++) { \
418  type *d = (type *)dst[c]; \
419  const type *s0 = (type *)cf0[c]; \
420  const type *s1 = (type *)cf1[c]; \
421  \
422  d[i] = s0[i] * gain0 + s1[i] * gain1; \
423  } \
424  } \
425 }
426 
427 #define CROSSFADE(name, type) \
428 static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
429  uint8_t * const *cf1, \
430  int nb_samples, int channels, \
431  int curve0, int curve1) \
432 { \
433  type *d = (type *)dst[0]; \
434  const type *s0 = (type *)cf0[0]; \
435  const type *s1 = (type *)cf1[0]; \
436  int i, c, k = 0; \
437  \
438  for (i = 0; i < nb_samples; i++) { \
439  double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples); \
440  double gain1 = fade_gain(curve1, i, nb_samples); \
441  for (c = 0; c < channels; c++, k++) \
442  d[k] = s0[k] * gain0 + s1[k] * gain1; \
443  } \
444 }
445 
446 CROSSFADE_PLANAR(dbl, double)
447 CROSSFADE_PLANAR(flt, float)
448 CROSSFADE_PLANAR(s16, int16_t)
449 CROSSFADE_PLANAR(s32, int32_t)
450 
451 CROSSFADE(dbl, double)
452 CROSSFADE(flt, float)
453 CROSSFADE(s16, int16_t)
454 CROSSFADE(s32, int32_t)
455 
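The crossfade kernels above evaluate the outgoing stream at position nb_samples - 1 - i and the incoming stream at position i, so the two gains move in opposite directions across the overlap; with the default tri curve each pair sums to (nb_samples - 1) / nb_samples, i.e. roughly unity. A tiny standalone illustration (plain C, linear curve only, not filter code):

#include <stdio.h>

int main(void)
{
    const int nb_samples = 8;  /* length of the crossfade region, in samples */

    for (int i = 0; i < nb_samples; i++) {
        double gain0 = (double)(nb_samples - 1 - i) / nb_samples; /* outgoing stream (cf0) */
        double gain1 = (double)i / nb_samples;                    /* incoming stream (cf1) */
        printf("i=%d  out=%.3f  in=%.3f  sum=%.3f\n", i, gain0, gain1, gain0 + gain1);
    }
    return 0;
}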
456 static int activate(AVFilterContext *ctx)
457 {
458  AudioFadeContext *s = ctx->priv;
459  AVFilterLink *outlink = ctx->outputs[0];
460  AVFrame *in = NULL, *out, *cf[2] = { NULL };
461  int ret = 0, nb_samples, status;
462  int64_t pts;
463 
464  FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
465 
466  if (s->crossfade_is_over) {
467  ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
468  if (ret > 0) {
469  in->pts = s->pts;
470  s->pts += av_rescale_q(in->nb_samples,
471  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
472  return ff_filter_frame(outlink, in);
473  } else if (ret < 0) {
474  return ret;
475  } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
476  ff_outlink_set_status(ctx->outputs[0], status, pts);
477  return 0;
478  } else if (!ret) {
479  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
480  ff_inlink_request_frame(ctx->inputs[1]);
481  return 0;
482  }
483  }
484  }
485 
486  if (ff_inlink_queued_samples(ctx->inputs[0]) > s->nb_samples) {
487  nb_samples = ff_inlink_queued_samples(ctx->inputs[0]) - s->nb_samples;
488  if (nb_samples > 0) {
489  ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
490  if (ret < 0) {
491  return ret;
492  }
493  }
494  in->pts = s->pts;
495  s->pts += av_rescale_q(in->nb_samples,
496  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
497  return ff_filter_frame(outlink, in);
498  } else if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
499  ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples && s->cf0_eof) {
500  if (s->overlap) {
501  out = ff_get_audio_buffer(outlink, s->nb_samples);
502  if (!out)
503  return AVERROR(ENOMEM);
504 
505  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
506  if (ret < 0) {
507  av_frame_free(&out);
508  return ret;
509  }
510 
511  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
512  if (ret < 0) {
513  av_frame_free(&out);
514  return ret;
515  }
516 
517  s->crossfade_samples(out->extended_data, cf[0]->extended_data,
518  cf[1]->extended_data,
519  s->nb_samples, out->channels,
520  s->curve, s->curve2);
521  out->pts = s->pts;
522  s->pts += av_rescale_q(s->nb_samples,
523  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
524  s->crossfade_is_over = 1;
525  av_frame_free(&cf[0]);
526  av_frame_free(&cf[1]);
527  return ff_filter_frame(outlink, out);
528  } else {
529  out = ff_get_audio_buffer(outlink, s->nb_samples);
530  if (!out)
531  return AVERROR(ENOMEM);
532 
533  ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
534  if (ret < 0) {
535  av_frame_free(&out);
536  return ret;
537  }
538 
539  s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
540  outlink->channels, -1, s->nb_samples - 1, s->nb_samples, s->curve);
541  out->pts = s->pts;
542  s->pts += av_rescale_q(s->nb_samples,
543  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
544  av_frame_free(&cf[0]);
545  ret = ff_filter_frame(outlink, out);
546  if (ret < 0)
547  return ret;
548 
549  out = ff_get_audio_buffer(outlink, s->nb_samples);
550  if (!out)
551  return AVERROR(ENOMEM);
552 
553  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
554  if (ret < 0) {
555  av_frame_free(&out);
556  return ret;
557  }
558 
559  s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
560  outlink->channels, 1, 0, s->nb_samples, s->curve2);
561  out->pts = s->pts;
562  s->pts += av_rescale_q(s->nb_samples,
563  (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
564  s->crossfade_is_over = 1;
565  av_frame_free(&cf[1]);
566  return ff_filter_frame(outlink, out);
567  }
568  } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
569  if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) {
570  s->cf0_eof = 1;
571  }
572  if (ff_outlink_get_status(ctx->inputs[1])) {
573  ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
574  return 0;
575  }
576  if (!s->cf0_eof)
577  ff_inlink_request_frame(ctx->inputs[0]);
578  else
579  ff_inlink_request_frame(ctx->inputs[1]);
580  return 0;
581  }
582 
583  return ret;
584 }
585 
586 static int acrossfade_config_output(AVFilterLink *outlink)
587 {
588  AVFilterContext *ctx = outlink->src;
589  AudioFadeContext *s = ctx->priv;
590 
591  if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
592  av_log(ctx, AV_LOG_ERROR,
593  "Inputs must have the same sample rate "
594  "%d for in0 vs %d for in1\n",
595  ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
596  return AVERROR(EINVAL);
597  }
598 
599  outlink->sample_rate = ctx->inputs[0]->sample_rate;
600  outlink->time_base = ctx->inputs[0]->time_base;
601  outlink->channel_layout = ctx->inputs[0]->channel_layout;
602  outlink->channels = ctx->inputs[0]->channels;
603 
604  switch (outlink->format) {
605  case AV_SAMPLE_FMT_DBL: s->crossfade_samples = crossfade_samples_dbl; break;
606  case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
607  case AV_SAMPLE_FMT_FLT: s->crossfade_samples = crossfade_samples_flt; break;
608  case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
609  case AV_SAMPLE_FMT_S16: s->crossfade_samples = crossfade_samples_s16; break;
610  case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
611  case AV_SAMPLE_FMT_S32: s->crossfade_samples = crossfade_samples_s32; break;
612  case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
613  }
614 
615  config_output(outlink);
616 
617  return 0;
618 }
619 
620 static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
621  {
622  .name = "crossfade0",
623  .type = AVMEDIA_TYPE_AUDIO,
624  },
625  {
626  .name = "crossfade1",
627  .type = AVMEDIA_TYPE_AUDIO,
628  },
629 };
630 
631 static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
632  {
633  .name = "default",
634  .type = AVMEDIA_TYPE_AUDIO,
635  .config_props = acrossfade_config_output,
636  },
637 };
638 
639 const AVFilter ff_af_acrossfade = {
640  .name = "acrossfade",
641  .description = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
642  .query_formats = query_formats,
643  .priv_size = sizeof(AudioFadeContext),
644  .activate = activate,
645  .priv_class = &acrossfade_class,
646  FILTER_INPUTS(avfilter_af_acrossfade_inputs),
647  FILTER_OUTPUTS(avfilter_af_acrossfade_outputs),
648 };
649 
650 #endif /* CONFIG_ACROSSFADE_FILTER */
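As a usage sketch, both filters are normally reached through a filtergraph string built from the options declared above, for example afade=t=in:ss=0:ns=44100:curve=qsin or acrossfade=d=2:c1=exp:c2=exp (parameter values chosen only for illustration). The minimal program below, which uses only the public avfilter_get_by_name() API, checks whether the filters were compiled in (CONFIG_AFADE_FILTER / CONFIG_ACROSSFADE_FILTER) and prints their descriptions:

#include <stdio.h>
#include <libavfilter/avfilter.h>

int main(void)
{
    const char *names[] = { "afade", "acrossfade" };

    for (int i = 0; i < 2; i++) {
        const AVFilter *f = avfilter_get_by_name(names[i]);
        if (f)
            printf("%-11s %s\n", f->name, f->description);
        else
            printf("%-11s not enabled in this libavfilter build\n", names[i]);
    }
    return 0;
}

Build against libavfilter, e.g. with the flags reported by pkg-config --cflags --libs libavfilter.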