FFmpeg
avf_showfreqs.c
/*
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>

#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "audio.h"
#include "filters.h"
#include "video.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"

enum DisplayMode    { LINE, BAR, DOT, NB_MODES };
enum ChannelMode    { COMBINED, SEPARATE, NB_CMODES };
enum FrequencyScale { FS_LINEAR, FS_LOG, FS_RLOG, NB_FSCALES };
enum AmplitudeScale { AS_LINEAR, AS_SQRT, AS_CBRT, AS_LOG, NB_ASCALES };

typedef struct ShowFreqsContext {
    const AVClass *class;
    int w, h;
    int mode;
    int cmode;
    int fft_size;
    int fft_bits;
    int ascale, fscale;
    int avg;
    int win_func;
    FFTContext *fft;
    FFTComplex **fft_data;
    float **avg_data;
    float *window_func_lut;
    float overlap;
    float minamp;
    int hop_size;
    int nb_channels;
    int nb_freq;
    int win_size;
    float scale;
    char *colors;
    AVAudioFifo *fifo;
    int64_t pts;
} ShowFreqsContext;

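/* Buffer shapes (set up in config_output() below): fft_data[ch] holds win_size
 * complex samples fed to the FFT for one channel, while avg_data[ch] holds
 * nb_freq per-bin values that persist between frames for the averaging modes. */
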
#define OFFSET(x) offsetof(ShowFreqsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showfreqs_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
    { "mode", "set display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BAR}, 0, NB_MODES-1, FLAGS, "mode" },
    { "line", "show lines", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, FLAGS, "mode" },
    { "bar",  "show bars",  0, AV_OPT_TYPE_CONST, {.i64=BAR},  0, 0, FLAGS, "mode" },
    { "dot",  "show dots",  0, AV_OPT_TYPE_CONST, {.i64=DOT},  0, 0, FLAGS, "mode" },
    { "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=AS_LOG}, 0, NB_ASCALES-1, FLAGS, "ascale" },
    { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=AS_LINEAR}, 0, 0, FLAGS, "ascale" },
    { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=AS_SQRT},   0, 0, FLAGS, "ascale" },
    { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=AS_CBRT},   0, 0, FLAGS, "ascale" },
    { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=AS_LOG},    0, 0, FLAGS, "ascale" },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=FS_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
    { "lin",  "linear",              0, AV_OPT_TYPE_CONST, {.i64=FS_LINEAR}, 0, 0, FLAGS, "fscale" },
    { "log",  "logarithmic",         0, AV_OPT_TYPE_CONST, {.i64=FS_LOG},    0, 0, FLAGS, "fscale" },
    { "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_RLOG},   0, 0, FLAGS, "fscale" },
    { "win_size", "set window size", OFFSET(fft_size), AV_OPT_TYPE_INT, {.i64=2048}, 16, 65536, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect",     "Rectangular",      0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT},     0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hanning",  "Hanning",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
    { "hamming",  "Hamming",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING},  0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch",    "Welch",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH},    0, 0, FLAGS, "win_func" },
    { "flattop",  "Flat-top",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP},  0, 0, FLAGS, "win_func" },
    { "bharris",  "Blackman-Harris",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS},  0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann",    "Bartlett-Hann",    0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN},    0, 0, FLAGS, "win_func" },
    { "sine",     "Sine",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE},     0, 0, FLAGS, "win_func" },
    { "nuttall",  "Nuttall",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL},  0, 0, FLAGS, "win_func" },
    { "lanczos",  "Lanczos",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS},  0, 0, FLAGS, "win_func" },
    { "gauss",    "Gauss",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS},    0, 0, FLAGS, "win_func" },
    { "tukey",    "Tukey",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY},    0, 0, FLAGS, "win_func" },
    { "dolph",    "Dolph-Chebyshev",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH},    0, 0, FLAGS, "win_func" },
    { "cauchy",   "Cauchy",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY},   0, 0, FLAGS, "win_func" },
    { "parzen",   "Parzen",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN},   0, 0, FLAGS, "win_func" },
    { "poisson",  "Poisson",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON},  0, 0, FLAGS, "win_func" },
    { "bohman",   "Bohman",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN},   0, 0, FLAGS, "win_func" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1.}, 0., 1., FLAGS },
    { "averaging", "set time averaging", OFFSET(avg), AV_OPT_TYPE_INT, {.i64=1}, 0, INT32_MAX, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "cmode", "set channel mode", OFFSET(cmode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_CMODES-1, FLAGS, "cmode" },
    { "combined", "show all channels in same window", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "cmode" },
    { "separate", "show each channel in own window",  0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "cmode" },
    { "minamp", "set minimum amplitude", OFFSET(minamp), AV_OPT_TYPE_FLOAT, {.dbl=1e-6}, FLT_MIN, 1e-6, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showfreqs);
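
/* A typical invocation of this filter from the ffmpeg command line, using the
 * options above (the input name and encoder choice here are only illustrative):
 *
 *   ffmpeg -i audio.wav -filter_complex \
 *     "showfreqs=s=1024x512:mode=bar:ascale=log:fscale=log:win_func=hanning" \
 *     -c:v libx264 freqs.mkv
 */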

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
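
/* Note: only planar float input is negotiated above, so each channel sits in
 * its own plane and can be windowed and transformed independently, and the
 * RGBA output lets draw_dot() treat every pixel as a single 32-bit word. */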

static av_cold int init(AVFilterContext *ctx)
{
    ShowFreqsContext *s = ctx->priv;

    s->pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowFreqsContext *s = ctx->priv;
    float overlap;
    int i;

    s->fft_bits = av_log2(s->fft_size);
    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;
    av_audio_fifo_free(s->fifo);
    av_fft_end(s->fft);
    s->fft = av_fft_init(s->fft_bits, 0);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(ENOMEM);
    }

    /* FFT buffers: x2 for each (display) channel buffer.
     * Note: we use free and malloc instead of a realloc-like function to
     * make sure the buffer is aligned in memory for the FFT functions. */
    for (i = 0; i < s->nb_channels; i++) {
        av_freep(&s->fft_data[i]);
        av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    s->nb_channels = inlink->channels;

    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    if (!s->avg_data)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->hop_size = (1. - s->overlap) * s->win_size;
    if (s->hop_size < 1) {
        av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
        return AVERROR(EINVAL);
    }

    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
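
/* Rough worked example of the derived sizes, assuming the defaults above:
 * win_size=2048 gives fft_bits=11 and nb_freq=1024; with the Hanning window
 * the suggested overlap reported by generate_window_func() is about 0.5, so
 * hop_size = (1 - 0.5) * 2048 = 1024 and, for a 44100 Hz input, the output
 * frame rate is 44100/1024, roughly 43 frames per second. */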

static inline void draw_dot(AVFrame *out, int x, int y, uint8_t fg[4])
{
    uint32_t color = AV_RL32(out->data[0] + y * out->linesize[0] + x * 4);

    if ((color & 0xffffff) != 0)
        AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg) | color);
    else
        AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
}
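
/* Each pixel is a packed RGBA word; when a dot lands on a pixel that already
 * has colour, the new colour is OR-ed into the old one, so overlapping
 * channels blend towards white instead of overwriting each other. */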

static int get_sx(ShowFreqsContext *s, int f)
{
    switch (s->fscale) {
    case FS_LINEAR:
        return (s->w/(float)s->nb_freq)*f;
    case FS_LOG:
        return s->w-pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.));
    case FS_RLOG:
        return pow(s->w, f/(s->nb_freq-1.));
    }

    return 0;
}

static float get_bsize(ShowFreqsContext *s, int f)
{
    switch (s->fscale) {
    case FS_LINEAR:
        return s->w/(float)s->nb_freq;
    case FS_LOG:
        return pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.))-
               pow(s->w, (s->nb_freq-f-2)/(s->nb_freq-1.));
    case FS_RLOG:
        return pow(s->w, (f+1)/(s->nb_freq-1.))-
               pow(s->w,  f   /(s->nb_freq-1.));
    }

    return 1.;
}
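
/* get_sx() maps a bin index to an x position and get_bsize() gives that bin's
 * width in pixels, so together they tile the x axis for the chosen frequency
 * scale: "lin" spreads bins evenly, "log" widens the low-frequency bins and
 * "rlog" widens the high-frequency ones. */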

static inline void plot_freq(ShowFreqsContext *s, int ch,
                             double a, int f, uint8_t fg[4], int *prev_y,
                             AVFrame *out, AVFilterLink *outlink)
{
    const int w = s->w;
    const float min = s->minamp;
    const float avg = s->avg_data[ch][f];
    const float bsize = get_bsize(s, f);
    const int sx = get_sx(s, f);
    int end = outlink->h;
    int x, y, i;

    switch (s->ascale) {
    case AS_SQRT:
        a = 1.0 - sqrt(a);
        break;
    case AS_CBRT:
        a = 1.0 - cbrt(a);
        break;
    case AS_LOG:
        a = log(av_clipd(a, min, 1)) / log(min);
        break;
    case AS_LINEAR:
        a = 1.0 - a;
        break;
    }

    switch (s->cmode) {
    case COMBINED:
        y = a * outlink->h - 1;
        break;
    case SEPARATE:
        end = (outlink->h / s->nb_channels) * (ch + 1);
        y = (outlink->h / s->nb_channels) * ch + a * (outlink->h / s->nb_channels) - 1;
        break;
    default:
        av_assert0(0);
    }
    if (y < 0)
        return;

    switch (s->avg) {
    case 0:
        y = s->avg_data[ch][f] = !outlink->frame_count_in ? y : FFMIN(avg, y);
        break;
    case 1:
        break;
    default:
        s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outlink->frame_count_in + 1, s->avg) * y);
        y = s->avg_data[ch][f];
        break;
    }

    switch (s->mode) {
    case LINE:
        if (*prev_y == -1) {
            *prev_y = y;
        }
        if (y <= *prev_y) {
            for (x = sx + 1; x < sx + bsize && x < w; x++)
                draw_dot(out, x, y, fg);
            for (i = y; i <= *prev_y; i++)
                draw_dot(out, sx, i, fg);
        } else {
            for (i = *prev_y; i <= y; i++)
                draw_dot(out, sx, i, fg);
            for (x = sx + 1; x < sx + bsize && x < w; x++)
                draw_dot(out, x, i - 1, fg);
        }
        *prev_y = y;
        break;
    case BAR:
        for (x = sx; x < sx + bsize && x < w; x++)
            for (i = y; i < end; i++)
                draw_dot(out, x, i, fg);
        break;
    case DOT:
        for (x = sx; x < sx + bsize && x < w; x++)
            draw_dot(out, x, y, fg);
        break;
    }
}
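
/* The "averaging" option used above works in screen coordinates, where a
 * smaller y means a larger amplitude: avg=0 keeps the peak value seen so far
 * for each bin, avg=1 draws every frame as-is, and avg>1 keeps a running mean
 * over at most that many frames. */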

static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
    const int win_size = s->win_size;
    char *colors, *color, *saveptr = NULL;
    AVFrame *out;
    int ch, n;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    for (n = 0; n < outlink->h; n++)
        memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);

    /* fill FFT input with the number of samples available */
    for (ch = 0; ch < s->nb_channels; ch++) {
        const float *p = (float *)in->extended_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
        for (; n < win_size; n++) {
            s->fft_data[ch][n].re = 0;
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run FFT on each samples set */
    for (ch = 0; ch < s->nb_channels; ch++) {
        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }

#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))

    colors = av_strdup(s->colors);
    if (!colors) {
        av_frame_free(&out);
        return AVERROR(ENOMEM);
    }

    for (ch = 0; ch < s->nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_y = -1, f;
        double a;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
        plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);

        for (f = 1; f < s->nb_freq; f++) {
            a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);

            plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
        }
    }

    av_free(colors);
    out->pts = in->pts;
    out->sample_aspect_ratio = (AVRational){1,1};
    return ff_filter_frame(outlink, out);
}
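
/* Each bin magnitude is divided by s->scale (the sum of the squared window
 * computed in config_output()) and clipped to [0,1] before plot_freq() maps it
 * to a height; with the default log amplitude scale, values at or below
 * "minamp" end up at the bottom of the display. */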

static int filter_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowFreqsContext *s = ctx->priv;
    AVFrame *fin = NULL;
    int ret = 0;

    fin = ff_get_audio_buffer(inlink, s->win_size);
    if (!fin) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    fin->pts = s->pts;
    s->pts += s->hop_size;
    ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
    if (ret < 0)
        goto fail;

    ret = plot_freqs(inlink, fin);
    av_frame_free(&fin);
    av_audio_fifo_drain(s->fifo, s->hop_size);

fail:
    av_frame_free(&fin);
    return ret;
}

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (av_audio_fifo_size(s->fifo) < s->win_size)
        ret = ff_inlink_consume_samples(inlink, s->win_size, s->win_size, &in);
    if (ret < 0)
        return ret;
    if (ret > 0) {
        av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = in->pts;
        av_frame_free(&in); /* the FIFO keeps its own copy of the samples */
    }

    if (av_audio_fifo_size(s->fifo) >= s->win_size) {
        ret = filter_frame(inlink);
        if (ret <= 0)
            return ret;
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
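
/* Streaming pattern: activate() tops up the FIFO until a full window of
 * win_size samples is buffered, filter_frame() then peeks that window, plots
 * it and drains only hop_size samples, so consecutive windows overlap by
 * win_size - hop_size samples as configured by the "overlap" option. */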

static av_cold void uninit(AVFilterContext *ctx)
{
    ShowFreqsContext *s = ctx->priv;
    int i;

    av_fft_end(s->fft);
    for (i = 0; i < s->nb_channels; i++) {
        if (s->fft_data)
            av_freep(&s->fft_data[i]);
        if (s->avg_data)
            av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    av_freep(&s->window_func_lut);
    av_audio_fifo_free(s->fifo);
}

static const AVFilterPad showfreqs_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad showfreqs_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_avf_showfreqs = {
    .name          = "showfreqs",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a frequencies video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowFreqsContext),
    .activate      = activate,
    .inputs        = showfreqs_inputs,
    .outputs       = showfreqs_outputs,
    .priv_class    = &showfreqs_class,
};