1 /*
2  * Copyright (c) 2012-2013 Clément Bœsch
3  * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
4  * Copyright (c) 2015 Paul B Mahol
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
26  * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
27  */
28 
29 #include <math.h>
30 
31 #include "libavcodec/avfft.h"
32 #include "libavutil/audio_fifo.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/xga_font_data.h"
39 #include "audio.h"
40 #include "video.h"
41 #include "avfilter.h"
42 #include "filters.h"
43 #include "internal.h"
44 #include "window_func.h"
45 
46 enum DisplayMode  { COMBINED, SEPARATE, NB_MODES };
47 enum DataMode     { D_MAGNITUDE, D_PHASE, NB_DMODES };
48 enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES };
49 enum ColorMode    { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, MAGMA, GREEN, VIRIDIS, PLASMA, CIVIDIS, TERRAIN, NB_CLMODES };
50 enum SlideMode    { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES };
51 enum Orientation  { VERTICAL, HORIZONTAL, NB_ORIENTATIONS };
52 enum FrequencyScale { F_LINEAR, F_LOG, NB_FSCALES };
53 
54 typedef struct ShowSpectrumContext {
55  const AVClass *class;
56  int w, h;
57  char *rate_str;
58  AVRational auto_frame_rate;
59  AVRational frame_rate;
60  AVFrame *outpicref;
61  int nb_display_channels;
62  int orientation;
63  int channel_width;
64  int channel_height;
65  int sliding; ///< 1 if sliding mode, 0 otherwise
66  int mode; ///< channel display mode
67  int color_mode; ///< display color scheme
68  int scale;
69  int fscale;
70  float saturation; ///< color saturation multiplier
71  float rotation; ///< color rotation
72  int start, stop; ///< zoom mode
73  int data;
74  int xpos; ///< x position (current column)
75  FFTContext **fft; ///< Fast Fourier Transform context
76  FFTContext **ifft; ///< Inverse Fast Fourier Transform context
77  int fft_bits; ///< number of bits (FFT window size = 1<<fft_bits)
78  FFTComplex **fft_data; ///< bins holder for each (displayed) channels
79  FFTComplex **fft_scratch; ///< scratch buffers
80  float *window_func_lut; ///< Window function LUT
81  float **magnitudes;
82  float **phases;
83  int win_func;
84  int win_size;
85  int buf_size;
86  double win_scale;
87  float overlap;
88  float gain;
89  int consumed;
90  int hop_size;
91  float *combine_buffer; ///< color combining buffer (3 * h items)
92  float **color_buffer; ///< color buffer (3 * h * ch items)
93  AVAudioFifo *fifo;
94  int64_t pts;
95  int64_t old_pts;
96  int old_len;
97  int single_pic;
98  int legend;
99  int start_x, start_y;
100  int (*plot_channel)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
101 } ShowSpectrumContext;
102 
103 #define OFFSET(x) offsetof(ShowSpectrumContext, x)
104 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
105 
106 static const AVOption showspectrum_options[] = {
107  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
108  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
109  { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_SLIDES-1, FLAGS, "slide" },
110  { "replace", "replace old columns with new", 0, AV_OPT_TYPE_CONST, {.i64=REPLACE}, 0, 0, FLAGS, "slide" },
111  { "scroll", "scroll from right to left", 0, AV_OPT_TYPE_CONST, {.i64=SCROLL}, 0, 0, FLAGS, "slide" },
112  { "fullframe", "return full frames", 0, AV_OPT_TYPE_CONST, {.i64=FULLFRAME}, 0, 0, FLAGS, "slide" },
113  { "rscroll", "scroll from left to right", 0, AV_OPT_TYPE_CONST, {.i64=RSCROLL}, 0, 0, FLAGS, "slide" },
114  { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
115  { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
116  { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
117  { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
118  { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
119  { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
120  { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
121  { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
122  { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
123  { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
124  { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
125  { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
126  { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
127  { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" },
128  { "green", "green based coloring", 0, AV_OPT_TYPE_CONST, {.i64=GREEN}, 0, 0, FLAGS, "color" },
129  { "viridis", "viridis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS}, 0, 0, FLAGS, "color" },
130  { "plasma", "plasma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=PLASMA}, 0, 0, FLAGS, "color" },
131  { "cividis", "cividis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS}, 0, 0, FLAGS, "color" },
132  { "terrain", "terrain based coloring", 0, AV_OPT_TYPE_CONST, {.i64=TERRAIN}, 0, 0, FLAGS, "color" },
133  { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
134  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
135  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
136  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
137  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
138  { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
139  { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
140  { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=F_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
141  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=F_LINEAR}, 0, 0, FLAGS, "fscale" },
142  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=F_LOG}, 0, 0, FLAGS, "fscale" },
143  { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
144  { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
145  { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
146  { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
147  { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
148  { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
149  { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
150  { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
151  { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
152  { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
153  { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
154  { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
155  { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
156  { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
157  { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
158  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
159  { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
160  { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
161  { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
162  { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
163  { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
164  { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
165  { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" },
166  { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
167  { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
168  { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
169  { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, 1, FLAGS },
170  { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
171  { "data", "set data mode", OFFSET(data), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NB_DMODES-1, FLAGS, "data" },
172  { "magnitude", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_MAGNITUDE}, 0, 0, FLAGS, "data" },
173  { "phase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=D_PHASE}, 0, 0, FLAGS, "data" },
174  { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
175  { "start", "start frequency", OFFSET(start), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
176  { "stop", "stop frequency", OFFSET(stop), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
177  { "fps", "set video rate", OFFSET(rate_str), AV_OPT_TYPE_STRING, {.str = "auto"}, 0, 0, FLAGS },
178  { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
179  { NULL }
180 };
181 
182 AVFILTER_DEFINE_CLASS(showspectrum);
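/* Usage sketch for the options above (file names and encoder are placeholders;
 * any ffmpeg build with this filter enabled should accept something like):
 *
 *   ffmpeg -i input.wav -lavfi \
 *     "showspectrum=s=1280x512:slide=scroll:mode=combined:color=intensity:scale=log" \
 *     -c:v libx264 spectrum.mkv
 *
 * Each key=value pair maps onto one AVOption entry in showspectrum_options[]. */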
183 
184 static const struct ColorTable {
185  float a, y, u, v;
186 } color_table[][8] = {
187  [INTENSITY] = {
188  { 0, 0, 0, 0 },
189  { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
190  { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
191  { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
192  { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
193  { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
194  { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
195  { 1, 1, 0, 0 }},
196  [RAINBOW] = {
197  { 0, 0, 0, 0 },
198  { 0.13, 44/256., (189-128)/256., (138-128)/256. },
199  { 0.25, 29/256., (186-128)/256., (119-128)/256. },
200  { 0.38, 119/256., (194-128)/256., (53-128)/256. },
201  { 0.60, 111/256., (73-128)/256., (59-128)/256. },
202  { 0.73, 205/256., (19-128)/256., (149-128)/256. },
203  { 0.86, 135/256., (83-128)/256., (200-128)/256. },
204  { 1, 73/256., (95-128)/256., (225-128)/256. }},
205  [MORELAND] = {
206  { 0, 44/256., (181-128)/256., (112-128)/256. },
207  { 0.13, 126/256., (177-128)/256., (106-128)/256. },
208  { 0.25, 164/256., (163-128)/256., (109-128)/256. },
209  { 0.38, 200/256., (140-128)/256., (120-128)/256. },
210  { 0.60, 201/256., (117-128)/256., (141-128)/256. },
211  { 0.73, 177/256., (103-128)/256., (165-128)/256. },
212  { 0.86, 136/256., (100-128)/256., (183-128)/256. },
213  { 1, 68/256., (117-128)/256., (203-128)/256. }},
214  [NEBULAE] = {
215  { 0, 10/256., (134-128)/256., (132-128)/256. },
216  { 0.23, 21/256., (137-128)/256., (130-128)/256. },
217  { 0.45, 35/256., (134-128)/256., (134-128)/256. },
218  { 0.57, 51/256., (130-128)/256., (139-128)/256. },
219  { 0.67, 104/256., (116-128)/256., (162-128)/256. },
220  { 0.77, 120/256., (105-128)/256., (188-128)/256. },
221  { 0.87, 140/256., (105-128)/256., (188-128)/256. },
222  { 1, 1, 0, 0 }},
223  [FIRE] = {
224  { 0, 0, 0, 0 },
225  { 0.23, 44/256., (132-128)/256., (127-128)/256. },
226  { 0.45, 62/256., (116-128)/256., (140-128)/256. },
227  { 0.57, 75/256., (105-128)/256., (152-128)/256. },
228  { 0.67, 95/256., (91-128)/256., (166-128)/256. },
229  { 0.77, 126/256., (74-128)/256., (172-128)/256. },
230  { 0.87, 164/256., (73-128)/256., (162-128)/256. },
231  { 1, 1, 0, 0 }},
232  [FIERY] = {
233  { 0, 0, 0, 0 },
234  { 0.23, 36/256., (116-128)/256., (163-128)/256. },
235  { 0.45, 52/256., (102-128)/256., (200-128)/256. },
236  { 0.57, 116/256., (84-128)/256., (196-128)/256. },
237  { 0.67, 157/256., (67-128)/256., (181-128)/256. },
238  { 0.77, 193/256., (40-128)/256., (155-128)/256. },
239  { 0.87, 221/256., (101-128)/256., (134-128)/256. },
240  { 1, 1, 0, 0 }},
241  [FRUIT] = {
242  { 0, 0, 0, 0 },
243  { 0.20, 29/256., (136-128)/256., (119-128)/256. },
244  { 0.30, 60/256., (119-128)/256., (90-128)/256. },
245  { 0.40, 85/256., (91-128)/256., (85-128)/256. },
246  { 0.50, 116/256., (70-128)/256., (105-128)/256. },
247  { 0.60, 151/256., (50-128)/256., (146-128)/256. },
248  { 0.70, 191/256., (63-128)/256., (178-128)/256. },
249  { 1, 98/256., (80-128)/256., (221-128)/256. }},
250  [COOL] = {
251  { 0, 0, 0, 0 },
252  { .15, 0, .5, -.5 },
253  { 1, 1, -.5, .5 }},
254  [MAGMA] = {
255  { 0, 0, 0, 0 },
256  { 0.10, 23/256., (175-128)/256., (120-128)/256. },
257  { 0.23, 43/256., (158-128)/256., (144-128)/256. },
258  { 0.35, 85/256., (138-128)/256., (179-128)/256. },
259  { 0.48, 96/256., (128-128)/256., (189-128)/256. },
260  { 0.64, 128/256., (103-128)/256., (214-128)/256. },
261  { 0.92, 205/256., (80-128)/256., (152-128)/256. },
262  { 1, 1, 0, 0 }},
263  [GREEN] = {
264  { 0, 0, 0, 0 },
265  { .75, .5, 0, -.5 },
266  { 1, 1, 0, 0 }},
267  [VIRIDIS] = {
268  { 0, 0, 0, 0 },
269  { 0.10, 0x39/255., (0x9D -128)/255., (0x8F -128)/255. },
270  { 0.23, 0x5C/255., (0x9A -128)/255., (0x68 -128)/255. },
271  { 0.35, 0x69/255., (0x93 -128)/255., (0x57 -128)/255. },
272  { 0.48, 0x76/255., (0x88 -128)/255., (0x4B -128)/255. },
273  { 0.64, 0x8A/255., (0x72 -128)/255., (0x4F -128)/255. },
274  { 0.80, 0xA3/255., (0x50 -128)/255., (0x66 -128)/255. },
275  { 1, 0xCC/255., (0x2F -128)/255., (0x87 -128)/255. }},
276  [PLASMA] = {
277  { 0, 0, 0, 0 },
278  { 0.10, 0x27/255., (0xC2 -128)/255., (0x82 -128)/255. },
279  { 0.58, 0x5B/255., (0x9A -128)/255., (0xAE -128)/255. },
280  { 0.70, 0x89/255., (0x44 -128)/255., (0xAB -128)/255. },
281  { 0.80, 0xB4/255., (0x2B -128)/255., (0x9E -128)/255. },
282  { 0.91, 0xD2/255., (0x38 -128)/255., (0x92 -128)/255. },
283  { 1, 1, 0, 0. }},
284  [CIVIDIS] = {
285  { 0, 0, 0, 0 },
286  { 0.20, 0x28/255., (0x98 -128)/255., (0x6F -128)/255. },
287  { 0.50, 0x48/255., (0x95 -128)/255., (0x74 -128)/255. },
288  { 0.63, 0x69/255., (0x84 -128)/255., (0x7F -128)/255. },
289  { 0.76, 0x89/255., (0x75 -128)/255., (0x84 -128)/255. },
290  { 0.90, 0xCE/255., (0x35 -128)/255., (0x95 -128)/255. },
291  { 1, 1, 0, 0. }},
292  [TERRAIN] = {
293  { 0, 0, 0, 0 },
294  { 0.15, 0, .5, 0 },
295  { 0.60, 1, -.5, -.5 },
296  { 0.85, 1, -.5, .5 },
297  { 1, 1, 0, 0 }},
298 };
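/* Each color_table entry above is a breakpoint of a piecewise-linear gradient:
 * 'a' is the normalized intensity position in [0,1] and y/u/v are the matching
 * luma/chroma values, with chroma already centered on zero (stored as
 * (C-128)/256, or /255 for the hex-coded palettes). pick_color() below
 * interpolates linearly between the two breakpoints that bracket a given
 * intensity. */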
299 
300 static av_cold void uninit(AVFilterContext *ctx)
301 {
302  ShowSpectrumContext *s = ctx->priv;
303  int i;
304 
305  av_freep(&s->combine_buffer);
306  if (s->fft) {
307  for (i = 0; i < s->nb_display_channels; i++)
308  av_fft_end(s->fft[i]);
309  }
310  av_freep(&s->fft);
311  if (s->ifft) {
312  for (i = 0; i < s->nb_display_channels; i++)
313  av_fft_end(s->ifft[i]);
314  }
315  av_freep(&s->ifft);
316  if (s->fft_data) {
317  for (i = 0; i < s->nb_display_channels; i++)
318  av_freep(&s->fft_data[i]);
319  }
320  av_freep(&s->fft_data);
321  if (s->fft_scratch) {
322  for (i = 0; i < s->nb_display_channels; i++)
323  av_freep(&s->fft_scratch[i]);
324  }
325  av_freep(&s->fft_scratch);
326  if (s->color_buffer) {
327  for (i = 0; i < s->nb_display_channels; i++)
328  av_freep(&s->color_buffer[i]);
329  }
330  av_freep(&s->color_buffer);
331  av_freep(&s->window_func_lut);
332  if (s->magnitudes) {
333  for (i = 0; i < s->nb_display_channels; i++)
334  av_freep(&s->magnitudes[i]);
335  }
336  av_freep(&s->magnitudes);
337  av_frame_free(&s->outpicref);
338  av_audio_fifo_free(s->fifo);
339  if (s->phases) {
340  for (i = 0; i < s->nb_display_channels; i++)
341  av_freep(&s->phases[i]);
342  }
343  av_freep(&s->phases);
344 }
345 
346 static int query_formats(AVFilterContext *ctx)
347 {
348  AVFilterFormats *formats = NULL;
349  AVFilterChannelLayouts *layouts = NULL;
350  AVFilterLink *inlink = ctx->inputs[0];
351  AVFilterLink *outlink = ctx->outputs[0];
352  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
353  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE };
354  int ret;
355 
356  /* set input audio formats */
357  formats = ff_make_format_list(sample_fmts);
358  if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
359  return ret;
360 
361  layouts = ff_all_channel_layouts();
362  if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
363  return ret;
364 
365  formats = ff_all_samplerates();
366  if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
367  return ret;
368 
369  /* set output video format */
370  formats = ff_make_format_list(pix_fmts);
371  if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
372  return ret;
373 
374  return 0;
375 }
376 
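/* run_channel_fft() computes one windowed transform per channel. Without a zoom
 * (s->stop == 0) this is a plain in-place FFT. With a zoom range [start, stop],
 * the spectrum is evaluated only on that band using what is essentially a chirp
 * (Bluestein-style) transform: with theta = 2*pi*start/rate,
 * phi = 2*pi*(stop-start)/rate/(M-1) and M = win_size/2 output bins, the values
 * wanted are
 *
 *   X(k) = sum_n x[n] * exp(-i*(theta + k*phi)*n),  k = 0..M-1
 *
 * Using n*k = (n^2 + k^2 - (k-n)^2)/2 this becomes a convolution of
 * g[n] = x[n]*exp(-i*(theta*n + phi*n^2/2)) with the chirp h[m] = exp(+i*phi*m^2/2),
 * followed by a final multiplication by exp(-i*phi*k^2/2). The convolution is done
 * with two forward FFTs, a point-wise product and one inverse FFT over the padded
 * length s->buf_size. */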
377 static int run_channel_fft(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
378 {
379  ShowSpectrumContext *s = ctx->priv;
380  AVFilterLink *inlink = ctx->inputs[0];
381  const float *window_func_lut = s->window_func_lut;
382  AVFrame *fin = arg;
383  const int ch = jobnr;
384  int n;
385 
386  /* fill FFT input with the number of samples available */
387  const float *p = (float *)fin->extended_data[ch];
388 
389  for (n = 0; n < s->win_size; n++) {
390  s->fft_data[ch][n].re = p[n] * window_func_lut[n];
391  s->fft_data[ch][n].im = 0;
392  }
393 
394  if (s->stop) {
395  float theta, phi, psi, a, b, S, c;
396  FFTComplex *g = s->fft_data[ch];
397  FFTComplex *h = s->fft_scratch[ch];
398  int L = s->buf_size;
399  int N = s->win_size;
400  int M = s->win_size / 2;
401 
402  phi = 2.f * M_PI * (s->stop - s->start) / (float)inlink->sample_rate / (M - 1);
403  theta = 2.f * M_PI * s->start / (float)inlink->sample_rate;
404 
405  for (int n = 0; n < M; n++) {
406  h[n].re = cosf(n * n / 2.f * phi);
407  h[n].im = sinf(n * n / 2.f * phi);
408  }
409 
410  for (int n = M; n < L; n++) {
411  h[n].re = 0.f;
412  h[n].im = 0.f;
413  }
414 
415  for (int n = L - N; n < L; n++) {
416  h[n].re = cosf((L - n) * (L - n) / 2.f * phi);
417  h[n].im = sinf((L - n) * (L - n) / 2.f * phi);
418  }
419 
420  for (int n = 0; n < N; n++) {
421  g[n].re = s->fft_data[ch][n].re;
422  g[n].im = s->fft_data[ch][n].im;
423  }
424 
425  for (int n = N; n < L; n++) {
426  g[n].re = 0.f;
427  g[n].im = 0.f;
428  }
429 
430  for (int n = 0; n < N; n++) {
431  psi = n * theta + n * n / 2.f * phi;
432  c = cosf(psi);
433  S = -sinf(psi);
434  a = c * g[n].re - S * g[n].im;
435  b = S * g[n].re + c * g[n].im;
436  g[n].re = a;
437  g[n].im = b;
438  }
439 
440  av_fft_permute(s->fft[ch], h);
441  av_fft_calc(s->fft[ch], h);
442 
443  av_fft_permute(s->fft[ch], g);
444  av_fft_calc(s->fft[ch], g);
445 
446  for (int n = 0; n < L; n++) {
447  c = g[n].re;
448  S = g[n].im;
449  a = c * h[n].re - S * h[n].im;
450  b = S * h[n].re + c * h[n].im;
451 
452  g[n].re = a / L;
453  g[n].im = b / L;
454  }
455 
456  av_fft_permute(s->ifft[ch], g);
457  av_fft_calc(s->ifft[ch], g);
458 
459  for (int k = 0; k < M; k++) {
460  psi = k * k / 2.f * phi;
461  c = cosf(psi);
462  S = -sinf(psi);
463  a = c * g[k].re - S * g[k].im;
464  b = S * g[k].re + c * g[k].im;
465  s->fft_data[ch][k].re = a;
466  s->fft_data[ch][k].im = b;
467  }
468  } else {
469  /* run FFT on each samples set */
470  av_fft_permute(s->fft[ch], s->fft_data[ch]);
471  av_fft_calc(s->fft[ch], s->fft_data[ch]);
472  }
473 
474  return 0;
475 }
476 
477 static void drawtext(AVFrame *pic, int x, int y, const char *txt, int o)
478 {
479  const uint8_t *font;
480  int font_height;
481  int i;
482 
483  font = avpriv_cga_font, font_height = 8;
484 
485  for (i = 0; txt[i]; i++) {
486  int char_y, mask;
487 
488  if (o) {
489  for (char_y = font_height - 1; char_y >= 0; char_y--) {
490  uint8_t *p = pic->data[0] + (y + i * 10) * pic->linesize[0] + x;
491  for (mask = 0x80; mask; mask >>= 1) {
492  if (font[txt[i] * font_height + font_height - 1 - char_y] & mask)
493  p[char_y] = ~p[char_y];
494  p += pic->linesize[0];
495  }
496  }
497  } else {
498  uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8);
499  for (char_y = 0; char_y < font_height; char_y++) {
500  for (mask = 0x80; mask; mask >>= 1) {
501  if (font[txt[i] * font_height + char_y] & mask)
502  *p = ~(*p);
503  p++;
504  }
505  p += pic->linesize[0] - 8;
506  }
507  }
508  }
509 }
510 
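/* color_range() decides how much of the Y/U/V range a single channel may use.
 * In COMBINED mode every channel contributes to the same column, so the luma
 * budget is divided by the channel count; in SEPARATE mode each channel owns its
 * own band and gets the full range. For the CHANNEL color mode the chroma is then
 * rotated around the U/V plane (sin/cos of the channel index plus the user
 * "rotation" option) so each channel gets its own hue, and the result is finally
 * scaled by the "saturation" option. */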
511 static void color_range(ShowSpectrumContext *s, int ch,
512  float *yf, float *uf, float *vf)
513 {
514  switch (s->mode) {
515  case COMBINED:
516  // reduce range by channel count
517  *yf = 256.0f / s->nb_display_channels;
518  switch (s->color_mode) {
519  case RAINBOW:
520  case MORELAND:
521  case NEBULAE:
522  case FIRE:
523  case FIERY:
524  case FRUIT:
525  case COOL:
526  case GREEN:
527  case VIRIDIS:
528  case PLASMA:
529  case CIVIDIS:
530  case TERRAIN:
531  case MAGMA:
532  case INTENSITY:
533  *uf = *yf;
534  *vf = *yf;
535  break;
536  case CHANNEL:
537  /* adjust saturation for mixed UV coloring */
538  /* this factor is correct for infinite channels, an approximation otherwise */
539  *uf = *yf * M_PI;
540  *vf = *yf * M_PI;
541  break;
542  default:
543  av_assert0(0);
544  }
545  break;
546  case SEPARATE:
547  // full range
548  *yf = 256.0f;
549  *uf = 256.0f;
550  *vf = 256.0f;
551  break;
552  default:
553  av_assert0(0);
554  }
555 
556  if (s->color_mode == CHANNEL) {
557  if (s->nb_display_channels > 1) {
558  *uf *= 0.5f * sinf((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
559  *vf *= 0.5f * cosf((2 * M_PI * ch) / s->nb_display_channels + M_PI * s->rotation);
560  } else {
561  *uf *= 0.5f * sinf(M_PI * s->rotation);
562  *vf *= 0.5f * cosf(M_PI * s->rotation + M_PI_2);
563  }
564  } else {
565  *uf += *uf * sinf(M_PI * s->rotation);
566  *vf += *vf * cosf(M_PI * s->rotation + M_PI_2);
567  }
568 
569  *uf *= s->saturation;
570  *vf *= s->saturation;
571 }
572 
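/* pick_color() maps a scaled intensity a in [0,1] to Y/U/V. For the gradient
 * color modes it walks color_table[cm] to find the bracketing breakpoints and
 * linearly interpolates between them; for the CHANNEL mode it simply multiplies
 * a by the per-channel yf/uf/vf factors computed in color_range(). */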
573 static void pick_color(ShowSpectrumContext *s,
574  float yf, float uf, float vf,
575  float a, float *out)
576 {
577  if (s->color_mode > CHANNEL) {
578  const int cm = s->color_mode;
579  float y, u, v;
580  int i;
581 
582  for (i = 1; i < FF_ARRAY_ELEMS(color_table[cm]) - 1; i++)
583  if (color_table[cm][i].a >= a)
584  break;
585  // i now is the first item >= the color
586  // now we know to interpolate between item i - 1 and i
587  if (a <= color_table[cm][i - 1].a) {
588  y = color_table[cm][i - 1].y;
589  u = color_table[cm][i - 1].u;
590  v = color_table[cm][i - 1].v;
591  } else if (a >= color_table[cm][i].a) {
592  y = color_table[cm][i].y;
593  u = color_table[cm][i].u;
594  v = color_table[cm][i].v;
595  } else {
596  float start = color_table[cm][i - 1].a;
597  float end = color_table[cm][i].a;
598  float lerpfrac = (a - start) / (end - start);
599  y = color_table[cm][i - 1].y * (1.0f - lerpfrac)
600  + color_table[cm][i].y * lerpfrac;
601  u = color_table[cm][i - 1].u * (1.0f - lerpfrac)
602  + color_table[cm][i].u * lerpfrac;
603  v = color_table[cm][i - 1].v * (1.0f - lerpfrac)
604  + color_table[cm][i].v * lerpfrac;
605  }
606 
607  out[0] = y * yf;
608  out[1] = u * uf;
609  out[2] = v * vf;
610  } else {
611  out[0] = a * yf;
612  out[1] = a * uf;
613  out[2] = a * vf;
614  }
615 }
616 
617 static char *get_time(AVFilterContext *ctx, float seconds, int x)
618 {
619  char *units;
620 
621  if (x == 0)
622  units = av_asprintf("0");
623  else if (log10(seconds) > 6)
624  units = av_asprintf("%.2fh", seconds / (60 * 60));
625  else if (log10(seconds) > 3)
626  units = av_asprintf("%.2fm", seconds / 60);
627  else
628  units = av_asprintf("%.2fs", seconds);
629  return units;
630 }
631 
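/* Helpers for the logarithmic frequency axis. log_scale() maps v in [min,max]
 * onto a*exp(b*v) with b = ln(max/min)/(max-min) and a = max*exp(-b*max), which
 * pins both endpoints (min->min, max->max) while compressing the middle;
 * inv_log_scale() is its inverse. get_log_hz() and bin_pos() use them (with a
 * 21 Hz floor and a +1/-1 offset so DC stays finite) to convert between linear
 * FFT bin indices and display positions when fscale=log is selected. */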
632 static float log_scale(const float value, const float min, const float max)
633 {
634  if (value < min)
635  return min;
636  if (value > max)
637  return max;
638 
639  {
640  const float b = logf(max / min) / (max - min);
641  const float a = max / expf(max * b);
642 
643  return expf(value * b) * a;
644  }
645 }
646 
647 static float get_log_hz(const int bin, const int num_bins, const float sample_rate)
648 {
649  const float max_freq = sample_rate / 2;
650  const float hz_per_bin = max_freq / num_bins;
651  const float freq = hz_per_bin * bin;
652  const float scaled_freq = log_scale(freq + 1, 21, max_freq) - 1;
653 
654  return num_bins * scaled_freq / max_freq;
655 }
656 
657 static float inv_log_scale(const float value, const float min, const float max)
658 {
659  if (value < min)
660  return min;
661  if (value > max)
662  return max;
663 
664  {
665  const float b = logf(max / min) / (max - min);
666  const float a = max / expf(max * b);
667 
668  return logf(value / a) / b;
669  }
670 }
671 
672 static float bin_pos(const int bin, const int num_bins, const float sample_rate)
673 {
674  const float max_freq = sample_rate / 2;
675  const float hz_per_bin = max_freq / num_bins;
676  const float freq = hz_per_bin * bin;
677  const float scaled_freq = inv_log_scale(freq + 1, 21, max_freq) - 1;
678 
679  return num_bins * scaled_freq / max_freq;
680 }
681 
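/* draw_legend() decorates the output frame: a one-pixel border around the
 * spectrum area (luma 200), minor/major tick marks at regular pixel intervals,
 * frequency labels derived from the bin -> Hz mapping (respecting zoom and
 * fscale), time labels along the other axis, the "CREATED BY LIBAVFILTER"
 * banner, and a per-channel color bar with a dB scale on the right. Text is
 * rendered with the built-in 8x8 CGA font via drawtext() above. */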
682 static int draw_legend(AVFilterContext *ctx, int64_t samples)
683 {
684  ShowSpectrumContext *s = ctx->priv;
685  AVFilterLink *inlink = ctx->inputs[0];
686  AVFilterLink *outlink = ctx->outputs[0];
687  int ch, y, x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
688  int multi = (s->mode == SEPARATE && s->color_mode == CHANNEL);
689  float spp = samples / (float)sz;
690  char *text;
691  uint8_t *dst;
692  char chlayout_str[128];
693 
694  av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), inlink->channels,
695  inlink->channel_layout);
696 
697  text = av_asprintf("%d Hz | %s", inlink->sample_rate, chlayout_str);
698  if (!text)
699  return AVERROR(ENOMEM);
700 
701  drawtext(s->outpicref, 2, outlink->h - 10, "CREATED BY LIBAVFILTER", 0);
702  drawtext(s->outpicref, outlink->w - 2 - strlen(text) * 10, outlink->h - 10, text, 0);
703  av_freep(&text);
704  if (s->stop) {
705  text = av_asprintf("Zoom: %d Hz - %d Hz", s->start, s->stop);
706  if (!text)
707  return AVERROR(ENOMEM);
708  drawtext(s->outpicref, outlink->w - 2 - strlen(text) * 10, 3, text, 0);
709  av_freep(&text);
710  }
711 
712  dst = s->outpicref->data[0] + (s->start_y - 1) * s->outpicref->linesize[0] + s->start_x - 1;
713  for (x = 0; x < s->w + 1; x++)
714  dst[x] = 200;
715  dst = s->outpicref->data[0] + (s->start_y + s->h) * s->outpicref->linesize[0] + s->start_x - 1;
716  for (x = 0; x < s->w + 1; x++)
717  dst[x] = 200;
718  for (y = 0; y < s->h + 2; y++) {
719  dst = s->outpicref->data[0] + (y + s->start_y - 1) * s->outpicref->linesize[0];
720  dst[s->start_x - 1] = 200;
721  dst[s->start_x + s->w] = 200;
722  }
723  if (s->orientation == VERTICAL) {
724  int h = s->mode == SEPARATE ? s->h / s->nb_display_channels : s->h;
725  int hh = s->mode == SEPARATE ? -(s->h % s->nb_display_channels) + 1 : 1;
726  for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
727  for (y = 0; y < h; y += 20) {
728  dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
729  dst[s->start_x - 2] = 200;
730  dst[s->start_x + s->w + 1] = 200;
731  }
732  for (y = 0; y < h; y += 40) {
733  dst = s->outpicref->data[0] + (s->start_y + h * (ch + 1) - y - hh) * s->outpicref->linesize[0];
734  dst[s->start_x - 3] = 200;
735  dst[s->start_x + s->w + 2] = 200;
736  }
737  dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x;
738  for (x = 0; x < s->w; x+=40)
739  dst[x] = 200;
740  dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x;
741  for (x = 0; x < s->w; x+=80)
742  dst[x] = 200;
743  dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x;
744  for (x = 0; x < s->w; x+=40) {
745  dst[x] = 200;
746  }
747  dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x;
748  for (x = 0; x < s->w; x+=80) {
749  dst[x] = 200;
750  }
751  for (y = 0; y < h; y += 40) {
752  float range = s->stop ? s->stop - s->start : inlink->sample_rate / 2;
753  float bin = s->fscale == F_LINEAR ? y : get_log_hz(y, h, inlink->sample_rate);
754  float hertz = s->start + bin * range / (float)(1 << (int)ceil(log2(h)));
755  char *units;
756 
757  if (hertz == 0)
758  units = av_asprintf("DC");
759  else
760  units = av_asprintf("%.2f", hertz);
761  if (!units)
762  return AVERROR(ENOMEM);
763 
764  drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, h * (ch + 1) + s->start_y - y - 4 - hh, units, 0);
765  av_free(units);
766  }
767  }
768 
769  for (x = 0; x < s->w && s->single_pic; x+=80) {
770  float seconds = x * spp / inlink->sample_rate;
771  char *units = get_time(ctx, seconds, x);
772  if (!units)
773  return AVERROR(ENOMEM);
774 
775  drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->h + s->start_y + 6, units, 0);
776  drawtext(s->outpicref, s->start_x + x - 4 * strlen(units), s->start_y - 12, units, 0);
777  av_free(units);
778  }
779 
780  drawtext(s->outpicref, outlink->w / 2 - 4 * 4, outlink->h - s->start_y / 2, "TIME", 0);
781  drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 14 * 4, "FREQUENCY (Hz)", 1);
782  } else {
783  int w = s->mode == SEPARATE ? s->w / s->nb_display_channels : s->w;
784  for (y = 0; y < s->h; y += 20) {
785  dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
786  dst[s->start_x - 2] = 200;
787  dst[s->start_x + s->w + 1] = 200;
788  }
789  for (y = 0; y < s->h; y += 40) {
790  dst = s->outpicref->data[0] + (s->start_y + y) * s->outpicref->linesize[0];
791  dst[s->start_x - 3] = 200;
792  dst[s->start_x + s->w + 2] = 200;
793  }
794  for (ch = 0; ch < (s->mode == SEPARATE ? s->nb_display_channels : 1); ch++) {
795  dst = s->outpicref->data[0] + (s->start_y - 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
796  for (x = 0; x < w; x+=40)
797  dst[x] = 200;
798  dst = s->outpicref->data[0] + (s->start_y - 3) * s->outpicref->linesize[0] + s->start_x + w * ch;
799  for (x = 0; x < w; x+=80)
800  dst[x] = 200;
801  dst = s->outpicref->data[0] + (s->h + s->start_y + 1) * s->outpicref->linesize[0] + s->start_x + w * ch;
802  for (x = 0; x < w; x+=40) {
803  dst[x] = 200;
804  }
805  dst = s->outpicref->data[0] + (s->h + s->start_y + 2) * s->outpicref->linesize[0] + s->start_x + w * ch;
806  for (x = 0; x < w; x+=80) {
807  dst[x] = 200;
808  }
809  for (x = 0; x < w - 79; x += 80) {
810  float range = s->stop ? s->stop - s->start : inlink->sample_rate / 2;
811  float bin = s->fscale == F_LINEAR ? x : get_log_hz(x, w, inlink->sample_rate);
812  float hertz = s->start + bin * range / (float)(1 << (int)ceil(log2(w)));
813  char *units;
814 
815  if (hertz == 0)
816  units = av_asprintf("DC");
817  else
818  units = av_asprintf("%.2f", hertz);
819  if (!units)
820  return AVERROR(ENOMEM);
821 
822  drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->start_y - 12, units, 0);
823  drawtext(s->outpicref, s->start_x - 4 * strlen(units) + x + w * ch, s->h + s->start_y + 6, units, 0);
824  av_free(units);
825  }
826  }
827  for (y = 0; y < s->h && s->single_pic; y+=40) {
828  float seconds = y * spp / inlink->sample_rate;
829  char *units = get_time(ctx, seconds, x);
830  if (!units)
831  return AVERROR(ENOMEM);
832 
833  drawtext(s->outpicref, s->start_x - 8 * strlen(units) - 4, s->start_y + y - 4, units, 0);
834  av_free(units);
835  }
836  drawtext(s->outpicref, s->start_x / 7, outlink->h / 2 - 4 * 4, "TIME", 1);
837  drawtext(s->outpicref, outlink->w / 2 - 14 * 4, outlink->h - s->start_y / 2, "FREQUENCY (Hz)", 0);
838  }
839 
840  for (ch = 0; ch < (multi ? s->nb_display_channels : 1); ch++) {
841  int h = multi ? s->h / s->nb_display_channels : s->h;
842 
843  for (y = 0; y < h; y++) {
844  float out[3] = { 0., 127.5, 127.5};
845  int chn;
846 
847  for (chn = 0; chn < (s->mode == SEPARATE ? 1 : s->nb_display_channels); chn++) {
848  float yf, uf, vf;
849  int channel = (multi) ? s->nb_display_channels - ch - 1 : chn;
850  float lout[3];
851 
852  color_range(s, channel, &yf, &uf, &vf);
853  pick_color(s, yf, uf, vf, y / (float)h, lout);
854  out[0] += lout[0];
855  out[1] += lout[1];
856  out[2] += lout[2];
857  }
858  memset(s->outpicref->data[0]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[0] + s->w + s->start_x + 20, av_clip_uint8(out[0]), 10);
859  memset(s->outpicref->data[1]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[1] + s->w + s->start_x + 20, av_clip_uint8(out[1]), 10);
860  memset(s->outpicref->data[2]+(s->start_y + h * (ch + 1) - y - 1) * s->outpicref->linesize[2] + s->w + s->start_x + 20, av_clip_uint8(out[2]), 10);
861  }
862 
863  for (y = 0; ch == 0 && y < h; y += h / 10) {
864  float value = 120.f * log10f(1.f - y / (float)h);
865  char *text;
866 
867  if (value < -120)
868  break;
869  text = av_asprintf("%.0f dB", value);
870  if (!text)
871  continue;
872  drawtext(s->outpicref, s->w + s->start_x + 35, s->start_y + y - 5, text, 0);
873  av_free(text);
874  }
875  }
876 
877  return 0;
878 }
879 
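/* get_value() fetches one magnitude (or phase) bin and applies the amplitude
 * scale. For the "log" scale the mapping is a = 1 + log10(m)/6 with m clipped to
 * [1e-6, 1], so m = 1 maps to 1.0 and m = 1e-6 (i.e. -120 dBFS) maps to 0.0; the
 * root scales (sqrt, cbrt, 4thrt, 5thrt) are simple companding curves clipped to
 * [0,1]. */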
880 static float get_value(AVFilterContext *ctx, int ch, int y)
881 {
882  ShowSpectrumContext *s = ctx->priv;
883  float *magnitudes = s->magnitudes[ch];
884  float *phases = s->phases[ch];
885  float a;
886 
887  switch (s->data) {
888  case D_MAGNITUDE:
889  /* get magnitude */
890  a = magnitudes[y];
891  break;
892  case D_PHASE:
893  /* get phase */
894  a = phases[y];
895  break;
896  default:
897  av_assert0(0);
898  }
899 
900  /* apply scale */
901  switch (s->scale) {
902  case LINEAR:
903  a = av_clipf(a, 0, 1);
904  break;
905  case SQRT:
906  a = av_clipf(sqrtf(a), 0, 1);
907  break;
908  case CBRT:
909  a = av_clipf(cbrtf(a), 0, 1);
910  break;
911  case FOURTHRT:
912  a = av_clipf(sqrtf(sqrtf(a)), 0, 1);
913  break;
914  case FIFTHRT:
915  a = av_clipf(powf(a, 0.20), 0, 1);
916  break;
917  case LOG:
918  a = 1.f + log10f(av_clipf(a, 1e-6, 1)) / 6.f; // zero = -120dBFS
919  break;
920  default:
921  av_assert0(0);
922  }
923 
924  return a;
925 }
926 
927 static int plot_channel_lin(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
928 {
929  ShowSpectrumContext *s = ctx->priv;
930  const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
931  const int ch = jobnr;
932  float yf, uf, vf;
933  int y;
934 
935  /* decide color range */
936  color_range(s, ch, &yf, &uf, &vf);
937 
938  /* draw the channel */
939  for (y = 0; y < h; y++) {
940  int row = (s->mode == COMBINED) ? y : ch * h + y;
941  float *out = &s->color_buffer[ch][3 * row];
942  float a = get_value(ctx, ch, y);
943 
944  pick_color(s, yf, uf, vf, a, out);
945  }
946 
947  return 0;
948 }
949 
950 static int plot_channel_log(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
951 {
952  ShowSpectrumContext *s = ctx->priv;
953  AVFilterLink *inlink = ctx->inputs[0];
954  const int h = s->orientation == VERTICAL ? s->channel_height : s->channel_width;
955  const int ch = jobnr;
956  float y, yf, uf, vf;
957  int yy = 0;
958 
959  /* decide color range */
960  color_range(s, ch, &yf, &uf, &vf);
961 
962  /* draw the channel */
963  for (y = 0; y < h && yy < h; yy++) {
964  float pos0 = bin_pos(yy+0, h, inlink->sample_rate);
965  float pos1 = bin_pos(yy+1, h, inlink->sample_rate);
966  float delta = pos1 - pos0;
967  float a0, a1;
968 
969  a0 = get_value(ctx, ch, yy+0);
970  a1 = get_value(ctx, ch, FFMIN(yy+1, h-1));
971  for (float j = pos0; j < pos1 && y + j - pos0 < h; j++) {
972  float row = (s->mode == COMBINED) ? y + j - pos0 : ch * h + y + j - pos0;
973  float *out = &s->color_buffer[ch][3 * FFMIN(lrintf(row), h-1)];
974  float lerpfrac = (j - pos0) / delta;
975 
976  pick_color(s, yf, uf, vf, lerpfrac * a1 + (1.f-lerpfrac) * a0, out);
977  }
978  y += delta;
979  }
980 
981  return 0;
982 }
983 
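/* config_output() derives everything from the requested output size: the FFT
 * size is the smallest power of two giving at least one bin per display row
 * (win_size = 1 << fft_bits >= 2*h, and only win_size/2 bins are shown), the hop
 * size is (1 - overlap) * win_size, and win_scale = 1/sqrt(sum(w[n]^2)) is the
 * window-energy normalization later applied to the magnitudes. For example a
 * 512-pixel-high vertical spectrum gives fft_bits = 10, win_size = 1024 and 512
 * displayed bins. */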
984 static int config_output(AVFilterLink *outlink)
985 {
986  AVFilterContext *ctx = outlink->src;
987  AVFilterLink *inlink = ctx->inputs[0];
988  ShowSpectrumContext *s = ctx->priv;
989  int i, fft_bits, h, w;
990  float overlap;
991 
992  switch (s->fscale) {
993  case F_LINEAR: s->plot_channel = plot_channel_lin; break;
994  case F_LOG: s->plot_channel = plot_channel_log; break;
995  default: return AVERROR_BUG;
996  }
997 
998  s->stop = FFMIN(s->stop, inlink->sample_rate / 2);
999  if (s->stop && s->stop <= s->start) {
1000  av_log(ctx, AV_LOG_ERROR, "Stop frequency should be greater than start.\n");
1001  return AVERROR(EINVAL);
1002  }
1003 
1004  if (!strcmp(ctx->filter->name, "showspectrumpic"))
1005  s->single_pic = 1;
1006 
1007  outlink->w = s->w;
1008  outlink->h = s->h;
1009  outlink->sample_aspect_ratio = (AVRational){1,1};
1010 
1011  if (s->legend) {
1012  s->start_x = (log10(inlink->sample_rate) + 1) * 25;
1013  s->start_y = 64;
1014  outlink->w += s->start_x * 2;
1015  outlink->h += s->start_y * 2;
1016  }
1017 
1018  h = (s->mode == COMBINED || s->orientation == HORIZONTAL) ? s->h : s->h / inlink->channels;
1019  w = (s->mode == COMBINED || s->orientation == VERTICAL) ? s->w : s->w / inlink->channels;
1020  s->channel_height = h;
1021  s->channel_width = w;
1022 
1023  if (s->orientation == VERTICAL) {
1024  /* FFT window size (precision) according to the requested output frame height */
1025  for (fft_bits = 1; 1 << fft_bits < 2 * h; fft_bits++);
1026  } else {
1027  /* FFT window size (precision) according to the requested output frame width */
1028  for (fft_bits = 1; 1 << fft_bits < 2 * w; fft_bits++);
1029  }
1030 
1031  s->win_size = 1 << fft_bits;
1032  s->buf_size = s->win_size << !!s->stop;
1033 
1034  if (!s->fft) {
1035  s->fft = av_calloc(inlink->channels, sizeof(*s->fft));
1036  if (!s->fft)
1037  return AVERROR(ENOMEM);
1038  }
1039 
1040  if (s->stop) {
1041  if (!s->ifft) {
1042  s->ifft = av_calloc(inlink->channels, sizeof(*s->ifft));
1043  if (!s->ifft)
1044  return AVERROR(ENOMEM);
1045  }
1046  }
1047 
1048  /* (re-)configuration if the video output changed (or first init) */
1049  if (fft_bits != s->fft_bits) {
1050  AVFrame *outpicref;
1051 
1052  s->fft_bits = fft_bits;
1053 
1054  /* FFT buffers: x2 for each (display) channel buffer.
1055  * Note: we use free and malloc instead of a realloc-like function to
1056  * make sure the buffer is aligned in memory for the FFT functions. */
1057  for (i = 0; i < s->nb_display_channels; i++) {
1058  if (s->stop) {
1059  av_fft_end(s->ifft[i]);
1060  av_freep(&s->fft_scratch[i]);
1061  }
1062  av_fft_end(s->fft[i]);
1063  av_freep(&s->fft_data[i]);
1064  }
1065  av_freep(&s->fft_data);
1066 
1067  s->nb_display_channels = inlink->channels;
1068  for (i = 0; i < s->nb_display_channels; i++) {
1069  s->fft[i] = av_fft_init(fft_bits + !!s->stop, 0);
1070  if (s->stop) {
1071  s->ifft[i] = av_fft_init(fft_bits + !!s->stop, 1);
1072  if (!s->ifft[i]) {
1073  av_log(ctx, AV_LOG_ERROR, "Unable to create Inverse FFT context. "
1074  "The window size might be too high.\n");
1075  return AVERROR(EINVAL);
1076  }
1077  }
1078  if (!s->fft[i]) {
1079  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
1080  "The window size might be too high.\n");
1081  return AVERROR(EINVAL);
1082  }
1083  }
1084 
1085  s->magnitudes = av_calloc(s->nb_display_channels, sizeof(*s->magnitudes));
1086  if (!s->magnitudes)
1087  return AVERROR(ENOMEM);
1088  for (i = 0; i < s->nb_display_channels; i++) {
1089  s->magnitudes[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->magnitudes));
1090  if (!s->magnitudes[i])
1091  return AVERROR(ENOMEM);
1092  }
1093 
1094  s->phases = av_calloc(s->nb_display_channels, sizeof(*s->phases));
1095  if (!s->phases)
1096  return AVERROR(ENOMEM);
1097  for (i = 0; i < s->nb_display_channels; i++) {
1098  s->phases[i] = av_calloc(s->orientation == VERTICAL ? s->h : s->w, sizeof(**s->phases));
1099  if (!s->phases[i])
1100  return AVERROR(ENOMEM);
1101  }
1102 
1103  av_freep(&s->color_buffer);
1104  s->color_buffer = av_calloc(s->nb_display_channels, sizeof(*s->color_buffer));
1105  if (!s->color_buffer)
1106  return AVERROR(ENOMEM);
1107  for (i = 0; i < s->nb_display_channels; i++) {
1108  s->color_buffer[i] = av_calloc(s->orientation == VERTICAL ? s->h * 3 : s->w * 3, sizeof(**s->color_buffer));
1109  if (!s->color_buffer[i])
1110  return AVERROR(ENOMEM);
1111  }
1112 
1113  s->fft_data = av_calloc(s->nb_display_channels, sizeof(*s->fft_data));
1114  if (!s->fft_data)
1115  return AVERROR(ENOMEM);
1116  s->fft_scratch = av_calloc(s->nb_display_channels, sizeof(*s->fft_scratch));
1117  if (!s->fft_scratch)
1118  return AVERROR(ENOMEM);
1119  for (i = 0; i < s->nb_display_channels; i++) {
1120  s->fft_data[i] = av_calloc(s->buf_size, sizeof(**s->fft_data));
1121  if (!s->fft_data[i])
1122  return AVERROR(ENOMEM);
1123 
1124  s->fft_scratch[i] = av_calloc(s->buf_size, sizeof(**s->fft_scratch));
1125  if (!s->fft_scratch[i])
1126  return AVERROR(ENOMEM);
1127  }
1128 
1129  /* pre-calc windowing function */
1130  s->window_func_lut =
1131  av_realloc_f(s->window_func_lut, s->win_size,
1132  sizeof(*s->window_func_lut));
1133  if (!s->window_func_lut)
1134  return AVERROR(ENOMEM);
1135  generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
1136  if (s->overlap == 1)
1137  s->overlap = overlap;
1138  s->hop_size = (1.f - s->overlap) * s->win_size;
1139  if (s->hop_size < 1) {
1140  av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
1141  return AVERROR(EINVAL);
1142  }
1143 
1144  for (s->win_scale = 0, i = 0; i < s->win_size; i++) {
1145  s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
1146  }
1147  s->win_scale = 1.f / sqrtf(s->win_scale);
1148 
1149  /* prepare the initial picref buffer (black frame) */
1150  av_frame_free(&s->outpicref);
1151  s->outpicref = outpicref =
1152  ff_get_video_buffer(outlink, outlink->w, outlink->h);
1153  if (!outpicref)
1154  return AVERROR(ENOMEM);
1155  outpicref->sample_aspect_ratio = (AVRational){1,1};
1156  for (i = 0; i < outlink->h; i++) {
1157  memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w);
1158  memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
1159  memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
1160  }
1161  outpicref->color_range = AVCOL_RANGE_JPEG;
1162 
1163  if (!s->single_pic && s->legend)
1164  draw_legend(ctx, 0);
1165  }
1166 
1167  if ((s->orientation == VERTICAL && s->xpos >= s->w) ||
1168  (s->orientation == HORIZONTAL && s->xpos >= s->h))
1169  s->xpos = 0;
1170 
1171  s->auto_frame_rate = av_make_q(inlink->sample_rate, s->hop_size);
1172  if (s->orientation == VERTICAL && s->sliding == FULLFRAME)
1173  s->auto_frame_rate.den *= s->w;
1174  if (s->orientation == HORIZONTAL && s->sliding == FULLFRAME)
1175  s->auto_frame_rate.den *= s->h;
1176  if (!s->single_pic && strcmp(s->rate_str, "auto")) {
1177  int ret = av_parse_video_rate(&s->frame_rate, s->rate_str);
1178  if (ret < 0)
1179  return ret;
1180  } else {
1181  s->frame_rate = s->auto_frame_rate;
1182  }
1183  outlink->frame_rate = s->frame_rate;
1184  outlink->time_base = av_inv_q(outlink->frame_rate);
1185 
1186  if (s->orientation == VERTICAL) {
1187  s->combine_buffer =
1188  av_realloc_f(s->combine_buffer, s->h * 3,
1189  sizeof(*s->combine_buffer));
1190  } else {
1191  s->combine_buffer =
1192  av_realloc_f(s->combine_buffer, s->w * 3,
1193  sizeof(*s->combine_buffer));
1194  }
1195 
1196  av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d FFT window size:%d\n",
1197  s->w, s->h, s->win_size);
1198 
1199  av_audio_fifo_free(s->fifo);
1200  s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
1201  if (!s->fifo)
1202  return AVERROR(ENOMEM);
1203  return 0;
1204 }
1205 
1206 #define RE(y, ch) s->fft_data[ch][y].re
1207 #define IM(y, ch) s->fft_data[ch][y].im
1208 #define MAGNITUDE(y, ch) hypotf(RE(y, ch), IM(y, ch))
1209 #define PHASE(y, ch) atan2f(IM(y, ch), RE(y, ch))
1210 
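/* Per-bin post-processing of the FFT output: MAGNITUDE() and PHASE() are defined
 * on the raw complex bins above; magnitudes are scaled by the window
 * normalization win_scale (applied twice when the log amplitude scale is in use)
 * and by the user gain, while phases are mapped from [-pi, pi] to [0, 1]. */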
1211 static int calc_channel_magnitudes(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
1212 {
1213  ShowSpectrumContext *s = ctx->priv;
1214  const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
1215  int y, h = s->orientation == VERTICAL ? s->h : s->w;
1216  const float f = s->gain * w;
1217  const int ch = jobnr;
1218  float *magnitudes = s->magnitudes[ch];
1219 
1220  for (y = 0; y < h; y++)
1221  magnitudes[y] = MAGNITUDE(y, ch) * f;
1222 
1223  return 0;
1224 }
1225 
1226 static int calc_channel_phases(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
1227 {
1228  ShowSpectrumContext *s = ctx->priv;
1229  const int h = s->orientation == VERTICAL ? s->h : s->w;
1230  const int ch = jobnr;
1231  float *phases = s->phases[ch];
1232  int y;
1233 
1234  for (y = 0; y < h; y++)
1235  phases[y] = (PHASE(y, ch) / M_PI + 1) / 2;
1236 
1237  return 0;
1238 }
1239 
1240 static void acalc_magnitudes(ShowSpectrumContext *s)
1241 {
1242  const double w = s->win_scale * (s->scale == LOG ? s->win_scale : 1);
1243  int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
1244  const float f = s->gain * w;
1245 
1246  for (ch = 0; ch < s->nb_display_channels; ch++) {
1247  float *magnitudes = s->magnitudes[ch];
1248 
1249  for (y = 0; y < h; y++)
1250  magnitudes[y] += MAGNITUDE(y, ch) * f;
1251  }
1252 }
1253 
1254 static void scale_magnitudes(ShowSpectrumContext *s, float scale)
1255 {
1256  int ch, y, h = s->orientation == VERTICAL ? s->h : s->w;
1257 
1258  for (ch = 0; ch < s->nb_display_channels; ch++) {
1259  float *magnitudes = s->magnitudes[ch];
1260 
1261  for (y = 0; y < h; y++)
1262  magnitudes[y] *= scale;
1263  }
1264 }
1265 
1266 static void clear_combine_buffer(ShowSpectrumContext *s, int size)
1267 {
1268  int y;
1269 
1270  for (y = 0; y < size; y++) {
1271  s->combine_buffer[3 * y ] = 0;
1272  s->combine_buffer[3 * y + 1] = 127.5;
1273  s->combine_buffer[3 * y + 2] = 127.5;
1274  }
1275 }
1276 
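/* plot_spectrum_column() mixes the per-channel color buffers into combine_buffer
 * and writes one column (or row, for horizontal orientation) into the output
 * frame at s->xpos. The slide mode decides what happens to older data: "replace"
 * overwrites in place as xpos wraps, "scroll"/"rscroll" memmove() the whole plot
 * by one pixel per column, and "fullframe" only emits a frame once the plot has
 * been completely filled. Returns 1 when no frame was pushed downstream. */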
1277 static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples)
1278 {
1279  AVFilterContext *ctx = inlink->dst;
1280  AVFilterLink *outlink = ctx->outputs[0];
1281  ShowSpectrumContext *s = ctx->priv;
1282  AVFrame *outpicref = s->outpicref;
1283  int ret, plane, x, y, z = s->orientation == VERTICAL ? s->h : s->w;
1284 
1285  /* fill a new spectrum column */
1286  /* initialize buffer for combining to black */
1287  clear_combine_buffer(s, z);
1289  ctx->internal->execute(ctx, s->plot_channel, NULL, NULL, s->nb_display_channels);
1288 
1290 
1291  for (y = 0; y < z * 3; y++) {
1292  for (x = 0; x < s->nb_display_channels; x++) {
1293  s->combine_buffer[y] += s->color_buffer[x][y];
1294  }
1295  }
1296 
1298  /* copy to output */
1299  if (s->orientation == VERTICAL) {
1300  if (s->sliding == SCROLL) {
1301  for (plane = 0; plane < 3; plane++) {
1302  for (y = 0; y < s->h; y++) {
1303  uint8_t *p = outpicref->data[plane] + s->start_x +
1304  (y + s->start_y) * outpicref->linesize[plane];
1305  memmove(p, p + 1, s->w - 1);
1306  }
1307  }
1308  s->xpos = s->w - 1;
1309  } else if (s->sliding == RSCROLL) {
1310  for (plane = 0; plane < 3; plane++) {
1311  for (y = 0; y < s->h; y++) {
1312  uint8_t *p = outpicref->data[plane] + s->start_x +
1313  (y + s->start_y) * outpicref->linesize[plane];
1314  memmove(p + 1, p, s->w - 1);
1315  }
1316  }
1317  s->xpos = 0;
1318  }
1319  for (plane = 0; plane < 3; plane++) {
1320  uint8_t *p = outpicref->data[plane] + s->start_x +
1321  (outlink->h - 1 - s->start_y) * outpicref->linesize[plane] +
1322  s->xpos;
1323  for (y = 0; y < s->h; y++) {
1324  *p = lrintf(av_clipf(s->combine_buffer[3 * y + plane], 0, 255));
1325  p -= outpicref->linesize[plane];
1326  }
1327  }
1328  } else {
1329  if (s->sliding == SCROLL) {
1330  for (plane = 0; plane < 3; plane++) {
1331  for (y = 1; y < s->h; y++) {
1332  memmove(outpicref->data[plane] + (y-1 + s->start_y) * outpicref->linesize[plane] + s->start_x,
1333  outpicref->data[plane] + (y + s->start_y) * outpicref->linesize[plane] + s->start_x,
1334  s->w);
1335  }
1336  }
1337  s->xpos = s->h - 1;
1338  } else if (s->sliding == RSCROLL) {
1339  for (plane = 0; plane < 3; plane++) {
1340  for (y = s->h - 1; y >= 1; y--) {
1341  memmove(outpicref->data[plane] + (y + s->start_y) * outpicref->linesize[plane] + s->start_x,
1342  outpicref->data[plane] + (y-1 + s->start_y) * outpicref->linesize[plane] + s->start_x,
1343  s->w);
1344  }
1345  }
1346  s->xpos = 0;
1347  }
1348  for (plane = 0; plane < 3; plane++) {
1349  uint8_t *p = outpicref->data[plane] + s->start_x +
1350  (s->xpos + s->start_y) * outpicref->linesize[plane];
1351  for (x = 0; x < s->w; x++) {
1352  *p = lrintf(av_clipf(s->combine_buffer[3 * x + plane], 0, 255));
1353  p++;
1354  }
1355  }
1356  }
1357 
1358  if (s->sliding != FULLFRAME || s->xpos == 0)
1359  outpicref->pts = av_rescale_q(insamples->pts, inlink->time_base, outlink->time_base);
1360 
1361  s->xpos++;
1362  if (s->orientation == VERTICAL && s->xpos >= s->w)
1363  s->xpos = 0;
1364  if (s->orientation == HORIZONTAL && s->xpos >= s->h)
1365  s->xpos = 0;
1366  if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) {
1367  if (s->old_pts < outpicref->pts) {
1368  if (s->legend) {
1369  char *units = get_time(ctx, insamples->pts /(float)inlink->sample_rate, x);
1370  if (!units)
1371  return AVERROR(ENOMEM);
1372 
1373  if (s->orientation == VERTICAL) {
1374  for (y = 0; y < 10; y++) {
1375  memset(s->outpicref->data[0] + outlink->w / 2 - 4 * s->old_len +
1376  (outlink->h - s->start_y / 2 - 20 + y) * s->outpicref->linesize[0], 0, 10 * s->old_len);
1377  }
1378  drawtext(s->outpicref,
1379  outlink->w / 2 - 4 * strlen(units),
1380  outlink->h - s->start_y / 2 - 20,
1381  units, 0);
1382  } else {
1383  for (y = 0; y < 10 * s->old_len; y++) {
1384  memset(s->outpicref->data[0] + s->start_x / 7 + 20 +
1385  (outlink->h / 2 - 4 * s->old_len + y) * s->outpicref->linesize[0], 0, 10);
1386  }
1387  drawtext(s->outpicref,
1388  s->start_x / 7 + 20,
1389  outlink->h / 2 - 4 * strlen(units),
1390  units, 1);
1391  }
1392  s->old_len = strlen(units);
1393  av_free(units);
1394  }
1395  s->old_pts = outpicref->pts;
1396  ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref));
1397  if (ret < 0)
1398  return ret;
1399  return 0;
1400  }
1401  }
1402 
1403  return 1;
1404 }
1405 
1406 #if CONFIG_SHOWSPECTRUM_FILTER
1407 
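/* activate(): input samples are buffered in an AVAudioFifo; once at least
 * win_size samples are available, a full window is peeked (not consumed), so
 * consecutive windows overlap by win_size - hop_size samples, transformed and
 * plotted, and hop_size samples are then dropped from the FIFO. At EOF in
 * fullframe mode the not-yet-filled part of the picture is cleared to black
 * before the final frame is pushed. */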
1408 static int activate(AVFilterContext *ctx)
1409 {
1410  AVFilterLink *inlink = ctx->inputs[0];
1411  AVFilterLink *outlink = ctx->outputs[0];
1412  ShowSpectrumContext *s = ctx->priv;
1413  int ret;
1414 
1415  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
1416 
1417  if (av_audio_fifo_size(s->fifo) < s->win_size) {
1418  AVFrame *frame = NULL;
1419 
1420  ret = ff_inlink_consume_frame(inlink, &frame);
1421  if (ret < 0)
1422  return ret;
1423  if (ret > 0) {
1424  s->pts = frame->pts;
1425  s->consumed = 0;
1426 
1427  av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples);
1428  av_frame_free(&frame);
1429  }
1430  }
1431 
1432  if (s->outpicref && av_audio_fifo_size(s->fifo) >= s->win_size) {
1433  AVFrame *fin = ff_get_audio_buffer(inlink, s->win_size);
1434  if (!fin)
1435  return AVERROR(ENOMEM);
1436 
1437  fin->pts = s->pts + s->consumed;
1438  s->consumed += s->hop_size;
1439  ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data,
1440  FFMIN(s->win_size, av_audio_fifo_size(s->fifo)));
1441  if (ret < 0) {
1442  av_frame_free(&fin);
1443  return ret;
1444  }
1445 
1446  av_assert0(fin->nb_samples == s->win_size);
1448  ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
1447 
1449 
1450  if (s->data == D_MAGNITUDE)
1451  ctx->internal->execute(ctx, calc_channel_magnitudes, NULL, NULL, s->nb_display_channels);
1452 
1453  if (s->data == D_PHASE)
1454  ctx->internal->execute(ctx, calc_channel_phases, NULL, NULL, s->nb_display_channels);
1455 
1456  ret = plot_spectrum_column(inlink, fin);
1457 
1458  av_frame_free(&fin);
1459  av_audio_fifo_drain(s->fifo, s->hop_size);
1460  if (ret <= 0)
1461  return ret;
1462  }
1463 
1464  if (ff_outlink_get_status(inlink) == AVERROR_EOF &&
1465  s->sliding == FULLFRAME &&
1466  s->xpos > 0 && s->outpicref) {
1467  int64_t pts;
1468 
1469  if (s->orientation == VERTICAL) {
1470  for (int i = 0; i < outlink->h; i++) {
1471  memset(s->outpicref->data[0] + i * s->outpicref->linesize[0] + s->xpos, 0, outlink->w - s->xpos);
1472  memset(s->outpicref->data[1] + i * s->outpicref->linesize[1] + s->xpos, 128, outlink->w - s->xpos);
1473  memset(s->outpicref->data[2] + i * s->outpicref->linesize[2] + s->xpos, 128, outlink->w - s->xpos);
1474  }
1475  } else {
1476  for (int i = s->xpos; i < outlink->h; i++) {
1477  memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w);
1478  memset(s->outpicref->data[1] + i * s->outpicref->linesize[1], 128, outlink->w);
1479  memset(s->outpicref->data[2] + i * s->outpicref->linesize[2], 128, outlink->w);
1480  }
1481  }
1482  s->outpicref->pts += s->consumed;
1483  pts = s->outpicref->pts;
1484  ret = ff_filter_frame(outlink, s->outpicref);
1485  s->outpicref = NULL;
1486  ff_outlink_set_status(outlink, AVERROR_EOF, pts);
1487  return 0;
1488  }
1489 
1490  FF_FILTER_FORWARD_STATUS(inlink, outlink);
1491  if (ff_outlink_frame_wanted(outlink) && av_audio_fifo_size(s->fifo) < s->win_size) {
1492  ff_inlink_request_frame(inlink);
1493  return 0;
1494  }
1495 
1496  if (av_audio_fifo_size(s->fifo) >= s->win_size) {
1497  ff_filter_set_ready(ctx, 10);
1498  return 0;
1499  }
1500  return FFERROR_NOT_READY;
1501 }
1502 
1503 static const AVFilterPad showspectrum_inputs[] = {
1504  {
1505  .name = "default",
1506  .type = AVMEDIA_TYPE_AUDIO,
1507  },
1508  { NULL }
1509 };
1510 
1511 static const AVFilterPad showspectrum_outputs[] = {
1512  {
1513  .name = "default",
1514  .type = AVMEDIA_TYPE_VIDEO,
1515  .config_props = config_output,
1516  },
1517  { NULL }
1518 };
1519 
1520 AVFilter ff_avf_showspectrum = {
1521  .name = "showspectrum",
1522  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
1523  .uninit = uninit,
1524  .query_formats = query_formats,
1525  .priv_size = sizeof(ShowSpectrumContext),
1526  .inputs = showspectrum_inputs,
1527  .outputs = showspectrum_outputs,
1528  .activate = activate,
1529  .priv_class = &showspectrum_class,
1530  .flags = AVFILTER_FLAG_SLICE_THREADS,
1531 };
1532 #endif // CONFIG_SHOWSPECTRUM_FILTER
1533 
1534 #if CONFIG_SHOWSPECTRUMPIC_FILTER
1535 
1536 static const AVOption showspectrumpic_options[] = {
1537  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
1538  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "4096x2048"}, 0, 0, FLAGS },
1539  { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_MODES-1, FLAGS, "mode" },
1540  { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
1541  { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
1542  { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=INTENSITY}, 0, NB_CLMODES-1, FLAGS, "color" },
1543  { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
1544  { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
1545  { "rainbow", "rainbow based coloring", 0, AV_OPT_TYPE_CONST, {.i64=RAINBOW}, 0, 0, FLAGS, "color" },
1546  { "moreland", "moreland based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MORELAND}, 0, 0, FLAGS, "color" },
1547  { "nebulae", "nebulae based coloring", 0, AV_OPT_TYPE_CONST, {.i64=NEBULAE}, 0, 0, FLAGS, "color" },
1548  { "fire", "fire based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIRE}, 0, 0, FLAGS, "color" },
1549  { "fiery", "fiery based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FIERY}, 0, 0, FLAGS, "color" },
1550  { "fruit", "fruit based coloring", 0, AV_OPT_TYPE_CONST, {.i64=FRUIT}, 0, 0, FLAGS, "color" },
1551  { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" },
1552  { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" },
1553  { "green", "green based coloring", 0, AV_OPT_TYPE_CONST, {.i64=GREEN}, 0, 0, FLAGS, "color" },
1554  { "viridis", "viridis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS}, 0, 0, FLAGS, "color" },
1555  { "plasma", "plasma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=PLASMA}, 0, 0, FLAGS, "color" },
1556  { "cividis", "cividis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS}, 0, 0, FLAGS, "color" },
1557  { "terrain", "terrain based coloring", 0, AV_OPT_TYPE_CONST, {.i64=TERRAIN}, 0, 0, FLAGS, "color" },
1558  { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" },
1559  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
1560  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
1561  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
1562  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
1563  { "4thrt","4th root", 0, AV_OPT_TYPE_CONST, {.i64=FOURTHRT}, 0, 0, FLAGS, "scale" },
1564  { "5thrt","5th root", 0, AV_OPT_TYPE_CONST, {.i64=FIFTHRT}, 0, 0, FLAGS, "scale" },
1565  { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=F_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
1566  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=F_LINEAR}, 0, 0, FLAGS, "fscale" },
1567  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=F_LOG}, 0, 0, FLAGS, "fscale" },
1568  { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
1569  { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
1570  { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
1571  { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
1572  { "hann", "Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
1573  { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
1574  { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
1575  { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
1576  { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
1577  { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
1578  { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
1579  { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
1580  { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
1581  { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
1582  { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
1583  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
1584  { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
1585  { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
1586  { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
1587  { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
1588  { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
1589  { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
1590  { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" },
1591  { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" },
1592  { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" },
1593  { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" },
1594  { "gain", "set scale gain", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl = 1}, 0, 128, FLAGS },
1595  { "legend", "draw legend", OFFSET(legend), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
1596  { "rotation", "color rotation", OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl = 0}, -1, 1, FLAGS },
1597  { "start", "start frequency", OFFSET(start), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
1598  { "stop", "stop frequency", OFFSET(stop), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT32_MAX, FLAGS },
1599  { NULL }
1600 };
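/* Example usage (illustrative, not part of the original source; file names are
 * placeholders):
 *
 *     ffmpeg -i input.wav -lavfi showspectrumpic=s=1024x512:scale=log:legend=1 spectrum.png
 *
 * writes a single spectrogram picture of the whole input with the frequency
 * and level legend enabled.
 */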
1601 
1602 AVFILTER_DEFINE_CLASS(showspectrumpic);
1603 
1604 static int showspectrumpic_request_frame(AVFilterLink *outlink)
1605 {
1606  AVFilterContext *ctx = outlink->src;
1607  ShowSpectrumContext *s = ctx->priv;
1608  AVFilterLink *inlink = ctx->inputs[0];
1609  int ret, samples;
1610 
1611  ret = ff_request_frame(inlink);
1612  samples = av_audio_fifo_size(s->fifo);
1613  if (ret == AVERROR_EOF && s->outpicref && samples > 0) {
1614  int consumed = 0;
1615  int x = 0, sz = s->orientation == VERTICAL ? s->w : s->h;
1616  int ch, spf, spb;
1617  AVFrame *fin;
1618 
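 /* Explanatory comment (not in the original source): spf is the per-window hop
  * in samples, chosen so that the buffered input is spread evenly over the sz
  * output columns; spb is the number of samples that contribute to one column,
  * i.e. magnitudes of consecutive windows are accumulated until spb samples
  * have been consumed, then averaged and plotted as a single column. */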
1619  spf = s->win_size * (samples / ((s->win_size * sz) * ceil(samples / (float)(s->win_size * sz))));
1620  spf = FFMAX(1, spf);
1621 
1622  spb = (samples / (spf * sz)) * spf;
1623 
1624  fin = ff_get_audio_buffer(inlink, s->win_size);
1625  if (!fin)
1626  return AVERROR(ENOMEM);
1627 
1628  while (x < sz) {
1629  ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
1630  if (ret < 0) {
1631  av_frame_free(&fin);
1632  return ret;
1633  }
1634 
1635  av_audio_fifo_drain(s->fifo, spf);
1636 
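 /* Note added for exposition (not in the original source): if fewer than
  * win_size samples were left in the FIFO, the unfilled tail of the analysis
  * buffer is cleared below so the final FFT window is effectively zero-padded. */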
1637  if (ret < s->win_size) {
1638  for (ch = 0; ch < s->nb_display_channels; ch++) {
1639  memset(fin->extended_data[ch] + ret * sizeof(float), 0,
1640  (s->win_size - ret) * sizeof(float));
1641  }
1642  }
1643 
1644  ctx->internal->execute(ctx, run_channel_fft, fin, NULL, s->nb_display_channels);
1645  acalc_magnitudes(s);
1646 
1647  consumed += spf;
1648  if (consumed >= spb) {
1649  int h = s->orientation == VERTICAL ? s->h : s->w;
1650 
1651  scale_magnitudes(s, 1.f / (consumed / spf));
1652  plot_spectrum_column(inlink, fin);
1653  consumed = 0;
1654  x++;
1655  for (ch = 0; ch < s->nb_display_channels; ch++)
1656  memset(s->magnitudes[ch], 0, h * sizeof(float));
1657  }
1658  }
1659 
1660  av_frame_free(&fin);
1661  s->outpicref->pts = 0;
1662 
1663  if (s->legend)
1664  draw_legend(ctx, samples);
1665 
1666  ret = ff_filter_frame(outlink, s->outpicref);
1667  s->outpicref = NULL;
1668  }
1669 
1670  return ret;
1671 }
1672 
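/* Note added for exposition (not in the original source): the input pad below
 * only queues incoming samples into the audio FIFO; the spectrum picture is
 * rendered once, in showspectrumpic_request_frame() above, when the input
 * reaches EOF. */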
1673 static int showspectrumpic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
1674 {
1675  AVFilterContext *ctx = inlink->dst;
1676  ShowSpectrumContext *s = ctx->priv;
1677  int ret;
1678 
1679  ret = av_audio_fifo_write(s->fifo, (void **)insamples->extended_data, insamples->nb_samples);
1680  av_frame_free(&insamples);
1681  return ret;
1682 }
1683 
1684 static const AVFilterPad showspectrumpic_inputs[] = {
1685  {
1686  .name = "default",
1687  .type = AVMEDIA_TYPE_AUDIO,
1688  .filter_frame = showspectrumpic_filter_frame,
1689  },
1690  { NULL }
1691 };
1692 
1693 static const AVFilterPad showspectrumpic_outputs[] = {
1694  {
1695  .name = "default",
1696  .type = AVMEDIA_TYPE_VIDEO,
1697  .config_props = config_output,
1698  .request_frame = showspectrumpic_request_frame,
1699  },
1700  { NULL }
1701 };
1702 
1703 AVFilter ff_avf_showspectrumpic = {
1704  .name = "showspectrumpic",
1705  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output single picture."),
1706  .uninit = uninit,
1707  .query_formats = query_formats,
1708  .priv_size = sizeof(ShowSpectrumContext),
1709  .inputs = showspectrumpic_inputs,
1710  .outputs = showspectrumpic_outputs,
1711  .priv_class = &showspectrumpic_class,
1712  .flags = AVFILTER_FLAG_SLICE_THREADS,
1713 };
1714 
1715 #endif // CONFIG_SHOWSPECTRUMPIC_FILTER