FFmpeg
avf_showcqt.c
1 /*
2  * Copyright (c) 2014-2015 Muhammad Faiz <mfcc64@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "config.h"
22 #include "libavcodec/avfft.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/xga_font_data.h"
26 #include "libavutil/eval.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavutil/time.h"
29 #include "avfilter.h"
30 #include "internal.h"
31 #include "lavfutils.h"
32 #include "lswsutils.h"
33 
34 #if CONFIG_LIBFREETYPE
35 #include <ft2build.h>
36 #include FT_FREETYPE_H
37 #endif
38 
39 #if CONFIG_LIBFONTCONFIG
40 #include <fontconfig/fontconfig.h>
41 #endif
42 
43 #include "avf_showcqt.h"
44 
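/* default frequency range: the defaults below span exactly 10 octaves
 * (ENDFREQ = 1024 * BASEFREQ), roughly 20 Hz to 20.5 kHz */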
45 #define BASEFREQ 20.01523126408007475
46 #define ENDFREQ 20495.59681441799654
47 #define TLENGTH "384*tc/(384+tc*f)"
48 #define TLENGTH_MIN 0.001
49 #define VOLUME_MAX 100.0
50 #define FONTCOLOR "st(0, (midi(f)-59.5)/12);" \
51  "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
52  "r(1-ld(1)) + b(ld(1))"
53 #define CSCHEME "1|0.5|0|0|0.5|1"
54 #define PTS_STEP 10
55 #define PTS_TOLERANCE 1
56 
57 #define OFFSET(x) offsetof(ShowCQTContext, x)
58 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
59 
60 static const AVOption showcqt_options[] = {
61  { "size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
62  { "s", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
63  { "fps", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
64  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
65  { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 1, 1000, FLAGS },
66  { "bar_h", "set bargraph height", OFFSET(bar_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
67  { "axis_h", "set axis height", OFFSET(axis_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
68  { "sono_h", "set sonogram height", OFFSET(sono_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
69  { "fullhd", "set fullhd size", OFFSET(fullhd), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
70  { "sono_v", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, 0, 0, FLAGS },
71  { "volume", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, 0, 0, FLAGS },
72  { "bar_v", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, 0, 0, FLAGS },
73  { "volume2", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, 0, 0, FLAGS },
74  { "sono_g", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
75  { "gamma", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
76  { "bar_g", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
77  { "gamma2", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
78  { "bar_t", "set bar transparency", OFFSET(bar_t), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.0, 1.0, FLAGS },
79  { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
80  { "tc", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.002, 1.0, FLAGS },
81  { "attack", "set attack time", OFFSET(attack), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0.0, 1.0, FLAGS },
82  { "basefreq", "set base frequency", OFFSET(basefreq), AV_OPT_TYPE_DOUBLE, { .dbl = BASEFREQ }, 10.0, 100000.0, FLAGS },
83  { "endfreq", "set end frequency", OFFSET(endfreq), AV_OPT_TYPE_DOUBLE, { .dbl = ENDFREQ }, 10.0, 100000.0, FLAGS },
84  { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.1, 10.0, FLAGS },
85  { "tlength", "set tlength", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH }, 0, 0, FLAGS },
86  { "count", "set transform count", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
87  { "fcount", "set frequency count", OFFSET(fcount), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, FLAGS },
88  { "fontfile", "set axis font file", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
89  { "font", "set axis font", OFFSET(font), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
90  { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR }, 0, 0, FLAGS },
91  { "axisfile", "set axis image", OFFSET(axisfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
92  { "axis", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
93  { "text", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
94  { "csp", "set color space", OFFSET(csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, INT_MAX, FLAGS, "csp" },
95  { "unspecified", "unspecified", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_UNSPECIFIED }, 0, 0, FLAGS, "csp" },
96  { "bt709", "bt709", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 }, 0, 0, FLAGS, "csp" },
97  { "fcc", "fcc", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_FCC }, 0, 0, FLAGS, "csp" },
98  { "bt470bg", "bt470bg", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT470BG }, 0, 0, FLAGS, "csp" },
99  { "smpte170m", "smpte170m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE170M }, 0, 0, FLAGS, "csp" },
100  { "smpte240m", "smpte240m", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_SMPTE240M }, 0, 0, FLAGS, "csp" },
101  { "bt2020ncl", "bt2020ncl", 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, "csp" },
102  { "cscheme", "set color scheme", OFFSET(cscheme), AV_OPT_TYPE_STRING, { .str = CSCHEME }, 0, 0, FLAGS },
103  { NULL }
104 };
105 
106 AVFILTER_DEFINE_CLASS(showcqt);
107 
108 static void common_uninit(ShowCQTContext *s)
109 {
110  int k;
111  int level = AV_LOG_DEBUG;
112  int64_t plot_time;
113 
114  if (s->fft_time)
115  av_log(s->ctx, level, "fft_time = %16.3f s.\n", s->fft_time * 1e-6);
116  if (s->cqt_time)
117  av_log(s->ctx, level, "cqt_time = %16.3f s.\n", s->cqt_time * 1e-6);
118  if (s->process_cqt_time)
119  av_log(s->ctx, level, "process_cqt_time = %16.3f s.\n", s->process_cqt_time * 1e-6);
120  if (s->update_sono_time)
121  av_log(s->ctx, level, "update_sono_time = %16.3f s.\n", s->update_sono_time * 1e-6);
122  if (s->alloc_time)
123  av_log(s->ctx, level, "alloc_time = %16.3f s.\n", s->alloc_time * 1e-6);
124  if (s->bar_time)
125  av_log(s->ctx, level, "bar_time = %16.3f s.\n", s->bar_time * 1e-6);
126  if (s->axis_time)
127  av_log(s->ctx, level, "axis_time = %16.3f s.\n", s->axis_time * 1e-6);
128  if (s->sono_time)
129  av_log(s->ctx, level, "sono_time = %16.3f s.\n", s->sono_time * 1e-6);
130 
131  plot_time = s->fft_time + s->cqt_time + s->process_cqt_time + s->update_sono_time
132  + s->alloc_time + s->bar_time + s->axis_time + s->sono_time;
133  if (plot_time)
134  av_log(s->ctx, level, "plot_time = %16.3f s.\n", plot_time * 1e-6);
135 
136  s->fft_time = s->cqt_time = s->process_cqt_time = s->update_sono_time
137  = s->alloc_time = s->bar_time = s->axis_time = s->sono_time = 0;
138  /* axis_frame may be a non-reference-counted frame */
139  if (s->axis_frame && !s->axis_frame->buf[0]) {
140  av_freep(s->axis_frame->data);
141  for (k = 0; k < 4; k++)
142  s->axis_frame->data[k] = NULL;
143  }
144 
145  av_frame_free(&s->axis_frame);
146  av_frame_free(&s->sono_frame);
147  av_fft_end(s->fft_ctx);
148  s->fft_ctx = NULL;
149  if (s->coeffs)
150  for (k = 0; k < s->cqt_len; k++)
151  av_freep(&s->coeffs[k].val);
152  av_freep(&s->coeffs);
153  av_freep(&s->fft_data);
154  av_freep(&s->fft_result);
155  av_freep(&s->cqt_result);
156  av_freep(&s->attack_data);
157  av_freep(&s->c_buf);
158  av_freep(&s->h_buf);
159  av_freep(&s->rcp_h_buf);
160  av_freep(&s->freq);
161  av_freep(&s->sono_v_buf);
162  av_freep(&s->bar_v_buf);
163 }
164 
165 static double *create_freq_table(double base, double end, int n)
166 {
167  double log_base, log_end;
168  double rcp_n = 1.0 / n;
169  double *freq;
170  int x;
171 
172  freq = av_malloc_array(n, sizeof(*freq));
173  if (!freq)
174  return NULL;
175 
176  log_base = log(base);
177  log_end = log(end);
178  for (x = 0; x < n; x++) {
179  double log_freq = log_base + (x + 0.5) * (log_end - log_base) * rcp_n;
180  freq[x] = exp(log_freq);
181  }
182  return freq;
183 }
184 
185 static double clip_with_log(void *log_ctx, const char *name,
186  double val, double min, double max,
187  double nan_replace, int idx)
188 {
189  int level = AV_LOG_WARNING;
190  if (isnan(val)) {
191  av_log(log_ctx, level, "[%d] %s is nan, setting it to %g.\n",
192  idx, name, nan_replace);
193  val = nan_replace;
194  } else if (val < min) {
195  av_log(log_ctx, level, "[%d] %s is too low (%g), setting it to %g.\n",
196  idx, name, val, min);
197  val = min;
198  } else if (val > max) {
199  av_log(log_ctx, level, "[%d] %s is too high (%g), setting it to %g.\n",
200  idx, name, val, max);
201  val = max;
202  }
203  return val;
204 }
205 
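/* frequency weighting helpers exposed to the sono_v/bar_v volume expressions;
 * they follow the standard (unnormalized) A-, B- and C-weighting curves */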
206 static double a_weighting(void *p, double f)
207 {
208  double ret = 12200.0*12200.0 * (f*f*f*f);
209  ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
210  sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
211  return ret;
212 }
213 
214 static double b_weighting(void *p, double f)
215 {
216  double ret = 12200.0*12200.0 * (f*f*f);
217  ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
218  return ret;
219 }
220 
221 static double c_weighting(void *p, double f)
222 {
223  double ret = 12200.0*12200.0 * (f*f);
224  ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
225  return ret;
226 }
227 
228 static int init_volume(ShowCQTContext *s)
229 {
230  const char *func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
231  const char *sono_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "bar_v", NULL };
232  const char *bar_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "sono_v", NULL };
233  double (*funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting };
234  AVExpr *sono = NULL, *bar = NULL;
235  int x, ret = AVERROR(ENOMEM);
236 
237  s->sono_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->sono_v_buf));
238  s->bar_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->bar_v_buf));
239  if (!s->sono_v_buf || !s->bar_v_buf)
240  goto error;
241 
242  if ((ret = av_expr_parse(&sono, s->sono_v, sono_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
243  goto error;
244 
245  if ((ret = av_expr_parse(&bar, s->bar_v, bar_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
246  goto error;
247 
248  for (x = 0; x < s->cqt_len; x++) {
249  double vars[] = { s->timeclamp, s->timeclamp, s->freq[x], s->freq[x], s->freq[x], 0.0 };
250  double vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
251  vars[5] = vol;
252  vol = clip_with_log(s->ctx, "bar_v", av_expr_eval(bar, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
253  s->bar_v_buf[x] = vol * vol;
254  vars[5] = vol;
255  vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
256  s->sono_v_buf[x] = vol * vol;
257  }
258  av_expr_free(sono);
259  av_expr_free(bar);
260  return 0;
261 
262 error:
263  av_freep(&s->sono_v_buf);
264  av_freep(&s->bar_v_buf);
265  av_expr_free(sono);
266  av_expr_free(bar);
267  return ret;
268 }
269 
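/* compute one CQT bin per output element; the stereo input is packed into a
 * single complex FFT (left channel = real part, right channel = imaginary
 * part), so each channel is recovered from the conjugate-symmetric pair of
 * bins i and fft_len - i, and dst[k].re / dst[k].im receive the squared
 * magnitudes of the left / right channel respectively */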
270 static void cqt_calc(FFTComplex *dst, const FFTComplex *src, const Coeffs *coeffs,
271  int len, int fft_len)
272 {
273  int k, x, i, j;
274  for (k = 0; k < len; k++) {
275  FFTComplex l, r, a = {0,0}, b = {0,0};
276 
277  for (x = 0; x < coeffs[k].len; x++) {
278  FFTSample u = coeffs[k].val[x];
279  i = coeffs[k].start + x;
280  j = fft_len - i;
281  a.re += u * src[i].re;
282  a.im += u * src[i].im;
283  b.re += u * src[j].re;
284  b.im += u * src[j].im;
285  }
286 
287  /* separate left and right (and multiply by 2.0) */
288  l.re = a.re + b.re;
289  l.im = a.im - b.im;
290  r.re = b.im + a.im;
291  r.im = b.re - a.re;
292  dst[k].re = l.re * l.re + l.im * l.im;
293  dst[k].im = r.re * r.re + r.im * r.im;
294  }
295 }
296 
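/* precompute, for every CQT bin, a short run of Nuttall-windowed spectral
 * coefficients centered on the bin frequency; the window length follows the
 * tlength expression, clamped to timeclamp */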
297 static int init_cqt(ShowCQTContext *s)
298 {
299  const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
300  AVExpr *expr = NULL;
301  int rate = s->ctx->inputs[0]->sample_rate;
302  int nb_cqt_coeffs = 0;
303  int k, x, ret;
304 
305  if ((ret = av_expr_parse(&expr, s->tlength, var_names, NULL, NULL, NULL, NULL, 0, s->ctx)) < 0)
306  goto error;
307 
308  ret = AVERROR(ENOMEM);
309  if (!(s->coeffs = av_calloc(s->cqt_len, sizeof(*s->coeffs))))
310  goto error;
311 
312  for (k = 0; k < s->cqt_len; k++) {
313  double vars[] = { s->timeclamp, s->timeclamp, s->freq[k], s->freq[k], s->freq[k] };
314  double flen, center, tlength;
315  int start, end, m = k;
316 
317  if (s->freq[k] > 0.5 * rate)
318  continue;
319  tlength = clip_with_log(s->ctx, "tlength", av_expr_eval(expr, vars, NULL),
320  TLENGTH_MIN, s->timeclamp, s->timeclamp, k);
321 
322  flen = 8.0 * s->fft_len / (tlength * rate);
323  center = s->freq[k] * s->fft_len / rate;
324  start = FFMAX(0, ceil(center - 0.5 * flen));
325  end = FFMIN(s->fft_len, floor(center + 0.5 * flen));
326 
327  s->coeffs[m].start = start & ~(s->cqt_align - 1);
328  s->coeffs[m].len = (end | (s->cqt_align - 1)) + 1 - s->coeffs[m].start;
329  nb_cqt_coeffs += s->coeffs[m].len;
330  if (!(s->coeffs[m].val = av_calloc(s->coeffs[m].len, sizeof(*s->coeffs[m].val))))
331  goto error;
332 
333  for (x = start; x <= end; x++) {
334  int sign = (x & 1) ? (-1) : 1;
335  double y = 2.0 * M_PI * (x - center) * (1.0 / flen);
336  /* nuttall window */
337  double w = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
338  w *= sign * (1.0 / s->fft_len);
339  s->coeffs[m].val[x - s->coeffs[m].start] = w;
340  }
341 
342  if (s->permute_coeffs)
343  s->permute_coeffs(s->coeffs[m].val, s->coeffs[m].len);
344  }
345 
346  av_expr_free(expr);
347  av_log(s->ctx, AV_LOG_INFO, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
348  return 0;
349 
350 error:
351  av_expr_free(expr);
352  if (s->coeffs)
353  for (k = 0; k < s->cqt_len; k++)
354  av_freep(&s->coeffs[k].val);
355  av_freep(&s->coeffs);
356  return ret;
357 }
358 
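/* allocate a frame of the given format and clear it to black: zero for RGB
 * formats, luma 16 / chroma 128 for YUV, and fully transparent alpha */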
359 static AVFrame *alloc_frame_empty(enum AVPixelFormat format, int w, int h)
360 {
361  AVFrame *out;
362  out = av_frame_alloc();
363  if (!out)
364  return NULL;
365  out->format = format;
366  out->width = w;
367  out->height = h;
368  if (av_frame_get_buffer(out, 0) < 0) {
369  av_frame_free(&out);
370  return NULL;
371  }
372  if (format == AV_PIX_FMT_RGB24 || format == AV_PIX_FMT_RGBA) {
373  memset(out->data[0], 0, out->linesize[0] * h);
374  } else {
375  int hh = (format == AV_PIX_FMT_YUV420P || format == AV_PIX_FMT_YUVA420P) ? h / 2 : h;
376  memset(out->data[0], 16, out->linesize[0] * h);
377  memset(out->data[1], 128, out->linesize[1] * hh);
378  memset(out->data[2], 128, out->linesize[2] * hh);
379  if (out->data[3])
380  memset(out->data[3], 0, out->linesize[3] * h);
381  }
382  return out;
383 }
384 
385 static enum AVPixelFormat convert_axis_pixel_format(enum AVPixelFormat format)
386 {
387  switch (format) {
388  case AV_PIX_FMT_RGB24: format = AV_PIX_FMT_RGBA; break;
389  case AV_PIX_FMT_YUV444P:
390  case AV_PIX_FMT_YUV422P:
391  case AV_PIX_FMT_YUV420P: format = AV_PIX_FMT_YUVA444P; break;
392  }
393  return format;
394 }
395 
396 static int init_axis_empty(ShowCQTContext *s)
397 {
398  if (!(s->axis_frame = alloc_frame_empty(convert_axis_pixel_format(s->format), s->width, s->axis_h)))
399  return AVERROR(ENOMEM);
400  return 0;
401 }
402 
403 static int init_axis_from_file(ShowCQTContext *s)
404 {
405  uint8_t *tmp_data[4] = { NULL };
406  int tmp_linesize[4];
407  enum AVPixelFormat tmp_format;
408  int tmp_w, tmp_h, ret;
409 
410  if ((ret = ff_load_image(tmp_data, tmp_linesize, &tmp_w, &tmp_h, &tmp_format,
411  s->axisfile, s->ctx)) < 0)
412  goto error;
413 
414  ret = AVERROR(ENOMEM);
415  if (!(s->axis_frame = av_frame_alloc()))
416  goto error;
417 
418  if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
419  convert_axis_pixel_format(s->format), tmp_data, tmp_linesize, tmp_w, tmp_h,
420  tmp_format, s->ctx)) < 0)
421  goto error;
422 
423  s->axis_frame->width = s->width;
424  s->axis_frame->height = s->axis_h;
425  s->axis_frame->format = convert_axis_pixel_format(s->format);
426  av_freep(tmp_data);
427  return 0;
428 
429 error:
430  av_frame_free(&s->axis_frame);
431  av_freep(tmp_data);
432  return ret;
433 }
434 
435 static double midi(void *p, double f)
436 {
437  return log2(f/440.0) * 12.0 + 69.0;
438 }
439 
440 static double r_func(void *p, double x)
441 {
442  x = av_clipd(x, 0.0, 1.0);
443  return lrint(x*255.0) << 16;
444 }
445 
446 static double g_func(void *p, double x)
447 {
448  x = av_clipd(x, 0.0, 1.0);
449  return lrint(x*255.0) << 8;
450 }
451 
452 static double b_func(void *p, double x)
453 {
454  x = av_clipd(x, 0.0, 1.0);
455  return lrint(x*255.0);
456 }
457 
458 static int init_axis_color(ShowCQTContext *s, AVFrame *tmp, int half)
459 {
460  const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
461  const char *func_names[] = { "midi", "r", "g", "b", NULL };
462  double (*funcs[])(void *, double) = { midi, r_func, g_func, b_func };
463  AVExpr *expr = NULL;
464  double *freq = NULL;
465  int x, xs, y, ret;
466  int width = half ? 1920/2 : 1920, height = half ? 16 : 32;
467  int step = half ? 2 : 1;
468 
469  if (s->basefreq != (double) BASEFREQ || s->endfreq != (double) ENDFREQ) {
470  av_log(s->ctx, AV_LOG_WARNING, "font axis rendering is not implemented for non-default frequency ranges,"
471  " please use the axisfile option instead.\n");
472  return AVERROR(EINVAL);
473  }
474 
475  if (s->cqt_len == 1920)
476  freq = s->freq;
477  else if (!(freq = create_freq_table(s->basefreq, s->endfreq, 1920)))
478  return AVERROR(ENOMEM);
479 
480  if ((ret = av_expr_parse(&expr, s->fontcolor, var_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0) {
481  if (freq != s->freq)
482  av_freep(&freq);
483  return ret;
484  }
485 
486  for (x = 0, xs = 0; x < width; x++, xs += step) {
487  double vars[] = { s->timeclamp, s->timeclamp, freq[xs], freq[xs], freq[xs] };
488  int color = (int) av_expr_eval(expr, vars, NULL);
489  uint8_t r = (color >> 16) & 0xFF, g = (color >> 8) & 0xFF, b = color & 0xFF;
490  uint8_t *data = tmp->data[0];
491  int linesize = tmp->linesize[0];
492  for (y = 0; y < height; y++) {
493  data[linesize * y + 4 * x] = r;
494  data[linesize * y + 4 * x + 1] = g;
495  data[linesize * y + 4 * x + 2] = b;
496  }
497  }
498 
499  av_expr_free(expr);
500  if (freq != s->freq)
501  av_freep(&freq);
502  return 0;
503 }
504 
505 static int render_freetype(ShowCQTContext *s, AVFrame *tmp, char *fontfile)
506 {
507 #if CONFIG_LIBFREETYPE
508  const char *str = "EF G A BC D ";
509  uint8_t *data = tmp->data[0];
510  int linesize = tmp->linesize[0];
511  FT_Library lib = NULL;
512  FT_Face face = NULL;
513  int font_width = 16, font_height = 32;
514  int font_repeat = font_width * 12;
515  int linear_hori_advance = font_width * 65536;
516  int non_monospace_warning = 0;
517  int x;
518 
519  if (!fontfile)
520  return AVERROR(EINVAL);
521 
522  if (FT_Init_FreeType(&lib))
523  goto fail;
524 
525  if (FT_New_Face(lib, fontfile, 0, &face))
526  goto fail;
527 
528  if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
529  goto fail;
530 
531  if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
532  goto fail;
533 
534  if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
535  goto fail;
536 
537  for (x = 0; x < 12; x++) {
538  int sx, sy, rx, bx, by, dx, dy;
539 
540  if (str[x] == ' ')
541  continue;
542 
543  if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
544  goto fail;
545 
546  if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
547  av_log(s->ctx, AV_LOG_WARNING, "font is not monospace.\n");
548  non_monospace_warning = 1;
549  }
550 
551  sy = font_height - 8 - face->glyph->bitmap_top;
552  for (rx = 0; rx < 10; rx++) {
553  sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
554  for (by = 0; by < face->glyph->bitmap.rows; by++) {
555  dy = by + sy;
556  if (dy < 0)
557  continue;
558  if (dy >= font_height)
559  break;
560 
561  for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
562  dx = bx + sx;
563  if (dx < 0)
564  continue;
565  if (dx >= 1920)
566  break;
567  data[dy*linesize+4*dx+3] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
568  }
569  }
570  }
571  }
572 
573  FT_Done_Face(face);
574  FT_Done_FreeType(lib);
575  return 0;
576 
577 fail:
578  av_log(s->ctx, AV_LOG_WARNING, "error while loading freetype font.\n");
579  FT_Done_Face(face);
580  FT_Done_FreeType(lib);
581  return AVERROR(EINVAL);
582 #else
583  if (fontfile)
584  av_log(s->ctx, AV_LOG_WARNING, "freetype is not available, ignoring fontfile option.\n");
585  return AVERROR(EINVAL);
586 #endif
587 }
588 
589 static int render_fontconfig(ShowCQTContext *s, AVFrame *tmp, char* font)
590 {
591 #if CONFIG_LIBFONTCONFIG
592  FcConfig *fontconfig;
593  FcPattern *pat, *best;
594  FcResult result = FcResultMatch;
595  char *filename;
596  int i, ret;
597 
598  if (!font)
599  return AVERROR(EINVAL);
600 
601  for (i = 0; font[i]; i++) {
602  if (font[i] == '|')
603  font[i] = ':';
604  }
605 
606  if (!(fontconfig = FcInitLoadConfigAndFonts())) {
607  av_log(s->ctx, AV_LOG_ERROR, "could not init fontconfig.\n");
608  return AVERROR_UNKNOWN;
609  }
610 
611  if (!(pat = FcNameParse((uint8_t *)font))) {
612  av_log(s->ctx, AV_LOG_ERROR, "could not parse fontconfig pat.\n");
613  FcConfigDestroy(fontconfig);
614  return AVERROR(EINVAL);
615  }
616 
617  FcDefaultSubstitute(pat);
618 
619  if (!FcConfigSubstitute(fontconfig, pat, FcMatchPattern)) {
620  av_log(s->ctx, AV_LOG_ERROR, "could not substitute fontconfig options.\n");
621  FcPatternDestroy(pat);
622  FcConfigDestroy(fontconfig);
623  return AVERROR(ENOMEM);
624  }
625 
626  best = FcFontMatch(fontconfig, pat, &result);
627  FcPatternDestroy(pat);
628 
629  ret = AVERROR(EINVAL);
630  if (!best || result != FcResultMatch) {
631  av_log(s->ctx, AV_LOG_ERROR, "cannot find a valid font for %s.\n", font);
632  goto fail;
633  }
634 
635  if (FcPatternGetString(best, FC_FILE, 0, (FcChar8 **)&filename) != FcResultMatch) {
636  av_log(s->ctx, AV_LOG_ERROR, "no file path for %s\n", font);
637  goto fail;
638  }
639 
640  ret = render_freetype(s, tmp, filename);
641 
642 fail:
643  FcPatternDestroy(best);
644  FcConfigDestroy(fontconfig);
645  return ret;
646 #else
647  if (font)
648  av_log(s->ctx, AV_LOG_WARNING, "fontconfig is not available, ignoring font option.\n");
649  return AVERROR(EINVAL);
650 #endif
651 }
652 
653 static int render_default_font(AVFrame *tmp)
654 {
655  const char *str = "EF G A BC D ";
656  int x, u, v, mask;
657  uint8_t *data = tmp->data[0];
658  int linesize = tmp->linesize[0];
659  int width = 1920/2, height = 16;
660 
661  for (x = 0; x < width; x += width/10) {
662  uint8_t *startptr = data + 4 * x;
663  for (u = 0; u < 12; u++) {
664  for (v = 0; v < height; v++) {
665  uint8_t *p = startptr + v * linesize + height/2 * 4 * u;
666  for (mask = 0x80; mask; mask >>= 1, p += 4) {
667  if (mask & avpriv_vga16_font[str[u] * 16 + v])
668  p[3] = 255;
669  else
670  p[3] = 0;
671  }
672  }
673  }
674  }
675 
676  return 0;
677 }
678 
679 static int init_axis_from_font(ShowCQTContext *s)
680 {
681  AVFrame *tmp = NULL;
682  int ret = AVERROR(ENOMEM);
683  int width = 1920, height = 32;
684  int default_font = 0;
685 
686  if (!(tmp = alloc_frame_empty(AV_PIX_FMT_RGBA, width, height)))
687  goto fail;
688 
689  if (!(s->axis_frame = av_frame_alloc()))
690  goto fail;
691 
692  if (render_freetype(s, tmp, s->fontfile) < 0 &&
693  render_fontconfig(s, tmp, s->font) < 0 &&
694  (default_font = 1, ret = render_default_font(tmp)) < 0)
695  goto fail;
696 
697  if (default_font)
698  width /= 2, height /= 2;
699 
700  if ((ret = init_axis_color(s, tmp, default_font)) < 0)
701  goto fail;
702 
703  if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
704  convert_axis_pixel_format(s->format), tmp->data, tmp->linesize,
705  width, height, AV_PIX_FMT_RGBA, s->ctx)) < 0)
706  goto fail;
707 
708  av_frame_free(&tmp);
709  s->axis_frame->width = s->width;
710  s->axis_frame->height = s->axis_h;
711  s->axis_frame->format = convert_axis_pixel_format(s->format);
712  return 0;
713 
714 fail:
715  av_frame_free(&tmp);
716  av_frame_free(&s->axis_frame);
717  return ret;
718 }
719 
720 static float calculate_gamma(float v, float g)
721 {
722  if (g == 1.0f)
723  return v;
724  if (g == 2.0f)
725  return sqrtf(v);
726  if (g == 3.0f)
727  return cbrtf(v);
728  if (g == 4.0f)
729  return sqrtf(sqrtf(v));
730  return expf(logf(v) / g);
731 }
732 
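/* map per-bin left/right energies (v[x].re, v[x].im) to colors: cscheme[0..2]
 * weight the left channel and cscheme[3..5] the right channel for the R, G
 * and B components, followed by gamma correction */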
733 static void rgb_from_cqt(ColorFloat *c, const FFTComplex *v, float g, int len, float cscheme[6])
734 {
735  int x;
736  for (x = 0; x < len; x++) {
737  c[x].rgb.r = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), g);
738  c[x].rgb.g = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), g);
739  c[x].rgb.b = 255.0f * calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), g);
740  }
741 }
742 
743 static void yuv_from_cqt(ColorFloat *c, const FFTComplex *v, float gamma, int len, float cm[3][3], float cscheme[6])
744 {
745  int x;
746  for (x = 0; x < len; x++) {
747  float r, g, b;
748  r = calculate_gamma(FFMIN(1.0f, cscheme[0] * v[x].re + cscheme[3] * v[x].im), gamma);
749  g = calculate_gamma(FFMIN(1.0f, cscheme[1] * v[x].re + cscheme[4] * v[x].im), gamma);
750  b = calculate_gamma(FFMIN(1.0f, cscheme[2] * v[x].re + cscheme[5] * v[x].im), gamma);
751  c[x].yuv.y = cm[0][0] * r + cm[0][1] * g + cm[0][2] * b;
752  c[x].yuv.u = cm[1][0] * r + cm[1][1] * g + cm[1][2] * b;
753  c[x].yuv.v = cm[2][0] * r + cm[2][1] * g + cm[2][2] * b;
754  }
755 }
756 
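/* bar drawing: a pixel at normalized height ht is lit when the bar height
 * h[x] exceeds it; intensity ramps linearly over the top bar_t fraction of
 * the bar and stays at full level below that */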
757 static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h,
758  const ColorFloat *c, int bar_h, float bar_t)
759 {
760  int x, y, w = out->width;
761  float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
762  uint8_t *v = out->data[0], *lp;
763  int ls = out->linesize[0];
764 
765  for (y = 0; y < bar_h; y++) {
766  ht = (bar_h - y) * rcp_bar_h;
767  lp = v + y * ls;
768  for (x = 0; x < w; x++) {
769  if (h[x] <= ht) {
770  *lp++ = 0;
771  *lp++ = 0;
772  *lp++ = 0;
773  } else {
774  mul = (h[x] - ht) * rcp_h[x];
775  mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f;
776  *lp++ = lrintf(mul * c[x].rgb.r);
777  *lp++ = lrintf(mul * c[x].rgb.g);
778  *lp++ = lrintf(mul * c[x].rgb.b);
779  }
780  }
781  }
782 }
783 
784 #define DRAW_BAR_WITH_CHROMA(x) \
785 do { \
786  if (h[x] <= ht) { \
787  *lpy++ = 16; \
788  *lpu++ = 128; \
789  *lpv++ = 128; \
790  } else { \
791  mul = (h[x] - ht) * rcp_h[x]; \
792  mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
793  *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
794  *lpu++ = lrintf(mul * c[x].yuv.u + 128.0f); \
795  *lpv++ = lrintf(mul * c[x].yuv.v + 128.0f); \
796  } \
797 } while (0)
798 
799 #define DRAW_BAR_WITHOUT_CHROMA(x) \
800 do { \
801  if (h[x] <= ht) { \
802  *lpy++ = 16; \
803  } else { \
804  mul = (h[x] - ht) * rcp_h[x]; \
805  mul = (mul < bar_t) ? (mul * rcp_bar_t) : 1.0f; \
806  *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
807  } \
808 } while (0)
809 
810 static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h,
811  const ColorFloat *c, int bar_h, float bar_t)
812 {
813  int x, y, yh, w = out->width;
814  float mul, ht, rcp_bar_h = 1.0f / bar_h, rcp_bar_t = 1.0f / bar_t;
815  uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
816  uint8_t *lpy, *lpu, *lpv;
817  int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
818  int fmt = out->format;
819 
820  for (y = 0; y < bar_h; y += 2) {
821  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
822  ht = (bar_h - y) * rcp_bar_h;
823  lpy = vy + y * lsy;
824  lpu = vu + yh * lsu;
825  lpv = vv + yh * lsv;
826  if (fmt == AV_PIX_FMT_YUV444P) {
827  for (x = 0; x < w; x += 2) {
828  DRAW_BAR_WITH_CHROMA(x);
829  DRAW_BAR_WITH_CHROMA(x+1);
830  }
831  } else {
832  for (x = 0; x < w; x += 2) {
833  DRAW_BAR_WITH_CHROMA(x);
834  DRAW_BAR_WITHOUT_CHROMA(x+1);
835  }
836  }
837 
838  ht = (bar_h - (y+1)) * rcp_bar_h;
839  lpy = vy + (y+1) * lsy;
840  lpu = vu + (y+1) * lsu;
841  lpv = vv + (y+1) * lsv;
842  if (fmt == AV_PIX_FMT_YUV444P) {
843  for (x = 0; x < w; x += 2) {
844  DRAW_BAR_WITH_CHROMA(x);
845  DRAW_BAR_WITH_CHROMA(x+1);
846  }
847  } else if (fmt == AV_PIX_FMT_YUV422P) {
848  for (x = 0; x < w; x += 2) {
849  DRAW_BAR_WITH_CHROMA(x);
850  DRAW_BAR_WITHOUT_CHROMA(x+1);
851  }
852  } else {
853  for (x = 0; x < w; x += 2) {
854  DRAW_BAR_WITHOUT_CHROMA(x);
855  DRAW_BAR_WITHOUT_CHROMA(x+1);
856  }
857  }
858  }
859 }
860 
861 static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
862 {
863  int x, y, w = axis->width, h = axis->height;
864  float a, rcp_255 = 1.0f / 255.0f;
865  uint8_t *lp, *lpa;
866 
867  for (y = 0; y < h; y++) {
868  lp = out->data[0] + (off + y) * out->linesize[0];
869  lpa = axis->data[0] + y * axis->linesize[0];
870  for (x = 0; x < w; x++) {
871  if (!lpa[3]) {
872  *lp++ = lrintf(c[x].rgb.r);
873  *lp++ = lrintf(c[x].rgb.g);
874  *lp++ = lrintf(c[x].rgb.b);
875  } else if (lpa[3] == 255) {
876  *lp++ = lpa[0];
877  *lp++ = lpa[1];
878  *lp++ = lpa[2];
879  } else {
880  a = rcp_255 * lpa[3];
881  *lp++ = lrintf(a * lpa[0] + (1.0f - a) * c[x].rgb.r);
882  *lp++ = lrintf(a * lpa[1] + (1.0f - a) * c[x].rgb.g);
883  *lp++ = lrintf(a * lpa[2] + (1.0f - a) * c[x].rgb.b);
884  }
885  lpa += 4;
886  }
887  }
888 }
889 
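/* blend the axis overlay (with alpha) over the spectrum colors; the CHROMA2
 * and CHROMA2x2 variants average the 2x1 / 2x2 alpha and chroma samples that
 * share a single output chroma sample in 4:2:2 / 4:2:0 */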
890 #define BLEND_WITH_CHROMA(c) \
891 do { \
892  if (!*lpaa) { \
893  *lpy = lrintf(c.yuv.y + 16.0f); \
894  *lpu = lrintf(c.yuv.u + 128.0f); \
895  *lpv = lrintf(c.yuv.v + 128.0f); \
896  } else if (255 == *lpaa) { \
897  *lpy = *lpay; \
898  *lpu = *lpau; \
899  *lpv = *lpav; \
900  } else { \
901  float a = (1.0f/255.0f) * (*lpaa); \
902  *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
903  *lpu = lrintf(a * (*lpau) + (1.0f - a) * (c.yuv.u + 128.0f)); \
904  *lpv = lrintf(a * (*lpav) + (1.0f - a) * (c.yuv.v + 128.0f)); \
905  } \
906  lpy++; lpu++; lpv++; \
907  lpay++; lpau++; lpav++; lpaa++; \
908 } while (0)
909 
910 #define BLEND_WITHOUT_CHROMA(c, alpha_inc) \
911 do { \
912  if (!*lpaa) { \
913  *lpy = lrintf(c.yuv.y + 16.0f); \
914  } else if (255 == *lpaa) { \
915  *lpy = *lpay; \
916  } else { \
917  float a = (1.0f/255.0f) * (*lpaa); \
918  *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
919  } \
920  lpy++; \
921  lpay++; lpaa += alpha_inc; \
922 } while (0)
923 
924 #define BLEND_CHROMA2(c) \
925 do { \
926  if (!lpaa[0] && !lpaa[1]) { \
927  *lpu = lrintf(c.yuv.u + 128.0f); \
928  *lpv = lrintf(c.yuv.v + 128.0f); \
929  } else if (255 == lpaa[0] && 255 == lpaa[1]) { \
930  *lpu = *lpau; *lpv = *lpav; \
931  } else { \
932  float a0 = (0.5f/255.0f) * lpaa[0]; \
933  float a1 = (0.5f/255.0f) * lpaa[1]; \
934  float b = 1.0f - a0 - a1; \
935  *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + b * (c.yuv.u + 128.0f)); \
936  *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + b * (c.yuv.v + 128.0f)); \
937  } \
938  lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
939 } while (0)
940 
941 #define BLEND_CHROMA2x2(c) \
942 do { \
943  if (!lpaa[0] && !lpaa[1] && !lpaa[lsaa] && !lpaa[lsaa+1]) { \
944  *lpu = lrintf(c.yuv.u + 128.0f); \
945  *lpv = lrintf(c.yuv.v + 128.0f); \
946  } else if (255 == lpaa[0] && 255 == lpaa[1] && \
947  255 == lpaa[lsaa] && 255 == lpaa[lsaa+1]) { \
948  *lpu = *lpau; *lpv = *lpav; \
949  } else { \
950  float a0 = (0.25f/255.0f) * lpaa[0]; \
951  float a1 = (0.25f/255.0f) * lpaa[1]; \
952  float a2 = (0.25f/255.0f) * lpaa[lsaa]; \
953  float a3 = (0.25f/255.0f) * lpaa[lsaa+1]; \
954  float b = 1.0f - a0 - a1 - a2 - a3; \
955  *lpu = lrintf(a0 * lpau[0] + a1 * lpau[1] + a2 * lpau[lsau] + a3 * lpau[lsau+1] \
956  + b * (c.yuv.u + 128.0f)); \
957  *lpv = lrintf(a0 * lpav[0] + a1 * lpav[1] + a2 * lpav[lsav] + a3 * lpav[lsav+1] \
958  + b * (c.yuv.v + 128.0f)); \
959  } \
960  lpau += 2; lpav += 2; lpaa++; lpu++; lpv++; \
961 } while (0)
962 
963 static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
964 {
965  int fmt = out->format, x, y, yh, w = axis->width, h = axis->height;
966  int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
967  uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
968  uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3];
969  int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
970  int lsay = axis->linesize[0], lsau = axis->linesize[1], lsav = axis->linesize[2], lsaa = axis->linesize[3];
971  uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa;
972 
973  for (y = 0; y < h; y += 2) {
974  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
975  lpy = vy + (off + y) * lsy;
976  lpu = vu + (offh + yh) * lsu;
977  lpv = vv + (offh + yh) * lsv;
978  lpay = vay + y * lsay;
979  lpau = vau + y * lsau;
980  lpav = vav + y * lsav;
981  lpaa = vaa + y * lsaa;
982  if (fmt == AV_PIX_FMT_YUV444P) {
983  for (x = 0; x < w; x += 2) {
984  BLEND_WITH_CHROMA(c[x]);
985  BLEND_WITH_CHROMA(c[x+1]);
986  }
987  } else if (fmt == AV_PIX_FMT_YUV422P) {
988  for (x = 0; x < w; x += 2) {
989  BLEND_WITHOUT_CHROMA(c[x], 0);
990  BLEND_CHROMA2(c[x]);
991  BLEND_WITHOUT_CHROMA(c[x+1], 1);
992  }
993  } else {
994  for (x = 0; x < w; x += 2) {
995  BLEND_WITHOUT_CHROMA(c[x], 0);
996  BLEND_CHROMA2x2(c[x]);
997  BLEND_WITHOUT_CHROMA(c[x+1], 1);
998  }
999  }
1000 
1001  lpy = vy + (off + y + 1) * lsy;
1002  lpu = vu + (off + y + 1) * lsu;
1003  lpv = vv + (off + y + 1) * lsv;
1004  lpay = vay + (y + 1) * lsay;
1005  lpau = vau + (y + 1) * lsau;
1006  lpav = vav + (y + 1) * lsav;
1007  lpaa = vaa + (y + 1) * lsaa;
1008  if (fmt == AV_PIX_FMT_YUV444P) {
1009  for (x = 0; x < w; x += 2) {
1010  BLEND_WITH_CHROMA(c[x]);
1011  BLEND_WITH_CHROMA(c[x+1]);
1012  }
1013  } else if (fmt == AV_PIX_FMT_YUV422P) {
1014  for (x = 0; x < w; x += 2) {
1015  BLEND_WITHOUT_CHROMA(c[x], 0);
1016  BLEND_CHROMA2(c[x]);
1017  BLEND_WITHOUT_CHROMA(c[x+1], 1);
1018  }
1019  } else {
1020  for (x = 0; x < w; x += 2) {
1021  BLEND_WITHOUT_CHROMA(c[x], 1);
1022  BLEND_WITHOUT_CHROMA(c[x+1], 1);
1023  }
1024  }
1025  }
1026 }
1027 
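/* the sonogram frame is used as a ring buffer of rows: row idx holds the
 * newest line, and draw_sono copies rows starting there, wrapping around */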
1028 static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
1029 {
1030  int fmt = out->format, h = sono->height;
1031  int nb_planes = (fmt == AV_PIX_FMT_RGB24) ? 1 : 3;
1032  int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
1033  int inc = (fmt == AV_PIX_FMT_YUV420P) ? 2 : 1;
1034  int ls, i, y, yh;
1035 
1036  ls = FFMIN(out->linesize[0], sono->linesize[0]);
1037  for (y = 0; y < h; y++) {
1038  memcpy(out->data[0] + (off + y) * out->linesize[0],
1039  sono->data[0] + (idx + y) % h * sono->linesize[0], ls);
1040  }
1041 
1042  for (i = 1; i < nb_planes; i++) {
1043  ls = FFMIN(out->linesize[i], sono->linesize[i]);
1044  for (y = 0; y < h; y += inc) {
1045  yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
1046  memcpy(out->data[i] + (offh + yh) * out->linesize[i],
1047  sono->data[i] + (idx + y) % h * sono->linesize[i], ls);
1048  }
1049  }
1050 }
1051 
1052 static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
1053 {
1054  int x, w = sono->width;
1055  uint8_t *lp = sono->data[0] + idx * sono->linesize[0];
1056 
1057  for (x = 0; x < w; x++) {
1058  *lp++ = lrintf(c[x].rgb.r);
1059  *lp++ = lrintf(c[x].rgb.g);
1060  *lp++ = lrintf(c[x].rgb.b);
1061  }
1062 }
1063 
1064 static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
1065 {
1066  int x, fmt = sono->format, w = sono->width;
1067  uint8_t *lpy = sono->data[0] + idx * sono->linesize[0];
1068  uint8_t *lpu = sono->data[1] + idx * sono->linesize[1];
1069  uint8_t *lpv = sono->data[2] + idx * sono->linesize[2];
1070 
1071  for (x = 0; x < w; x += 2) {
1072  *lpy++ = lrintf(c[x].yuv.y + 16.0f);
1073  *lpu++ = lrintf(c[x].yuv.u + 128.0f);
1074  *lpv++ = lrintf(c[x].yuv.v + 128.0f);
1075  *lpy++ = lrintf(c[x+1].yuv.y + 16.0f);
1076  if (fmt == AV_PIX_FMT_YUV444P) {
1077  *lpu++ = lrintf(c[x+1].yuv.u + 128.0f);
1078  *lpv++ = lrintf(c[x+1].yuv.v + 128.0f);
1079  }
1080  }
1081 }
1082 
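/* turn the raw per-bin energies into drawable data: bar heights (h_buf, with
 * bar_v volume and bar_g gamma) are computed once per output frame, colors
 * (c_buf, with sono_v volume and sono_g gamma) on every transform, averaging
 * fcount bins per output pixel when fcount > 1 */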
1083 static void process_cqt(ShowCQTContext *s)
1084 {
1085  int x, i;
1086  if (!s->sono_count) {
1087  for (x = 0; x < s->cqt_len; x++) {
1088  s->h_buf[x] = s->bar_v_buf[x] * 0.5f * (s->cqt_result[x].re + s->cqt_result[x].im);
1089  }
1090  if (s->fcount > 1) {
1091  float rcp_fcount = 1.0f / s->fcount;
1092  for (x = 0; x < s->width; x++) {
1093  float h = 0.0f;
1094  for (i = 0; i < s->fcount; i++)
1095  h += s->h_buf[s->fcount * x + i];
1096  s->h_buf[x] = rcp_fcount * h;
1097  }
1098  }
1099  for (x = 0; x < s->width; x++) {
1100  s->h_buf[x] = calculate_gamma(s->h_buf[x], s->bar_g);
1101  s->rcp_h_buf[x] = 1.0f / (s->h_buf[x] + 0.0001f);
1102  }
1103  }
1104 
1105  for (x = 0; x < s->cqt_len; x++) {
1106  s->cqt_result[x].re *= s->sono_v_buf[x];
1107  s->cqt_result[x].im *= s->sono_v_buf[x];
1108  }
1109 
1110  if (s->fcount > 1) {
1111  float rcp_fcount = 1.0f / s->fcount;
1112  for (x = 0; x < s->width; x++) {
1113  FFTComplex result = {0.0f, 0.0f};
1114  for (i = 0; i < s->fcount; i++) {
1115  result.re += s->cqt_result[s->fcount * x + i].re;
1116  result.im += s->cqt_result[s->fcount * x + i].im;
1117  }
1118  s->cqt_result[x].re = rcp_fcount * result.re;
1119  s->cqt_result[x].im = rcp_fcount * result.im;
1120  }
1121  }
1122 
1123  if (s->format == AV_PIX_FMT_RGB24)
1124  rgb_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cscheme_v);
1125  else
1126  yuv_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width, s->cmatrix, s->cscheme_v);
1127 }
1128 
1129 static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
1130 {
1131  AVFilterLink *outlink = ctx->outputs[0];
1132  ShowCQTContext *s = ctx->priv;
1133  int64_t last_time, cur_time;
1134 
1135 #define UPDATE_TIME(t) \
1136  cur_time = av_gettime_relative(); \
1137  t += cur_time - last_time; \
1138  last_time = cur_time
1139 
1140  last_time = av_gettime_relative();
1141 
1142  memcpy(s->fft_result, s->fft_data, s->fft_len * sizeof(*s->fft_data));
1143  if (s->attack_data) {
1144  int k;
1145  for (k = 0; k < s->remaining_fill_max; k++) {
1146  s->fft_result[s->fft_len/2+k].re *= s->attack_data[k];
1147  s->fft_result[s->fft_len/2+k].im *= s->attack_data[k];
1148  }
1149  }
1150 
1151  av_fft_permute(s->fft_ctx, s->fft_result);
1152  av_fft_calc(s->fft_ctx, s->fft_result);
1153  s->fft_result[s->fft_len] = s->fft_result[0];
1154  UPDATE_TIME(s->fft_time);
1155 
1156  s->cqt_calc(s->cqt_result, s->fft_result, s->coeffs, s->cqt_len, s->fft_len);
1157  UPDATE_TIME(s->cqt_time);
1158 
1159  process_cqt(s);
1160  UPDATE_TIME(s->process_cqt_time);
1161 
1162  if (s->sono_h) {
1163  s->update_sono(s->sono_frame, s->c_buf, s->sono_idx);
1164  UPDATE_TIME(s->update_sono_time);
1165  }
1166 
1167  if (!s->sono_count) {
1168  AVFrame *out = *frameout = ff_get_video_buffer(outlink, outlink->w, outlink->h);
1169  if (!out)
1170  return AVERROR(ENOMEM);
1171  out->sample_aspect_ratio = av_make_q(1, 1);
1172  out->color_range = AVCOL_RANGE_MPEG;
1173  out->colorspace = s->csp;
1174  UPDATE_TIME(s->alloc_time);
1175 
1176  if (s->bar_h) {
1177  s->draw_bar(out, s->h_buf, s->rcp_h_buf, s->c_buf, s->bar_h, s->bar_t);
1178  UPDATE_TIME(s->bar_time);
1179  }
1180 
1181  if (s->axis_h) {
1182  s->draw_axis(out, s->axis_frame, s->c_buf, s->bar_h);
1183  UPDATE_TIME(s->axis_time);
1184  }
1185 
1186  if (s->sono_h) {
1187  s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);
1188  UPDATE_TIME(s->sono_time);
1189  }
1190  out->pts = s->next_pts;
1191  s->next_pts += PTS_STEP;
1192  }
1193  s->sono_count = (s->sono_count + 1) % s->count;
1194  if (s->sono_h)
1195  s->sono_idx = (s->sono_idx + s->sono_h - 1) % s->sono_h;
1196  return 0;
1197 }
1198 
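/* build the limited-range RGB -> YUV conversion matrix (219-step luma,
 * 112-step chroma) for the selected colorspace */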
1199 static void init_colormatrix(ShowCQTContext *s)
1200 {
1201  double kr, kg, kb;
1202 
1203  /* from vf_colorspace.c */
1204  switch (s->csp) {
1205  default:
1206  av_log(s->ctx, AV_LOG_WARNING, "unsupported colorspace, setting it to unspecified.\n");
1207  s->csp = AVCOL_SPC_UNSPECIFIED;
1208  case AVCOL_SPC_UNSPECIFIED:
1209  case AVCOL_SPC_BT470BG:
1210  case AVCOL_SPC_SMPTE170M:
1211  kr = 0.299; kb = 0.114; break;
1212  case AVCOL_SPC_BT709:
1213  kr = 0.2126; kb = 0.0722; break;
1214  case AVCOL_SPC_FCC:
1215  kr = 0.30; kb = 0.11; break;
1216  case AVCOL_SPC_SMPTE240M:
1217  kr = 0.212; kb = 0.087; break;
1218  case AVCOL_SPC_BT2020_NCL:
1219  kr = 0.2627; kb = 0.0593; break;
1220  }
1221 
1222  kg = 1.0 - kr - kb;
1223  s->cmatrix[0][0] = 219.0 * kr;
1224  s->cmatrix[0][1] = 219.0 * kg;
1225  s->cmatrix[0][2] = 219.0 * kb;
1226  s->cmatrix[1][0] = -112.0 * kr / (1.0 - kb);
1227  s->cmatrix[1][1] = -112.0 * kg / (1.0 - kb);
1228  s->cmatrix[1][2] = 112.0;
1229  s->cmatrix[2][0] = 112.0;
1230  s->cmatrix[2][1] = -112.0 * kg / (1.0 - kr);
1231  s->cmatrix[2][2] = -112.0 * kb / (1.0 - kr);
1232 }
1233 
1234 static int init_cscheme(ShowCQTContext *s)
1235 {
1236  char tail[2];
1237  int k;
1238 
1239  if (sscanf(s->cscheme, " %f | %f | %f | %f | %f | %f %1s", &s->cscheme_v[0],
1240  &s->cscheme_v[1], &s->cscheme_v[2], &s->cscheme_v[3], &s->cscheme_v[4],
1241  &s->cscheme_v[5], tail) != 6)
1242  goto fail;
1243 
1244  for (k = 0; k < 6; k++)
1245  if (isnan(s->cscheme_v[k]) || s->cscheme_v[k] < 0.0f || s->cscheme_v[k] > 1.0f)
1246  goto fail;
1247 
1248  return 0;
1249 
1250 fail:
1251  av_log(s->ctx, AV_LOG_ERROR, "invalid cscheme.\n");
1252  return AVERROR(EINVAL);
1253 }
1254 
1255 /* main filter control */
1256 static av_cold int init(AVFilterContext *ctx)
1257 {
1258  ShowCQTContext *s = ctx->priv;
1259  s->ctx = ctx;
1260 
1261  if (!s->fullhd) {
1262  av_log(ctx, AV_LOG_WARNING, "fullhd option is deprecated, use size/s option instead.\n");
1263  if (s->width != 1920 || s->height != 1080) {
1264  av_log(ctx, AV_LOG_ERROR, "fullhd set to 0 but with custom dimension.\n");
1265  return AVERROR(EINVAL);
1266  }
1267  s->width /= 2;
1268  s->height /= 2;
1269  s->fullhd = 1;
1270  }
1271 
1272  if (s->axis_h < 0) {
1273  s->axis_h = s->width / 60;
1274  if (s->axis_h & 1)
1275  s->axis_h++;
1276  if (s->bar_h >= 0 && s->sono_h >= 0)
1277  s->axis_h = s->height - s->bar_h - s->sono_h;
1278  if (s->bar_h >= 0 && s->sono_h < 0)
1279  s->axis_h = FFMIN(s->axis_h, s->height - s->bar_h);
1280  if (s->bar_h < 0 && s->sono_h >= 0)
1281  s->axis_h = FFMIN(s->axis_h, s->height - s->sono_h);
1282  }
1283 
1284  if (s->bar_h < 0) {
1285  s->bar_h = (s->height - s->axis_h) / 2;
1286  if (s->bar_h & 1)
1287  s->bar_h--;
1288  if (s->sono_h >= 0)
1289  s->bar_h = s->height - s->sono_h - s->axis_h;
1290  }
1291 
1292  if (s->sono_h < 0)
1293  s->sono_h = s->height - s->axis_h - s->bar_h;
1294 
1295  if ((s->width & 1) || (s->height & 1) || (s->bar_h & 1) || (s->axis_h & 1) || (s->sono_h & 1) ||
1296  (s->bar_h < 0) || (s->axis_h < 0) || (s->sono_h < 0) || (s->bar_h > s->height) ||
1297  (s->axis_h > s->height) || (s->sono_h > s->height) || (s->bar_h + s->axis_h + s->sono_h != s->height)) {
1298  av_log(ctx, AV_LOG_ERROR, "invalid dimension.\n");
1299  return AVERROR(EINVAL);
1300  }
1301 
1302  if (!s->fcount) {
1303  do {
1304  s->fcount++;
1305  } while(s->fcount * s->width < 1920 && s->fcount < 10);
1306  }
1307 
1308  init_colormatrix(s);
1309 
1310  return init_cscheme(s);
1311 }
1312 
1313 static av_cold void uninit(AVFilterContext *ctx)
1314 {
1315  common_uninit(ctx->priv);
1316 }
1317 
1318 static int query_formats(AVFilterContext *ctx)
1319 {
1320  AVFilterFormats *formats = NULL;
1321  AVFilterChannelLayouts *layouts = NULL;
1322  AVFilterLink *inlink = ctx->inputs[0];
1323  AVFilterLink *outlink = ctx->outputs[0];
1324  enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
1325  static const enum AVPixelFormat pix_fmts[] = {
1326  AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
1327  AV_PIX_FMT_NONE
1328  };
1329  static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };
1330  int ret;
1331 
1332  /* set input audio formats */
1333  formats = ff_make_format_list(sample_fmts);
1334  if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
1335  return ret;
1336 
1337  layouts = ff_make_format64_list(channel_layouts);
1338  if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
1339  return ret;
1340 
1341  formats = ff_all_samplerates();
1342  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
1343  return ret;
1344 
1345  /* set output video format */
1346  formats = ff_make_format_list(pix_fmts);
1347  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
1348  return ret;
1349 
1350  return 0;
1351 }
1352 
1353 static int config_output(AVFilterLink *outlink)
1354 {
1355  AVFilterContext *ctx = outlink->src;
1356  AVFilterLink *inlink = ctx->inputs[0];
1357  ShowCQTContext *s = ctx->priv;
1358  int ret;
1359 
1360  common_uninit(s);
1361 
1362  outlink->w = s->width;
1363  outlink->h = s->height;
1364  s->format = outlink->format;
1365  outlink->sample_aspect_ratio = av_make_q(1, 1);
1366  outlink->frame_rate = s->rate;
1367  outlink->time_base = av_mul_q(av_inv_q(s->rate), av_make_q(1, PTS_STEP));
1368  av_log(ctx, AV_LOG_INFO, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
1369  s->width, s->height, av_get_pix_fmt_name(s->format), s->rate.num, s->rate.den,
1370  s->bar_h, s->axis_h, s->sono_h);
1371 
1372  s->cqt_len = s->width * s->fcount;
1373  if (!(s->freq = create_freq_table(s->basefreq, s->endfreq, s->cqt_len)))
1374  return AVERROR(ENOMEM);
1375 
1376  if ((ret = init_volume(s)) < 0)
1377  return ret;
1378 
1379  s->fft_bits = FFMAX(ceil(log2(inlink->sample_rate * s->timeclamp)), 4);
1380  s->fft_len = 1 << s->fft_bits;
1381  av_log(ctx, AV_LOG_INFO, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
1382 
1383  s->fft_ctx = av_fft_init(s->fft_bits, 0);
1384  s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));
1385  s->fft_result = av_calloc(s->fft_len + 64, sizeof(*s->fft_result));
1386  s->cqt_result = av_malloc_array(s->cqt_len, sizeof(*s->cqt_result));
1387  if (!s->fft_ctx || !s->fft_data || !s->fft_result || !s->cqt_result)
1388  return AVERROR(ENOMEM);
1389 
1390  s->remaining_fill_max = s->fft_len / 2;
1391  if (s->attack > 0.0) {
1392  int k;
1393 
1394  s->remaining_fill_max = FFMIN(s->remaining_fill_max, ceil(inlink->sample_rate * s->attack));
1395  s->attack_data = av_malloc_array(s->remaining_fill_max, sizeof(*s->attack_data));
1396  if (!s->attack_data)
1397  return AVERROR(ENOMEM);
1398 
1399  for (k = 0; k < s->remaining_fill_max; k++) {
1400  double y = M_PI * k / (inlink->sample_rate * s->attack);
1401  s->attack_data[k] = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
1402  }
1403  }
1404 
1405  s->cqt_align = 1;
1406  s->cqt_calc = cqt_calc;
1407  s->permute_coeffs = NULL;
1408  s->draw_sono = draw_sono;
1409  if (s->format == AV_PIX_FMT_RGB24) {
1410  s->draw_bar = draw_bar_rgb;
1411  s->draw_axis = draw_axis_rgb;
1412  s->update_sono = update_sono_rgb;
1413  } else {
1414  s->draw_bar = draw_bar_yuv;
1415  s->draw_axis = draw_axis_yuv;
1416  s->update_sono = update_sono_yuv;
1417  }
1418 
1419  if (ARCH_X86)
1420  ff_showcqt_init_x86(s);
1421 
1422  if ((ret = init_cqt(s)) < 0)
1423  return ret;
1424 
1425  if (s->axis_h) {
1426  if (!s->axis) {
1427  if ((ret = init_axis_empty(s)) < 0)
1428  return ret;
1429  } else if (s->axisfile) {
1430  if (init_axis_from_file(s) < 0) {
1431  av_log(ctx, AV_LOG_WARNING, "loading axis image failed, falling back to font rendering.\n");
1432  if (init_axis_from_font(s) < 0) {
1433  av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disabling text drawing.\n");
1434  if ((ret = init_axis_empty(s)) < 0)
1435  return ret;
1436  }
1437  }
1438  } else {
1439  if (init_axis_from_font(s) < 0) {
1440  av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disabling text drawing.\n");
1441  if ((ret = init_axis_empty(s)) < 0)
1442  return ret;
1443  }
1444  }
1445  }
1446 
1447  if (s->sono_h) {
1448  s->sono_frame = alloc_frame_empty((outlink->format == AV_PIX_FMT_YUV420P) ?
1449  AV_PIX_FMT_YUV422P : outlink->format, s->width, s->sono_h);
1450  if (!s->sono_frame)
1451  return AVERROR(ENOMEM);
1452  }
1453 
1454  s->h_buf = av_malloc_array(s->cqt_len, sizeof(*s->h_buf));
1455  s->rcp_h_buf = av_malloc_array(s->width, sizeof(*s->rcp_h_buf));
1456  s->c_buf = av_malloc_array(s->width, sizeof(*s->c_buf));
1457  if (!s->h_buf || !s->rcp_h_buf || !s->c_buf)
1458  return AVERROR(ENOMEM);
1459 
1460  s->sono_count = 0;
1461  s->next_pts = 0;
1462  s->sono_idx = 0;
1463  s->remaining_fill = s->remaining_fill_max;
1464  s->remaining_frac = 0;
1465  s->step_frac = av_div_q(av_make_q(inlink->sample_rate, s->count), s->rate);
1466  s->step = (int)(s->step_frac.num / s->step_frac.den);
1467  s->step_frac.num %= s->step_frac.den;
1468  if (s->step_frac.num) {
1469  av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d + %d/%d.\n",
1470  inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);
1471  av_log(ctx, AV_LOG_WARNING, "fractional step.\n");
1472  } else {
1473  av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d.\n",
1474  inlink->sample_rate, s->step);
1475  }
1476 
1477  return 0;
1478 }
1479 
1480 
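/* fft_data is a sliding window of fft_len stereo samples stored as complex
 * values (left = re, right = im); incoming samples fill the tail of the
 * window, plot_cqt runs whenever the tail is full, and the window is then
 * shifted forward by step samples (carrying the fractional remainder) */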
1481 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
1482 {
1483  AVFilterContext *ctx = inlink->dst;
1484  AVFilterLink *outlink = ctx->outputs[0];
1485  ShowCQTContext *s = ctx->priv;
1486  int remaining, step, ret, x, i, j, m;
1487  float *audio_data;
1488  AVFrame *out = NULL;
1489 
1490  if (!insamples) {
1491  while (s->remaining_fill < s->remaining_fill_max) {
1492  memset(&s->fft_data[s->fft_len/2 + s->remaining_fill_max - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
1493  ret = plot_cqt(ctx, &out);
1494  if (ret < 0)
1495  return ret;
1496 
1497  step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
1498  s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
1499  for (x = 0; x < (s->fft_len/2 + s->remaining_fill_max - step); x++)
1500  s->fft_data[x] = s->fft_data[x+step];
1501  s->remaining_fill += step;
1502 
1503  if (out)
1504  return ff_filter_frame(outlink, out);
1505  }
1506  return AVERROR_EOF;
1507  }
1508 
1509  remaining = insamples->nb_samples;
1510  audio_data = (float*) insamples->data[0];
1511 
1512  while (remaining) {
1513  i = insamples->nb_samples - remaining;
1514  j = s->fft_len/2 + s->remaining_fill_max - s->remaining_fill;
1515  if (remaining >= s->remaining_fill) {
1516  for (m = 0; m < s->remaining_fill; m++) {
1517  s->fft_data[j+m].re = audio_data[2*(i+m)];
1518  s->fft_data[j+m].im = audio_data[2*(i+m)+1];
1519  }
1520  ret = plot_cqt(ctx, &out);
1521  if (ret < 0) {
1522  av_frame_free(&insamples);
1523  return ret;
1524  }
1525  remaining -= s->remaining_fill;
1526  if (out) {
1527  int64_t pts = av_rescale_q(insamples->pts, inlink->time_base, av_make_q(1, inlink->sample_rate));
1528  pts += insamples->nb_samples - remaining - s->remaining_fill_max;
1529  pts = av_rescale_q(pts, av_make_q(1, inlink->sample_rate), outlink->time_base);
1530  if (FFABS(pts - out->pts) > PTS_TOLERANCE) {
1531  av_log(ctx, AV_LOG_DEBUG, "changing pts from %"PRId64" (%.3f) to %"PRId64" (%.3f).\n",
1532  out->pts, out->pts * av_q2d(outlink->time_base),
1533  pts, pts * av_q2d(outlink->time_base));
1534  out->pts = pts;
1535  s->next_pts = pts + PTS_STEP;
1536  }
1537  ret = ff_filter_frame(outlink, out);
1538  if (ret < 0) {
1539  av_frame_free(&insamples);
1540  return ret;
1541  }
1542  out = NULL;
1543  }
1544  step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
1545  s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
1546  for (m = 0; m < s->fft_len/2 + s->remaining_fill_max - step; m++)
1547  s->fft_data[m] = s->fft_data[m+step];
1548  s->remaining_fill = step;
1549  } else {
1550  for (m = 0; m < remaining; m++) {
1551  s->fft_data[j+m].re = audio_data[2*(i+m)];
1552  s->fft_data[j+m].im = audio_data[2*(i+m)+1];
1553  }
1554  s->remaining_fill -= remaining;
1555  remaining = 0;
1556  }
1557  }
1558  av_frame_free(&insamples);
1559  return 0;
1560 }
1561 
1562 static int request_frame(AVFilterLink *outlink)
1563 {
1564  AVFilterLink *inlink = outlink->src->inputs[0];
1565  int ret;
1566 
1567  ret = ff_request_frame(inlink);
1568  if (ret == AVERROR_EOF)
1569  ret = filter_frame(inlink, NULL);
1570  return ret;
1571 }
1572 
1573 static const AVFilterPad showcqt_inputs[] = {
1574  {
1575  .name = "default",
1576  .type = AVMEDIA_TYPE_AUDIO,
1577  .filter_frame = filter_frame,
1578  },
1579  { NULL }
1580 };
1581 
1582 static const AVFilterPad showcqt_outputs[] = {
1583  {
1584  .name = "default",
1585  .type = AVMEDIA_TYPE_VIDEO,
1586  .config_props = config_output,
1587  .request_frame = request_frame,
1588  },
1589  { NULL }
1590 };
1591 
1592 AVFilter ff_avf_showcqt = {
1593  .name = "showcqt",
1594  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
1595  .init = init,
1596  .uninit = uninit,
1597  .query_formats = query_formats,
1598  .priv_size = sizeof(ShowCQTContext),
1599  .inputs = showcqt_inputs,
1600  .outputs = showcqt_outputs,
1601  .priv_class = &showcqt_class,
1602 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:30
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
av_fft_end
av_cold void av_fft_end(FFTContext *s)
Definition: avfft.c:48
rgb::b
uint8_t b
Definition: rpzaenc.c:61
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:86
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
level
uint8_t level
Definition: svq3.c:206
draw_axis_yuv
static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
Definition: avf_showcqt.c:963
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
draw_bar_yuv
static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h, const ColorFloat *c, int bar_h, float bar_t)
Definition: avf_showcqt.c:810
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:286
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
color
Definition: vf_paletteuse.c:583
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1096
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:925
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:461
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
ColorFloat
Definition: avf_showcqt.h:41
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:238
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
init_cscheme
static int init_cscheme(ShowCQTContext *s)
Definition: avf_showcqt.c:1234
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ff_make_format64_list
AVFilterChannelLayouts * ff_make_format64_list(const int64_t *fmts)
Definition: formats.c:295
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:27
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
step
Trying all byte sequences a megabyte in length and selecting the best-looking sequence would yield far too many cases to try. A word about quality, which is also called distortion: distortion can be quantified by almost any quality measurement one chooses; commonly the sum of squared differences is used, but more complex methods that consider psychovisual effects can be used as well. It makes no difference in this discussion. First step.
Definition: rate_distortion.txt:58
AVFrame::width
int width
Definition: frame.h:376
w
uint8_t w
Definition: llviddspenc.c:39
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:142
expf
#define expf(x)
Definition: libm.h:283
FLAGS
#define FLAGS
Definition: avf_showcqt.c:58
half
static uint8_t half(int a, int b)
Definition: mobiclip.c:541
ff_request_frame
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:408
av_fft_permute
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
Definition: avfft.c:38
BLEND_WITH_CHROMA
#define BLEND_WITH_CHROMA(c)
Definition: avf_showcqt.c:890
base
uint8_t base
Definition: vp3data.h:141
c_weighting
static double c_weighting(void *p, double f)
Definition: avf_showcqt.c:221
PTS_STEP
#define PTS_STEP
Definition: avf_showcqt.c:54
max
#define max(a, b)
Definition: cuda_runtime.h:33
CSCHEME
#define CSCHEME
Definition: avf_showcqt.c:53
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:149
render_default_font
static int render_default_font(AVFrame *tmp)
Definition: avf_showcqt.c:653
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
draw_axis_rgb
static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
Definition: avf_showcqt.c:861
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:65
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
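A short sketch of the expression API that this filter relies on for its volume and color options: parse once, evaluate with values bound to the declared variables, then free. The expression string and variable list below are illustrative:

#include "libavutil/eval.h"

static const char *const demo_var_names[] = { "f", NULL };  /* hypothetical variable list */

static double eval_demo(void)
{
    AVExpr *e = NULL;
    double vals[] = { 440.0 };   /* value bound to "f" */
    double res = 0.0;
    if (av_expr_parse(&e, "2*f + 1", demo_var_names,
                      NULL, NULL, NULL, NULL, 0, NULL) >= 0) {
        res = av_expr_eval(e, vals, NULL);  /* -> 881.0 */
        av_expr_free(e);
    }
    return res;
}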
rgb_from_cqt
static void rgb_from_cqt(ColorFloat *c, const FFTComplex *v, float g, int len, float cscheme[6])
Definition: avf_showcqt.c:733
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:518
rgb
Definition: rpzaenc.c:58
a_weighting
static double a_weighting(void *p, double f)
Definition: avf_showcqt.c:206
fail
#define fail()
Definition: checkasm.h:133
val
static double val(void *priv, double ch)
Definition: aeval.c:76
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: avf_showcqt.c:1313
pts
static int64_t pts
Definition: transcode_aac.c:652
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:91
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
avassert.h
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_cold
#define av_cold
Definition: attributes.h:90
mask
static const uint16_t mask[17]
Definition: lzw.c:38
width
#define width
BLEND_WITHOUT_CHROMA
#define BLEND_WITHOUT_CHROMA(c, alpha_inc)
Definition: avf_showcqt.c:910
s
#define s(width, name)
Definition: cbs_vp9.c:257
UPDATE_TIME
#define UPDATE_TIME(t)
DRAW_BAR_WITHOUT_CHROMA
#define DRAW_BAR_WITHOUT_CHROMA(x)
Definition: avf_showcqt.c:799
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
init_colormatrix
static void init_colormatrix(ShowCQTContext *s)
Definition: avf_showcqt.c:1199
format
The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists, they are references to shared objects.
floor
static __device__ float floor(float a)
Definition: cuda_runtime.h:173
g
const char * g
Definition: vf_curves.c:117
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:227
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:466
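A sketch of a query_formats() callback built from ff_make_format_list() and ff_formats_ref(); the pixel-format list is illustrative, and the incfg field name assumes the AVFilterFormatsConfig-based negotiation referenced elsewhere in this index:

#include "avfilter.h"
#include "formats.h"

static int query_formats_sketch(AVFilterContext *ctx)
{
    /* Formats this hypothetical filter can output; the list ends with AV_PIX_FMT_NONE. */
    static const int out_pix_fmts[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    AVFilterFormats *list = ff_make_format_list(out_pix_fmts);
    if (!list)
        return AVERROR(ENOMEM);
    /* On an output link the filter is the link's source, so it fills incfg. */
    return ff_formats_ref(list, &ctx->outputs[0]->incfg.formats);
}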
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CH_LAYOUT_STEREO_DOWNMIX
#define AV_CH_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:117
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
BLEND_CHROMA2x2
#define BLEND_CHROMA2x2(c)
Definition: avf_showcqt.c:941
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
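For example, av_rescale_q() converts a timestamp between two time bases without intermediate overflow; the concrete time bases below are illustrative:

#include "libavutil/mathematics.h"
#include "libavutil/rational.h"

static int64_t audio_pts_to_video_pts(int64_t audio_pts)
{
    AVRational audio_tb = { 1, 44100 };  /* source time base (audio samples) */
    AVRational video_tb = { 1, 25 };     /* destination time base (video frames) */
    return av_rescale_q(audio_pts, audio_tb, video_tb);  /* e.g. 88200 -> 50 */
}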
cqt_calc
static void cqt_calc(FFTComplex *dst, const FFTComplex *src, const Coeffs *coeffs, int len, int fft_len)
Definition: avf_showcqt.c:270
AVExpr
Definition: eval.c:157
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
ShowCQTContext
Definition: avf_showcqt.h:46
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
mul
static float mul(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:40
FFABS
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
clip_with_log
static double clip_with_log(void *log_ctx, const char *name, double val, double min, double max, double nan_replace, int idx)
Definition: avf_showcqt.c:185
xs
#define xs(width, name, var, subs,...)
Definition: cbs_vp9.c:353
result
Forward the result (frame or status change) to the corresponding input; if nothing is possible, ...
NULL
#define NULL
Definition: coverity.c:32
init
static av_cold int init(AVFilterContext *ctx)
Definition: avf_showcqt.c:1256
vars
static const uint8_t vars[2][12]
Definition: camellia.c:179
ff_avf_showcqt
AVFilter ff_avf_showcqt
Definition: avf_showcqt.c:1592
render_freetype
static int render_freetype(ShowCQTContext *s, AVFrame *tmp, char *fontfile)
Definition: avf_showcqt.c:505
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:235
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:349
src
#define src
Definition: vp8dsp.c:255
time.h
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
showcqt_inputs
static const AVFilterPad showcqt_inputs[]
Definition: avf_showcqt.c:1573
inputs
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, ...
Definition: filter_design.txt:243
exp
int8_t exp
Definition: eval.c:72
alloc_frame_empty
static AVFrame * alloc_frame_empty(enum AVPixelFormat format, int w, int h)
Definition: avf_showcqt.c:359
c
Undefined behavior: in C, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in C code; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, and optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of the computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that the output of the computation does not ...
Definition: undefined.txt:32
init_axis_from_file
static int init_axis_from_file(ShowCQTContext *s)
Definition: avf_showcqt.c:403
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
yuv_from_cqt
static void yuv_from_cqt(ColorFloat *c, const FFTComplex *v, float gamma, int len, float cm[3][3], float cscheme[6])
Definition: avf_showcqt.c:743
eval.h
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
showcqt_outputs
static const AVFilterPad showcqt_outputs[]
Definition: avf_showcqt.c:1582
process_cqt
static void process_cqt(ShowCQTContext *s)
Definition: avf_showcqt.c:1083
calculate_gamma
static float calculate_gamma(float v, float g)
Definition: avf_showcqt.c:720
init_axis_color
static int init_axis_color(ShowCQTContext *s, AVFrame *tmp, int half)
Definition: avf_showcqt.c:458
av_clipd
#define av_clipd
Definition: common.h:173
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
init_axis_from_font
static int init_axis_from_font(ShowCQTContext *s)
Definition: avf_showcqt.c:679
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
update_sono_rgb
static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
Definition: avf_showcqt.c:1052
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: avf_showcqt.c:1318
ff_load_image
int ff_load_image(uint8_t *data[4], int linesize[4], int *w, int *h, enum AVPixelFormat *pix_fmt, const char *filename, void *log_ctx)
Load image from filename and put the resulting image in data.
Definition: lavfutils.c:25
FFTComplex::im
FFTSample im
Definition: avfft.h:38
TLENGTH
#define TLENGTH
Definition: avf_showcqt.c:47
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, ...
Definition: frame.h:391
FFTComplex::re
FFTSample re
Definition: avfft.h:38
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
a
The reader does not expect b to be semantically unsigned here, and if the code is changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a ...
Definition: undefined.txt:41
rgb::g
uint8_t g
Definition: rpzaenc.c:60
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
init_volume
static int init_volume(ShowCQTContext *s)
Definition: avf_showcqt.c:228
xga_font_data.h
ENDFREQ
#define ENDFREQ
Definition: avf_showcqt.c:46
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
BLEND_CHROMA2
#define BLEND_CHROMA2(c)
Definition: avf_showcqt.c:924
Coeffs::len
int len
Definition: avf_showcqt.h:30
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:520
convert_axis_pixel_format
static enum AVPixelFormat convert_axis_pixel_format(enum AVPixelFormat format)
Definition: avf_showcqt.c:385
create_freq_table
static double * create_freq_table(double base, double end, int n)
Definition: avf_showcqt.c:165
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
i
int i
Definition: input.c:407
b_weighting
static double b_weighting(void *p, double f)
Definition: avf_showcqt.c:214
avpriv_vga16_font
const uint8_t avpriv_vga16_font[4096]
Definition: xga_font_data.c:160
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
b_func
static double b_func(void *p, double x)
Definition: avf_showcqt.c:452
r_func
static double r_func(void *p, double x)
Definition: avf_showcqt.c:440
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:523
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
VOLUME_MAX
#define VOLUME_MAX
Definition: avf_showcqt.c:49
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
var_names
static const char *const var_names[]
Definition: setts_bsf.c:33
cbrtf
static av_always_inline float cbrtf(float x)
Definition: libm.h:61
TLENGTH_MIN
#define TLENGTH_MIN
Definition: avf_showcqt.c:48
uint8_t
uint8_t
Definition: audio_convert.c:194
rgb::r
uint8_t r
Definition: rpzaenc.c:59
OFFSET
#define OFFSET(x)
Definition: avf_showcqt.c:57
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:452
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
draw_bar_rgb
static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h, const ColorFloat *c, int bar_h, float bar_t)
Definition: avf_showcqt.c:757
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:515
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:569
Coeffs::start
int start
Definition: avf_showcqt.h:30
log2
#define log2(x)
Definition: libm.h:404
common_uninit
static void common_uninit(ShowCQTContext *s)
Definition: avf_showcqt.c:108
AVFilter
Filter definition.
Definition: avfilter.h:145
PTS_TOLERANCE
#define PTS_TOLERANCE
Definition: avf_showcqt.c:55
plot_cqt
static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
Definition: avf_showcqt.c:1129
ret
ret
Definition: filter_design.txt:187
config_output
static int config_output(AVFilterLink *outlink)
Definition: avf_showcqt.c:1353
FONTCOLOR
#define FONTCOLOR
Definition: avf_showcqt.c:50
av_fft_init
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
Definition: avfft.c:28
showcqt_options
static const AVOption showcqt_options[]
Definition: avf_showcqt.c:60
AVFrame::height
int height
Definition: frame.h:376
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:421
Coeffs
Definition: avf_showcqt.h:28
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:517
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
midi
static double midi(void *p, double f)
Definition: avf_showcqt.c:435
Coeffs::val
FFTSample * val
Definition: avf_showcqt.h:29
avf_showcqt.h
cm
#define cm
Definition: dvbsubdec.c:37
init_cqt
static int init_cqt(ShowCQTContext *s)
Definition: avf_showcqt.c:297
update_sono_yuv
static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
Definition: avf_showcqt.c:1064
ff_showcqt_init_x86
void ff_showcqt_init_x86(ShowCQTContext *s)
Definition: avf_showcqt_init.c:47
lavfutils.h
g_func
static double g_func(void *p, double x)
Definition: avf_showcqt.c:446
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
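The AVRational helpers referenced in this index compose as plain value operations; a small illustrative sequence:

#include "libavutil/rational.h"

static void rational_demo(void)
{
    AVRational fps  = av_make_q(25, 1);               /* 25/1  */
    AVRational tb   = av_inv_q(fps);                  /* 1/25  */
    AVRational half = av_div_q(fps, av_make_q(2, 1)); /* 25/2  */
    AVRational prod = av_mul_q(tb, half);             /* 1/2   */
    double d = av_q2d(prod);                          /* 0.5   */
    (void)d;
}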
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
Definition: avf_showcqt.c:1481
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:445
BASEFREQ
#define BASEFREQ
Definition: avf_showcqt.c:45
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
channel_layouts
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:114
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
request_frame
static int request_frame(AVFilterLink *outlink)
Definition: avf_showcqt.c:1562
convert_header.str
string str
Definition: convert_header.py:20
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
h
h
Definition: vp9dsp_template.c:2038
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(showcqt)
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
int
int
Definition: ffmpeg_filter.c:170
ff_scale_image
int ff_scale_image(uint8_t *dst_data[4], int dst_linesize[4], int dst_w, int dst_h, enum AVPixelFormat dst_pix_fmt, uint8_t *const src_data[4], int src_linesize[4], int src_w, int src_h, enum AVPixelFormat src_pix_fmt, void *log_ctx)
Scale image using libswscale.
Definition: lswsutils.c:22
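A sketch combining the two lavfi helpers ff_load_image() and ff_scale_image() to load a picture file and rescale it to a fixed size; the wrapper name, target format and ownership comments are assumptions, not code from this filter:

#include "lavfutils.h"
#include "lswsutils.h"
#include "libavutil/mem.h"
#include "libavutil/pixfmt.h"

static int load_and_scale(uint8_t *dst_data[4], int dst_linesize[4],
                          int dst_w, int dst_h, const char *filename, void *log_ctx)
{
    uint8_t *src_data[4] = { NULL };
    int src_linesize[4], src_w, src_h, ret;
    enum AVPixelFormat src_fmt;

    ret = ff_load_image(src_data, src_linesize, &src_w, &src_h, &src_fmt, filename, log_ctx);
    if (ret < 0)
        return ret;
    ret = ff_scale_image(dst_data, dst_linesize, dst_w, dst_h, AV_PIX_FMT_RGBA,
                         src_data, src_linesize, src_w, src_h, src_fmt, log_ctx);
    av_freep(&src_data[0]);   /* source planes were allocated by ff_load_image() */
    return ret;               /* on success the caller owns dst_data[0] */
}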
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
av_fft_calc
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().
Definition: avfft.c:43
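A sketch of the avfft calling sequence in isolation (this filter includes libavcodec/avfft.h): initialize a context for a fixed size, permute the input, run the in-place transform, then release the context with av_fft_end(), which is part of the same API although not listed in this index. The transform size is illustrative:

#include "libavcodec/avfft.h"

static void fft_1024_in_place(FFTComplex *z)   /* z must hold 1 << 10 = 1024 values */
{
    FFTContext *fft = av_fft_init(10, 0);      /* nbits = 10, forward transform */
    if (!fft)
        return;
    av_fft_permute(fft, z);                    /* reorder input before av_fft_calc() */
    av_fft_calc(fft, z);                       /* in-place complex FFT */
    av_fft_end(fft);                           /* free the FFT context */
}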
lswsutils.h
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
draw_sono
static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
Definition: avf_showcqt.c:1028
DRAW_BAR_WITH_CHROMA
#define DRAW_BAR_WITH_CHROMA(x)
Definition: avf_showcqt.c:784
init_axis_empty
static int init_axis_empty(ShowCQTContext *s)
Definition: avf_showcqt.c:396
FFTComplex
Definition: avfft.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
re
float re
Definition: fft.c:82
funcs
CheckasmFunc * funcs
Definition: checkasm.c:269
min
float min
Definition: vorbis_enc_data.h:456
render_fontconfig
static int render_fontconfig(ShowCQTContext *s, AVFrame *tmp, char *font)
Definition: avf_showcqt.c:589