FFmpeg
af_anequalizer.c
/*
 * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define FILTER_ORDER 4

enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES,
};

typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;

    double num[4];
    double denum[4];
} FoSection;

typedef struct EqualizatorFilter {
    int ignore;
    int channel;
    int type;

    double freq;
    double gain;
    double width;

    FoSection section[2];
} EqualizatorFilter;

typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;
    char *colors;
    int draw_curves;
    int w, h;

    double mag;
    int fscale;
    int nb_filters;
    int nb_allocated;
    EqualizatorFilter *filters;
    AVFrame *video;
} AudioNEqualizerContext;

#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

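/*
 * Filter options. The "params" string is parsed in config_input() below as a
 * '|'-separated list of per-channel band specifications of the form
 *   c<channel> f=<center frequency> w=<width> g=<gain> [t=<filter type>]
 * e.g. (an illustrative value, matching the sscanf() patterns used below):
 *   anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1
 */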
static const AVOption anequalizer_options[] = {
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
    { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
    { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(anequalizer);

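/*
 * Render the combined frequency response of all active filters into the RGBA
 * frame 'out', one colored curve per input channel. The response is evaluated
 * on the unit circle (zr + j*zi = e^{-jw}) for every x column, converted to
 * dB and scaled so that +/- mgain dB spans the image height.
 */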
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->ch_layout.nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                             ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */

                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}

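/* Configure the optional video output: apply the "size" option and allocate
 * the persistent frame that draw_curves() renders into. */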
static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}

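/* Create the audio output pad and, when the "curves" option is set, a second
 * video output pad (the filter is declared with AVFILTER_FLAG_DYNAMIC_OUTPUTS). */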
static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = "out0",
        .type = AVMEDIA_TYPE_AUDIO,
    };

    ret = ff_append_outpad(ctx, &pad);
    if (ret < 0)
        return ret;

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = "out1",
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        ret = ff_append_outpad(ctx, &vpad);
        if (ret < 0)
            return ret;
    }

    return 0;
}

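/* Negotiate planar double samples, any channel count and any sample rate on
 * the audio links, and RGBA on the optional video output. */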
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
        return ret;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}

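/*
 * Each band is implemented as FILTER_ORDER / 2 fourth-order IIR sections
 * (FoSection). The *_fo_section() helpers below fill one section's
 * coefficients for
 *   H(z) = (b0 + b1*z^-1 + ... + b4*z^-4) / (a0 + a1*z^-1 + ... + a4*z^-4);
 * when c0 == +/-1 (band centered at DC or Nyquist) the section degenerates
 * to second order (b3, b4, a3, a4 = 0).
 */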
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}

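/*
 * Design an order-N (here N = FILTER_ORDER) Butterworth-style peaking band
 * filter: gains are converted from dB to linear, the bandwidth parameter
 * beta is derived from the band-edge gain Gb, and one fourth-order section
 * is produced per pair of prototype poles. The Chebyshev type 1/2 variants
 * below follow the same structure.
 */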
static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G, 1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}

static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}

static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0,1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}

static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}

static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g  = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}

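/* Choose the gain (in dB) the band should have at its edges as a simple
 * piecewise function of the requested peak gain; each filter type uses a
 * slightly different rule. */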
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 3;
    else if(gain > -6 && gain < 6)
        bw_gain = gain * 0.5;
    else if(gain >= 6)
        bw_gain = gain - 3;

    return bw_gain;
}

static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if(gain > -6 && gain < 6)
        bw_gain = gain * 0.9;
    else if(gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}

static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = -3;
    else if(gain > -6 && gain < 6)
        bw_gain = gain * 0.3;
    else if(gain >= 6)
        bw_gain = 3;

    return bw_gain;
}

static inline double hz_2_rad(double x, double fs)
{
    return 2 * M_PI * x / fs;
}

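/* (Re)compute the coefficients of one band from its current
 * freq/width/gain/type, with frequencies converted to rad/sample. */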
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }

}

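/* Commit the band that was just written to s->filters[s->nb_filters]:
 * compute its coefficients and grow the array (doubling nb_allocated)
 * when it is nearly full. */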
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}

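/* Parse the "params" option: every '|'-separated entry selects a channel and
 * a band (f, w, g and optional t), is range-checked against the input sample
 * rate and channel count, and is appended via add_filter(). */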
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    s->nb_allocated = 32 * inlink->ch_layout.nb_channels;
    s->filters = av_calloc(inlink->ch_layout.nb_channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                                                      &s->filters[s->nb_filters].freq,
                                                      &s->filters[s->nb_filters].width,
                                                      &s->filters[s->nb_filters].gain,
                                                      &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                                                 &s->filters[s->nb_filters].freq,
                                                 &s->filters[s->nb_filters].width,
                                                 &s->filters[s->nb_filters].gain) != 4 ) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->ch_layout.nb_channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}

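/* Handle the "change" command, "<band index>|f=<freq>|w=<width>|g=<gain>":
 * update one existing band and redraw the curves if they are shown. */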
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq  = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain  = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}

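/* Run one sample through a single fourth-order section in direct form I:
 * num[] holds the last four inputs, denum[] the last four outputs. */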
static inline double section_process(FoSection *S, double in)
{
    double out;

    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}

static double process_sample(FoSection *s1, double in)
{
    double p0 = in, p1;
    int i;

    for (i = 0; i < FILTER_ORDER / 2; i++) {
        p1 = section_process(&s1[i], p0);
        p0 = p1;
    }

    return p1;
}

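/* Slice-threaded worker: each job takes a contiguous range of channels and
 * applies, in place, every enabled band that targets a channel in that range. */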
static int filter_channels(AVFilterContext *ctx, void *arg,
                           int jobnr, int nb_jobs)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFrame *buf = arg;
    const int start = (buf->ch_layout.nb_channels * jobnr) / nb_jobs;
    const int end = (buf->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;

    for (int i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];
        double *bptr;

        if (f->gain == 0. || f->ignore)
            continue;
        if (f->channel < start ||
            f->channel >= end)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (int n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    return 0;
}

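/* Filter one audio frame in place (unless disabled by the timeline) and, when
 * curves are drawn, push a clone of the response frame with a corresponding
 * timestamp on the video output. */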
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (!ctx->is_disabled)
        ff_filter_execute(ctx, filter_channels, buf, NULL,
                          FFMIN(inlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));

    if (s->draw_curves) {
        AVFrame *clone;

        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .flags        = AVFILTERPAD_FLAG_NEEDS_WRITABLE,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
};

767  .name = "anequalizer",
768  .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
769  .priv_size = sizeof(AudioNEqualizerContext),
770  .priv_class = &anequalizer_class,
771  .init = init,
772  .uninit = uninit,
774  .outputs = NULL,
776  .process_command = process_command,
780 };