/* FFmpeg libavfilter: af_anequalizer.c — high-order parametric multiband equalizer */
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/ffmath.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/parseutils.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "audio.h"
30 
31 #define FILTER_ORDER 4
32 
33 enum FilterType {
38 };
39 
/**
 * One fourth-order IIR filter section in direct form I:
 * H(z) = (b0 + b1*z^-1 + ... + b4*z^-4) / (a0 + a1*z^-1 + ... + a4*z^-4)
 * The a*/b* coefficients are written by the *_fo_section() helpers;
 * num[]/denum[] are the running delay lines updated by section_process().
 */
typedef struct FoSection {
    double a0, a1, a2, a3, a4;  /* denominator (feedback) coefficients */
    double b0, b1, b2, b3, b4;  /* numerator (feedforward) coefficients */

    double num[4];    /* past inputs x[n-1] .. x[n-4] (num[0] is most recent) */
    double denum[4];  /* past outputs y[n-1] .. y[n-4] (denum[0] is most recent) */
} FoSection;
47 
48 typedef struct EqualizatorFilter {
49  int ignore;
50  int channel;
51  int type;
52 
53  double freq;
54  double gain;
55  double width;
56 
59 
60 typedef struct AudioNEqualizerContext {
61  const AVClass *class;
62  char *args;
63  char *colors;
65  int w, h;
66 
67  double mag;
68  int fscale;
74 
75 #define OFFSET(x) offsetof(AudioNEqualizerContext, x)
76 #define A AV_OPT_FLAG_AUDIO_PARAM
77 #define V AV_OPT_FLAG_VIDEO_PARAM
78 #define F AV_OPT_FLAG_FILTERING_PARAM
79 
/* Filter options. A|F options affect the audio path; V|F options affect
 * the optional frequency-response video output enabled by "curves". */
static const AVOption anequalizer_options[] = {
    /* per-channel filter specification string, parsed in config_input() */
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    /* IMAGE_SIZE writes both w and h (two consecutive ints at OFFSET(w)) */
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    /* frequency axis scaling of the drawn curves: 0 = linear, 1 = logarithmic */
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};
91 
92 AVFILTER_DEFINE_CLASS(anequalizer);
93 
95 {
97  char *colors, *color, *saveptr = NULL;
98  int ch, i, n;
99 
100  colors = av_strdup(s->colors);
101  if (!colors)
102  return;
103 
104  memset(out->data[0], 0, s->h * out->linesize[0]);
105 
106  for (ch = 0; ch < inlink->channels; ch++) {
107  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
108  int prev_v = -1;
109  double f;
110 
111  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
112  if (color)
113  av_parse_color(fg, color, -1, ctx);
114 
115  for (f = 0; f < s->w; f++) {
116  double zr, zi, zr2, zi2;
117  double Hr, Hi;
118  double Hmag = 1;
119  double w;
120  int v, y, x;
121 
122  w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
123  zr = cos(w);
124  zr2 = zr * zr;
125  zi = -sin(w);
126  zi2 = zi * zi;
127 
128  for (n = 0; n < s->nb_filters; n++) {
129  if (s->filters[n].channel != ch ||
130  s->filters[n].ignore)
131  continue;
132 
133  for (i = 0; i < FILTER_ORDER / 2; i++) {
134  FoSection *S = &s->filters[n].section[i];
135 
136  /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
137  ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
138 
139  Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
140  Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
141  Hmag *= hypot(Hr, Hi);
142  Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
143  Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
144  Hmag /= hypot(Hr, Hi);
145  }
146  }
147 
148  v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
149  x = lrint(f);
150  if (prev_v == -1)
151  prev_v = v;
152  if (v <= prev_v) {
153  for (y = v; y <= prev_v; y++)
154  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
155  } else {
156  for (y = prev_v; y <= v; y++)
157  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
158  }
159 
160  prev_v = v;
161  }
162  }
163 
164  av_free(colors);
165 }
166 
167 static int config_video(AVFilterLink *outlink)
168 {
169  AVFilterContext *ctx = outlink->src;
170  AudioNEqualizerContext *s = ctx->priv;
171  AVFilterLink *inlink = ctx->inputs[0];
172  AVFrame *out;
173 
174  outlink->w = s->w;
175  outlink->h = s->h;
176 
177  av_frame_free(&s->video);
178  s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
179  if (!out)
180  return AVERROR(ENOMEM);
181  outlink->sample_aspect_ratio = (AVRational){1,1};
182 
183  draw_curves(ctx, inlink, out);
184 
185  return 0;
186 }
187 
189 {
190  AudioNEqualizerContext *s = ctx->priv;
191  AVFilterPad pad, vpad;
192  int ret;
193 
194  pad = (AVFilterPad){
195  .name = av_strdup("out0"),
196  .type = AVMEDIA_TYPE_AUDIO,
197  };
198 
199  if (!pad.name)
200  return AVERROR(ENOMEM);
201 
202  if (s->draw_curves) {
203  vpad = (AVFilterPad){
204  .name = av_strdup("out1"),
205  .type = AVMEDIA_TYPE_VIDEO,
206  .config_props = config_video,
207  };
208  if (!vpad.name) {
209  av_freep(&pad.name);
210  return AVERROR(ENOMEM);
211  }
212  }
213 
214  ret = ff_insert_outpad(ctx, 0, &pad);
215  if (ret < 0) {
216  av_freep(&pad.name);
217  return ret;
218  }
219 
220  if (s->draw_curves) {
221  ret = ff_insert_outpad(ctx, 1, &vpad);
222  if (ret < 0) {
223  av_freep(&vpad.name);
224  return ret;
225  }
226  }
227 
228  return 0;
229 }
230 
232 {
233  AVFilterLink *inlink = ctx->inputs[0];
234  AVFilterLink *outlink = ctx->outputs[0];
235  AudioNEqualizerContext *s = ctx->priv;
238  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
239  static const enum AVSampleFormat sample_fmts[] = {
242  };
243  int ret;
244 
245  if (s->draw_curves) {
246  AVFilterLink *videolink = ctx->outputs[1];
247  formats = ff_make_format_list(pix_fmts);
248  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
249  return ret;
250  }
251 
252  formats = ff_make_format_list(sample_fmts);
253  if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
254  (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
255  return ret;
256 
257  layouts = ff_all_channel_counts();
258  if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
259  (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
260  return ret;
261 
262  formats = ff_all_samplerates();
263  if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
264  (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
265  return ret;
266 
267  return 0;
268 }
269 
271 {
272  AudioNEqualizerContext *s = ctx->priv;
273 
274  for (int i = 0; i < ctx->nb_outputs; i++)
275  av_freep(&ctx->output_pads[i].name);
276  av_frame_free(&s->video);
277  av_freep(&s->filters);
278  s->nb_filters = 0;
279  s->nb_allocated = 0;
280 }
281 
/**
 * Fill one fourth-order section with Butterworth band-pass peaking
 * filter coefficients.
 *
 * @param S    section whose a0..a4 / b0..b4 coefficients are written
 * @param beta bandwidth parameter of the analog prototype for this section
 * @param si   sine term of this section's pole pair
 * @param g    per-section peak gain (linear)
 * @param g0   per-section reference gain (linear)
 * @param D    normalization denominator shared by all coefficients
 * @param c0   cos(w0) of the normalized center frequency
 *
 * When c0 == +/-1 the band center lies at DC or Nyquist and the section
 * degenerates to second order (the z^-3 and z^-4 coefficients are zero).
 */
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}
312 
314  int N, double w0, double wb,
315  double G, double Gb, double G0)
316 {
317  double g, c0, g0, beta;
318  double epsilon;
319  int r = N % 2;
320  int L = (N - r) / 2;
321  int i;
322 
323  if (G == 0 && G0 == 0) {
324  f->section[0].a0 = 1;
325  f->section[0].b0 = 1;
326  f->section[1].a0 = 1;
327  f->section[1].b0 = 1;
328  return;
329  }
330 
331  G = ff_exp10(G/20);
332  Gb = ff_exp10(Gb/20);
333  G0 = ff_exp10(G0/20);
334 
335  epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
336  g = pow(G, 1.0 / N);
337  g0 = pow(G0, 1.0 / N);
338  beta = pow(epsilon, -1.0 / N) * tan(wb/2);
339  c0 = cos(w0);
340 
341  for (i = 1; i <= L; i++) {
342  double ui = (2.0 * i - 1) / N;
343  double si = sin(M_PI * ui / 2.0);
344  double Di = beta * beta + 2 * si * beta + 1;
345 
346  butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
347  }
348 }
349 
/**
 * Fill one fourth-order section with Chebyshev type-1 band-pass peaking
 * filter coefficients.
 *
 * @param S       section whose coefficients are written
 * @param a       real part parameter of this section's pole pair
 * @param c       cosine term of this section's pole pair
 * @param tetta_b tan(wb/2), bandwidth warping factor
 * @param g0      per-section reference gain (linear)
 * @param si      sine term of this section's pole pair
 * @param b       gain-dependent numerator parameter
 * @param D       normalization denominator shared by all coefficients
 * @param c0      cos(w0) of the normalized center frequency
 *
 * When c0 == +/-1 the band center is at DC or Nyquist and the section
 * collapses to second order (z^-3 and z^-4 coefficients are zero).
 */
static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        /* NOTE(review): b0 uses si*tetta_b*tetta_b while b2 uses si*tetta_b —
         * asymmetric; this matches the long-standing upstream formulas, but
         * verify against the Orfanidis high-order equalizer derivation. */
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}
381 
383  int N, double w0, double wb,
384  double G, double Gb, double G0)
385 {
386  double a, b, c0, g0, alfa, beta, tetta_b;
387  double epsilon;
388  int r = N % 2;
389  int L = (N - r) / 2;
390  int i;
391 
392  if (G == 0 && G0 == 0) {
393  f->section[0].a0 = 1;
394  f->section[0].b0 = 1;
395  f->section[1].a0 = 1;
396  f->section[1].b0 = 1;
397  return;
398  }
399 
400  G = ff_exp10(G/20);
401  Gb = ff_exp10(Gb/20);
402  G0 = ff_exp10(G0/20);
403 
404  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
405  g0 = pow(G0,1.0/N);
406  alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
407  beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
408  a = 0.5 * (alfa - 1.0/alfa);
409  b = 0.5 * (beta - g0*g0*(1/beta));
410  tetta_b = tan(wb/2);
411  c0 = cos(w0);
412 
413  for (i = 1; i <= L; i++) {
414  double ui = (2.0*i-1.0)/N;
415  double ci = cos(M_PI*ui/2.0);
416  double si = sin(M_PI*ui/2.0);
417  double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
418 
419  chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
420  }
421 }
422 
/**
 * Fill one fourth-order section with Chebyshev type-2 band-pass peaking
 * filter coefficients.
 *
 * @param S       section whose coefficients are written
 * @param a       real part parameter of this section's pole pair
 * @param c       cosine term of this section's pole pair
 * @param tetta_b tan(wb/2), bandwidth warping factor
 * @param g       per-section peak gain (linear)
 * @param si      sine term of this section's pole pair
 * @param b       gain-dependent numerator parameter
 * @param D       normalization denominator shared by all coefficients
 * @param c0      cos(w0) of the normalized center frequency
 *
 * When c0 == +/-1 the band center is at DC or Nyquist and the section
 * collapses to second order (z^-3 and z^-4 coefficients are zero).
 */
static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}
454 
456  int N, double w0, double wb,
457  double G, double Gb, double G0)
458 {
459  double a, b, c0, tetta_b;
460  double epsilon, g, eu, ew;
461  int r = N % 2;
462  int L = (N - r) / 2;
463  int i;
464 
465  if (G == 0 && G0 == 0) {
466  f->section[0].a0 = 1;
467  f->section[0].b0 = 1;
468  f->section[1].a0 = 1;
469  f->section[1].b0 = 1;
470  return;
471  }
472 
473  G = ff_exp10(G/20);
474  Gb = ff_exp10(Gb/20);
475  G0 = ff_exp10(G0/20);
476 
477  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
478  g = pow(G, 1.0 / N);
479  eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
480  ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
481  a = (eu - 1.0/eu)/2.0;
482  b = (ew - g*g/ew)/2.0;
483  tetta_b = tan(wb/2);
484  c0 = cos(w0);
485 
486  for (i = 1; i <= L; i++) {
487  double ui = (2.0 * i - 1.0)/N;
488  double ci = cos(M_PI * ui / 2.0);
489  double si = sin(M_PI * ui / 2.0);
490  double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
491 
492  chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
493  }
494 }
495 
/**
 * Map a peak gain (dB) to the gain at the band edges (dB) for a
 * Butterworth response: large boosts/cuts sit 3 dB inside the peak,
 * small gains use half the peak value.
 */
static double butterworth_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return gain + 3;
    if (gain >= 6)
        return gain - 3;
    return gain * 0.5;
}
509 
/**
 * Map a peak gain (dB) to the gain at the band edges (dB) for a
 * Chebyshev type-1 response: large boosts/cuts sit 1 dB inside the
 * peak, small gains use 90% of the peak value.
 */
static double chebyshev1_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return gain + 1;
    if (gain >= 6)
        return gain - 1;
    return gain * 0.9;
}
523 
/**
 * Map a peak gain (dB) to the gain at the band edges (dB) for a
 * Chebyshev type-2 response: clamped to +/-3 dB for large boosts/cuts,
 * 30% of the peak for small gains.
 */
static double chebyshev2_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return -3;
    if (gain >= 6)
        return 3;
    return gain * 0.3;
}
537 
538 static inline double hz_2_rad(double x, double fs)
539 {
540  return 2 * M_PI * x / fs;
541 }
542 
544 {
545  double w0 = hz_2_rad(f->freq, sample_rate);
546  double wb = hz_2_rad(f->width, sample_rate);
547  double bw_gain;
548 
549  switch (f->type) {
550  case BUTTERWORTH:
551  bw_gain = butterworth_compute_bw_gain_db(f->gain);
552  butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
553  break;
554  case CHEBYSHEV1:
555  bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
556  chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
557  break;
558  case CHEBYSHEV2:
559  bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
560  chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
561  break;
562  }
563 
564 }
565 
567 {
568  equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
569  if (s->nb_filters >= s->nb_allocated) {
571 
572  filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
573  if (!filters)
574  return AVERROR(ENOMEM);
575  memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
576  av_free(s->filters);
577  s->filters = filters;
578  s->nb_allocated *= 2;
579  }
580  s->nb_filters++;
581 
582  return 0;
583 }
584 
586 {
587  AVFilterContext *ctx = inlink->dst;
588  AudioNEqualizerContext *s = ctx->priv;
589  char *args = av_strdup(s->args);
590  char *saveptr = NULL;
591  int ret = 0;
592 
593  if (!args)
594  return AVERROR(ENOMEM);
595 
596  s->nb_allocated = 32 * inlink->channels;
597  s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
598  if (!s->filters) {
599  s->nb_allocated = 0;
600  av_free(args);
601  return AVERROR(ENOMEM);
602  }
603 
604  while (1) {
605  char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
606 
607  if (!arg)
608  break;
609 
610  s->filters[s->nb_filters].type = 0;
611  if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
612  &s->filters[s->nb_filters].freq,
613  &s->filters[s->nb_filters].width,
614  &s->filters[s->nb_filters].gain,
615  &s->filters[s->nb_filters].type) != 5 &&
616  sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
617  &s->filters[s->nb_filters].freq,
618  &s->filters[s->nb_filters].width,
619  &s->filters[s->nb_filters].gain) != 4 ) {
620  av_free(args);
621  return AVERROR(EINVAL);
622  }
623 
624  if (s->filters[s->nb_filters].freq < 0 ||
625  s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
626  s->filters[s->nb_filters].ignore = 1;
627 
628  if (s->filters[s->nb_filters].channel < 0 ||
629  s->filters[s->nb_filters].channel >= inlink->channels)
630  s->filters[s->nb_filters].ignore = 1;
631 
632  s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
633  ret = add_filter(s, inlink);
634  if (ret < 0)
635  break;
636  }
637 
638  av_free(args);
639 
640  return ret;
641 }
642 
643 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
644  char *res, int res_len, int flags)
645 {
646  AudioNEqualizerContext *s = ctx->priv;
647  AVFilterLink *inlink = ctx->inputs[0];
648  int ret = AVERROR(ENOSYS);
649 
650  if (!strcmp(cmd, "change")) {
651  double freq, width, gain;
652  int filter;
653 
654  if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
655  return AVERROR(EINVAL);
656 
657  if (filter < 0 || filter >= s->nb_filters)
658  return AVERROR(EINVAL);
659 
660  if (freq < 0 || freq > inlink->sample_rate / 2.0)
661  return AVERROR(EINVAL);
662 
663  s->filters[filter].freq = freq;
664  s->filters[filter].width = width;
665  s->filters[filter].gain = gain;
666  equalizer(&s->filters[filter], inlink->sample_rate);
667  if (s->draw_curves)
668  draw_curves(ctx, inlink, s->video);
669 
670  ret = 0;
671  }
672 
673  return ret;
674 }
675 
676 static inline double section_process(FoSection *S, double in)
677 {
678  double out;
679 
680  out = S->b0 * in;
681  out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
682  out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
683  out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
684  out+= S->b4 * S->num[3] - S->denum[3] * S->a4;
685 
686  S->num[3] = S->num[2];
687  S->num[2] = S->num[1];
688  S->num[1] = S->num[0];
689  S->num[0] = in;
690 
691  S->denum[3] = S->denum[2];
692  S->denum[2] = S->denum[1];
693  S->denum[1] = S->denum[0];
694  S->denum[0] = out;
695 
696  return out;
697 }
698 
699 static double process_sample(FoSection *s1, double in)
700 {
701  double p0 = in, p1;
702  int i;
703 
704  for (i = 0; i < FILTER_ORDER / 2; i++) {
705  p1 = section_process(&s1[i], p0);
706  p0 = p1;
707  }
708 
709  return p1;
710 }
711 
713 {
714  AVFilterContext *ctx = inlink->dst;
715  AudioNEqualizerContext *s = ctx->priv;
716  AVFilterLink *outlink = ctx->outputs[0];
717  double *bptr;
718  int i, n;
719 
720  for (i = 0; i < s->nb_filters; i++) {
721  EqualizatorFilter *f = &s->filters[i];
722 
723  if (f->gain == 0. || f->ignore)
724  continue;
725 
726  bptr = (double *)buf->extended_data[f->channel];
727  for (n = 0; n < buf->nb_samples; n++) {
728  double sample = bptr[n];
729 
730  sample = process_sample(f->section, sample);
731  bptr[n] = sample;
732  }
733  }
734 
735  if (s->draw_curves) {
736  const int64_t pts = buf->pts +
737  av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
738  outlink->time_base);
739  int ret;
740 
741  s->video->pts = pts;
742  ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
743  if (ret < 0)
744  return ret;
745  }
746 
747  return ff_filter_frame(outlink, buf);
748 }
749 
/* The single audio input pad. Frames are filtered in place in
 * filter_frame(), hence needs_writable. */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};
760 
762  .name = "anequalizer",
763  .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
764  .priv_size = sizeof(AudioNEqualizerContext),
765  .priv_class = &anequalizer_class,
766  .init = init,
767  .uninit = uninit,
769  .inputs = inputs,
770  .outputs = NULL,
773 };
#define NULL
Definition: coverity.c:32
static void chebyshev2_fo_section(FoSection *S, double a, double c, double tetta_b, double g, double si, double b, double D, double c0)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
static void chebyshev1_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
static double butterworth_compute_bw_gain_db(double gain)
static int config_video(AVFilterLink *outlink)
static void butterworth_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
double, planar
Definition: samplefmt.h:70
static void butterworth_fo_section(FoSection *S, double beta, double si, double g, double g0, double D, double c0)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
FoSection section[2]
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVFILTER_DEFINE_CLASS(anequalizer)
#define sample
#define N
Definition: af_mcompand.c:54
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
FilterType
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:434
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
uint8_t
#define av_cold
Definition: attributes.h:82
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:92
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
static const AVOption anequalizer_options[]
static void chebyshev2_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
static void chebyshev1_fo_section(FoSection *S, double a, double c, double tetta_b, double g0, double si, double b, double D, double c0)
static double hz_2_rad(double x, double fs)
#define FILTER_ORDER
#define A
static av_cold void uninit(AVFilterContext *ctx)
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:111
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:354
A filter pad used for either input or output.
Definition: internal.h:54
#define F
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
Definition: ffmath.h:42
static int config_input(AVFilterLink *inlink)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
#define S(s, c, i)
static void equalizer(EqualizatorFilter *f, double sample_rate)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const char * r
Definition: vf_curves.c:114
void * priv
private data for use by the filter
Definition: avfilter.h:353
const char * arg
Definition: jacosubdec.c:66
#define V
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
static int query_formats(AVFilterContext *ctx)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
static av_const double hypot(double x, double y)
Definition: libm.h:366
#define b
Definition: input.c:41
#define width
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:439
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
#define s(width, name)
Definition: cbs_vp9.c:257
static const AVFilterPad inputs[]
int n
Definition: avisynth_c.h:760
#define L(x)
Definition: vp56_arith.h:36
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static double process_sample(FoSection *s1, double in)
A list of supported channel layouts.
Definition: formats.h:85
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
sample_rate
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static double chebyshev2_compute_bw_gain_db(double gain)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
void * buf
Definition: avisynth_c.h:766
AVFilter ff_af_anequalizer
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
const char * name
Filter name.
Definition: avfilter.h:148
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
#define s1
Definition: regdef.h:38
offset must point to two consecutive integers
Definition: opt.h:233
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:394
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
static double section_process(FoSection *S, double in)
double denum[4]
internal math functions header
static double chebyshev1_compute_bw_gain_db(double gain)
#define G
Definition: huffyuvdsp.h:33
D(D(float, sse)
Definition: rematrix_init.c:28
#define ui(width, name)
Definition: cbs_mpeg2.c:43
static av_cold int init(AVFilterContext *ctx)
#define OFFSET(x)
#define av_free(p)
static const struct PPFilter filters[]
Definition: postprocess.c:134
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
An instance of a filter.
Definition: avfilter.h:338
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
formats
Definition: signature.h:48
EqualizatorFilter * filters
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:409
double num[4]
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:285
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
for(j=16;j >0;--j)
#define AV_WL32(p, v)
Definition: intreadwrite.h:426