FFmpeg
af_anequalizer.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/ffmath.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/parseutils.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "audio.h"
30 
31 #define FILTER_ORDER 4
32 
/* Band-pass prototype used to realize one equalizer band.
 * Reconstructed: members were lost in extraction; the set is fixed by the
 * switch in equalizer() and the av_clip(..., 0, NB_TYPES - 1) in config_input(). */
enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES
};
39 
/**
 * One fourth-order IIR filter section in direct form I.
 * The transfer function is (b0 + b1 z^-1 + ... + b4 z^-4) /
 * (a0 + a1 z^-1 + ... + a4 z^-4); see section_process().
 */
typedef struct FoSection {
    double a0, a1, a2, a3, a4;  /* denominator (feedback) coefficients */
    double b0, b1, b2, b3, b4;  /* numerator (feedforward) coefficients */

    double num[4];    /* delay line of past inputs x[n-1..n-4] */
    double denum[4];  /* delay line of past outputs y[n-1..n-4] */
} FoSection;
47 
48 typedef struct EqualizatorFilter {
49  int ignore;
50  int channel;
51  int type;
52 
53  double freq;
54  double gain;
55  double width;
56 
59 
60 typedef struct AudioNEqualizerContext {
61  const AVClass *class;
62  char *args;
63  char *colors;
65  int w, h;
66 
67  double mag;
68  int fscale;
74 
75 #define OFFSET(x) offsetof(AudioNEqualizerContext, x)
76 #define A AV_OPT_FLAG_AUDIO_PARAM
77 #define V AV_OPT_FLAG_VIDEO_PARAM
78 #define F AV_OPT_FLAG_FILTERING_PARAM
79 
/* Filter options; A = audio, V = video (curve drawing), F = filtering. */
static const AVOption anequalizer_options[] = {
  { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
  { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
  { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
  { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
  { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
  { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
  { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
  { NULL }
};
91 
92 AVFILTER_DEFINE_CLASS(anequalizer);
93 
95 {
97  char *colors, *color, *saveptr = NULL;
98  int ch, i, n;
99 
100  colors = av_strdup(s->colors);
101  if (!colors)
102  return;
103 
104  memset(out->data[0], 0, s->h * out->linesize[0]);
105 
106  for (ch = 0; ch < inlink->channels; ch++) {
107  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
108  int prev_v = -1;
109  double f;
110 
111  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
112  if (color)
113  av_parse_color(fg, color, -1, ctx);
114 
115  for (f = 0; f < s->w; f++) {
116  double zr, zi, zr2, zi2;
117  double Hr, Hi;
118  double Hmag = 1;
119  double w;
120  int v, y, x;
121 
122  w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
123  zr = cos(w);
124  zr2 = zr * zr;
125  zi = -sin(w);
126  zi2 = zi * zi;
127 
128  for (n = 0; n < s->nb_filters; n++) {
129  if (s->filters[n].channel != ch ||
130  s->filters[n].ignore)
131  continue;
132 
133  for (i = 0; i < FILTER_ORDER / 2; i++) {
134  FoSection *S = &s->filters[n].section[i];
135 
136  /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
137  ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
138 
139  Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
140  Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
141  Hmag *= hypot(Hr, Hi);
142  Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
143  Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
144  Hmag /= hypot(Hr, Hi);
145  }
146  }
147 
148  v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
149  x = lrint(f);
150  if (prev_v == -1)
151  prev_v = v;
152  if (v <= prev_v) {
153  for (y = v; y <= prev_v; y++)
154  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
155  } else {
156  for (y = prev_v; y <= v; y++)
157  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
158  }
159 
160  prev_v = v;
161  }
162  }
163 
164  av_free(colors);
165 }
166 
167 static int config_video(AVFilterLink *outlink)
168 {
169  AVFilterContext *ctx = outlink->src;
170  AudioNEqualizerContext *s = ctx->priv;
171  AVFilterLink *inlink = ctx->inputs[0];
172  AVFrame *out;
173 
174  outlink->w = s->w;
175  outlink->h = s->h;
176 
177  av_frame_free(&s->video);
178  s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
179  if (!out)
180  return AVERROR(ENOMEM);
181  outlink->sample_aspect_ratio = (AVRational){1,1};
182 
183  draw_curves(ctx, inlink, out);
184 
185  return 0;
186 }
187 
189 {
190  AudioNEqualizerContext *s = ctx->priv;
191  AVFilterPad pad, vpad;
192  int ret;
193 
194  pad = (AVFilterPad){
195  .name = av_strdup("out0"),
196  .type = AVMEDIA_TYPE_AUDIO,
197  };
198 
199  if (!pad.name)
200  return AVERROR(ENOMEM);
201 
202  if (s->draw_curves) {
203  vpad = (AVFilterPad){
204  .name = av_strdup("out1"),
205  .type = AVMEDIA_TYPE_VIDEO,
206  .config_props = config_video,
207  };
208  if (!vpad.name)
209  return AVERROR(ENOMEM);
210  }
211 
212  ret = ff_insert_outpad(ctx, 0, &pad);
213  if (ret < 0) {
214  av_freep(&pad.name);
215  return ret;
216  }
217 
218  if (s->draw_curves) {
219  ret = ff_insert_outpad(ctx, 1, &vpad);
220  if (ret < 0) {
221  av_freep(&vpad.name);
222  return ret;
223  }
224  }
225 
226  return 0;
227 }
228 
230 {
231  AVFilterLink *inlink = ctx->inputs[0];
232  AVFilterLink *outlink = ctx->outputs[0];
233  AudioNEqualizerContext *s = ctx->priv;
236  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
237  static const enum AVSampleFormat sample_fmts[] = {
240  };
241  int ret;
242 
243  if (s->draw_curves) {
244  AVFilterLink *videolink = ctx->outputs[1];
245  formats = ff_make_format_list(pix_fmts);
246  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
247  return ret;
248  }
249 
250  formats = ff_make_format_list(sample_fmts);
251  if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
252  (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
253  return ret;
254 
255  layouts = ff_all_channel_counts();
256  if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
257  (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
258  return ret;
259 
260  formats = ff_all_samplerates();
261  if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
262  (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
263  return ret;
264 
265  return 0;
266 }
267 
269 {
270  AudioNEqualizerContext *s = ctx->priv;
271 
272  for (int i = 0; i < ctx->nb_outputs; i++)
273  av_freep(&ctx->output_pads[i].name);
274  av_frame_free(&s->video);
275  av_freep(&s->filters);
276  s->nb_filters = 0;
277  s->nb_allocated = 0;
278 }
279 
/**
 * Fill one fourth-order section with Butterworth band-pass coefficients.
 * Closed-form bilinear-transform expressions; the c0 == +/-1 branch handles
 * the degenerate low-pass/high-pass case (center at DC or Nyquist), where the
 * section collapses to second order (b3/b4 and a3/a4 are zero).
 *
 * @param S    section to fill
 * @param beta prototype pole parameter (from the analog design)
 * @param si   sin term of the current pole pair
 * @param g    per-section band gain, g0 reference gain
 * @param D    common denominator, c0 cosine of the center frequency
 */
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}
310 
312  int N, double w0, double wb,
313  double G, double Gb, double G0)
314 {
315  double g, c0, g0, beta;
316  double epsilon;
317  int r = N % 2;
318  int L = (N - r) / 2;
319  int i;
320 
321  if (G == 0 && G0 == 0) {
322  f->section[0].a0 = 1;
323  f->section[0].b0 = 1;
324  f->section[1].a0 = 1;
325  f->section[1].b0 = 1;
326  return;
327  }
328 
329  G = ff_exp10(G/20);
330  Gb = ff_exp10(Gb/20);
331  G0 = ff_exp10(G0/20);
332 
333  epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
334  g = pow(G, 1.0 / N);
335  g0 = pow(G0, 1.0 / N);
336  beta = pow(epsilon, -1.0 / N) * tan(wb/2);
337  c0 = cos(w0);
338 
339  for (i = 1; i <= L; i++) {
340  double ui = (2.0 * i - 1) / N;
341  double si = sin(M_PI * ui / 2.0);
342  double Di = beta * beta + 2 * si * beta + 1;
343 
344  butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
345  }
346 }
347 
/**
 * Fill one fourth-order section with Chebyshev type I band-pass coefficients.
 * As with the Butterworth variant, c0 == +/-1 (center at DC or Nyquist)
 * degenerates to a second-order section.
 *
 * @param a,b     pole/zero radius parameters from the prototype design
 * @param c       cos term of the current pole pair, si the sin term
 * @param tetta_b tan(wb/2), g0 reference gain
 * @param D       common denominator, c0 cosine of the center frequency
 */
static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}
379 
381  int N, double w0, double wb,
382  double G, double Gb, double G0)
383 {
384  double a, b, c0, g0, alfa, beta, tetta_b;
385  double epsilon;
386  int r = N % 2;
387  int L = (N - r) / 2;
388  int i;
389 
390  if (G == 0 && G0 == 0) {
391  f->section[0].a0 = 1;
392  f->section[0].b0 = 1;
393  f->section[1].a0 = 1;
394  f->section[1].b0 = 1;
395  return;
396  }
397 
398  G = ff_exp10(G/20);
399  Gb = ff_exp10(Gb/20);
400  G0 = ff_exp10(G0/20);
401 
402  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
403  g0 = pow(G0,1.0/N);
404  alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
405  beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
406  a = 0.5 * (alfa - 1.0/alfa);
407  b = 0.5 * (beta - g0*g0*(1/beta));
408  tetta_b = tan(wb/2);
409  c0 = cos(w0);
410 
411  for (i = 1; i <= L; i++) {
412  double ui = (2.0*i-1.0)/N;
413  double ci = cos(M_PI*ui/2.0);
414  double si = sin(M_PI*ui/2.0);
415  double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
416 
417  chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
418  }
419 }
420 
/**
 * Fill one fourth-order section with Chebyshev type II (inverse Chebyshev)
 * band-pass coefficients. c0 == +/-1 (center at DC or Nyquist) degenerates
 * to a second-order section.
 *
 * @param a,b     pole/zero radius parameters from the prototype design
 * @param c       cos term of the current pole pair, si the sin term
 * @param tetta_b tan(wb/2), g per-section band gain
 * @param D       common denominator, c0 cosine of the center frequency
 */
static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}
452 
454  int N, double w0, double wb,
455  double G, double Gb, double G0)
456 {
457  double a, b, c0, tetta_b;
458  double epsilon, g, eu, ew;
459  int r = N % 2;
460  int L = (N - r) / 2;
461  int i;
462 
463  if (G == 0 && G0 == 0) {
464  f->section[0].a0 = 1;
465  f->section[0].b0 = 1;
466  f->section[1].a0 = 1;
467  f->section[1].b0 = 1;
468  return;
469  }
470 
471  G = ff_exp10(G/20);
472  Gb = ff_exp10(Gb/20);
473  G0 = ff_exp10(G0/20);
474 
475  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
476  g = pow(G, 1.0 / N);
477  eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
478  ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
479  a = (eu - 1.0/eu)/2.0;
480  b = (ew - g*g/ew)/2.0;
481  tetta_b = tan(wb/2);
482  c0 = cos(w0);
483 
484  for (i = 1; i <= L; i++) {
485  double ui = (2.0 * i - 1.0)/N;
486  double ci = cos(M_PI * ui / 2.0);
487  double si = sin(M_PI * ui / 2.0);
488  double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
489 
490  chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
491  }
492 }
493 
/**
 * Pick the bandwidth gain (dB) for a Butterworth band from the band gain:
 * 3 dB inside the band gain for large boosts/cuts, half the gain otherwise.
 */
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain >= 6)
        bw_gain = gain - 3;
    else if (gain > -6)
        bw_gain = gain * 0.5;
    else if (gain <= -6)
        bw_gain = gain + 3;

    return bw_gain;
}
507 
/**
 * Pick the bandwidth gain (dB) for a Chebyshev type I band from the band
 * gain: 1 dB inside the band gain for large boosts/cuts, 90% otherwise.
 */
static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain >= 6)
        bw_gain = gain - 1;
    else if (gain > -6)
        bw_gain = gain * 0.9;
    else if (gain <= -6)
        bw_gain = gain + 1;

    return bw_gain;
}
521 
/**
 * Pick the bandwidth gain (dB) for a Chebyshev type II band from the band
 * gain: clamped to +/-3 dB for large boosts/cuts, 30% of the gain otherwise.
 */
static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain >= 6)
        bw_gain = 3;
    else if (gain > -6)
        bw_gain = gain * 0.3;
    else if (gain <= -6)
        bw_gain = -3;

    return bw_gain;
}
535 
/* Convert a frequency in Hz to normalized angular frequency (rad/sample). */
static inline double hz_2_rad(double x, double fs)
{
    const double two_pi = 2 * M_PI;

    return two_pi * x / fs;
}
540 
542 {
543  double w0 = hz_2_rad(f->freq, sample_rate);
544  double wb = hz_2_rad(f->width, sample_rate);
545  double bw_gain;
546 
547  switch (f->type) {
548  case BUTTERWORTH:
549  bw_gain = butterworth_compute_bw_gain_db(f->gain);
550  butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
551  break;
552  case CHEBYSHEV1:
553  bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
554  chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
555  break;
556  case CHEBYSHEV2:
557  bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
558  chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
559  break;
560  }
561 
562 }
563 
565 {
566  equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
567  if (s->nb_filters >= s->nb_allocated) {
569 
570  filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
571  if (!filters)
572  return AVERROR(ENOMEM);
573  memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
574  av_free(s->filters);
575  s->filters = filters;
576  s->nb_allocated *= 2;
577  }
578  s->nb_filters++;
579 
580  return 0;
581 }
582 
584 {
585  AVFilterContext *ctx = inlink->dst;
586  AudioNEqualizerContext *s = ctx->priv;
587  char *args = av_strdup(s->args);
588  char *saveptr = NULL;
589  int ret = 0;
590 
591  if (!args)
592  return AVERROR(ENOMEM);
593 
594  s->nb_allocated = 32 * inlink->channels;
595  s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
596  if (!s->filters) {
597  s->nb_allocated = 0;
598  av_free(args);
599  return AVERROR(ENOMEM);
600  }
601 
602  while (1) {
603  char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
604 
605  if (!arg)
606  break;
607 
608  s->filters[s->nb_filters].type = 0;
609  if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
610  &s->filters[s->nb_filters].freq,
611  &s->filters[s->nb_filters].width,
612  &s->filters[s->nb_filters].gain,
613  &s->filters[s->nb_filters].type) != 5 &&
614  sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
615  &s->filters[s->nb_filters].freq,
616  &s->filters[s->nb_filters].width,
617  &s->filters[s->nb_filters].gain) != 4 ) {
618  av_free(args);
619  return AVERROR(EINVAL);
620  }
621 
622  if (s->filters[s->nb_filters].freq < 0 ||
623  s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
624  s->filters[s->nb_filters].ignore = 1;
625 
626  if (s->filters[s->nb_filters].channel < 0 ||
627  s->filters[s->nb_filters].channel >= inlink->channels)
628  s->filters[s->nb_filters].ignore = 1;
629 
630  s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
631  ret = add_filter(s, inlink);
632  if (ret < 0)
633  break;
634  }
635 
636  av_free(args);
637 
638  return ret;
639 }
640 
641 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
642  char *res, int res_len, int flags)
643 {
644  AudioNEqualizerContext *s = ctx->priv;
645  AVFilterLink *inlink = ctx->inputs[0];
646  int ret = AVERROR(ENOSYS);
647 
648  if (!strcmp(cmd, "change")) {
649  double freq, width, gain;
650  int filter;
651 
652  if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
653  return AVERROR(EINVAL);
654 
655  if (filter < 0 || filter >= s->nb_filters)
656  return AVERROR(EINVAL);
657 
658  if (freq < 0 || freq > inlink->sample_rate / 2.0)
659  return AVERROR(EINVAL);
660 
661  s->filters[filter].freq = freq;
662  s->filters[filter].width = width;
663  s->filters[filter].gain = gain;
664  equalizer(&s->filters[filter], inlink->sample_rate);
665  if (s->draw_curves)
666  draw_curves(ctx, inlink, s->video);
667 
668  ret = 0;
669  }
670 
671  return ret;
672 }
673 
/**
 * Run one sample through a fourth-order direct form I section:
 * y[n] = sum(b_i * x[n-i]) - sum(a_i * y[n-i]), then shift both delay lines.
 * The shift order is significant: old taps are consumed before being moved.
 */
static inline double section_process(FoSection *S, double in)
{
    double out;

    /* feedforward taps minus feedback taps */
    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    /* shift the input delay line and store the new input */
    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    /* shift the output delay line and store the new output */
    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}
696 
697 static double process_sample(FoSection *s1, double in)
698 {
699  double p0 = in, p1;
700  int i;
701 
702  for (i = 0; i < FILTER_ORDER / 2; i++) {
703  p1 = section_process(&s1[i], p0);
704  p0 = p1;
705  }
706 
707  return p1;
708 }
709 
711 {
712  AVFilterContext *ctx = inlink->dst;
713  AudioNEqualizerContext *s = ctx->priv;
714  AVFilterLink *outlink = ctx->outputs[0];
715  double *bptr;
716  int i, n;
717 
718  for (i = 0; i < s->nb_filters; i++) {
719  EqualizatorFilter *f = &s->filters[i];
720 
721  if (f->gain == 0. || f->ignore)
722  continue;
723 
724  bptr = (double *)buf->extended_data[f->channel];
725  for (n = 0; n < buf->nb_samples; n++) {
726  double sample = bptr[n];
727 
728  sample = process_sample(f->section, sample);
729  bptr[n] = sample;
730  }
731  }
732 
733  if (s->draw_curves) {
734  const int64_t pts = buf->pts +
735  av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
736  outlink->time_base);
737  int ret;
738 
739  s->video->pts = pts;
740  ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
741  if (ret < 0)
742  return ret;
743  }
744 
745  return ff_filter_frame(outlink, buf);
746 }
747 
/* Single audio input; frames must be writable since filtering is in place. */
static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};
758 
760  .name = "anequalizer",
761  .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
762  .priv_size = sizeof(AudioNEqualizerContext),
763  .priv_class = &anequalizer_class,
764  .init = init,
765  .uninit = uninit,
767  .inputs = inputs,
768  .outputs = NULL,
771 };
#define NULL
Definition: coverity.c:32
static void chebyshev2_fo_section(FoSection *S, double a, double c, double tetta_b, double g, double si, double b, double D, double c0)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
static void chebyshev1_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
Main libavfilter public API header.
const char * g
Definition: vf_curves.c:115
static double butterworth_compute_bw_gain_db(double gain)
static int config_video(AVFilterLink *outlink)
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static void butterworth_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
double, planar
Definition: samplefmt.h:70
static void butterworth_fo_section(FoSection *S, double beta, double si, double g, double g0, double D, double c0)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
FoSection section[2]
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AVFILTER_DEFINE_CLASS(anequalizer)
#define sample
#define N
Definition: af_mcompand.c:54
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
FilterType
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:435
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:349
uint8_t
#define av_cold
Definition: attributes.h:82
#define fs(width, name, subs,...)
Definition: cbs_vp9.c:259
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:92
#define f(width, name)
Definition: cbs_vp9.c:255
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
static const AVOption anequalizer_options[]
static void chebyshev2_bp_filter(EqualizatorFilter *f, int N, double w0, double wb, double G, double Gb, double G0)
static void chebyshev1_fo_section(FoSection *S, double a, double c, double tetta_b, double g0, double si, double b, double D, double c0)
static double hz_2_rad(double x, double fs)
#define FILTER_ORDER
#define A
static av_cold void uninit(AVFilterContext *ctx)
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:111
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:354
A filter pad used for either input or output.
Definition: internal.h:54
#define F
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
static av_always_inline double ff_exp10(double x)
Compute 10^x for floating point values.
Definition: ffmath.h:42
static int config_input(AVFilterLink *inlink)
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
#define S(s, c, i)
static void equalizer(EqualizatorFilter *f, double sample_rate)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
unsigned nb_outputs
number of output pads
Definition: avfilter.h:351
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const char * r
Definition: vf_curves.c:114
void * priv
private data for use by the filter
Definition: avfilter.h:353
const char * arg
Definition: jacosubdec.c:66
#define V
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
static int query_formats(AVFilterContext *ctx)
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
static av_const double hypot(double x, double y)
Definition: libm.h:366
#define b
Definition: input.c:41
#define width
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
uint8_t w
Definition: llviddspenc.c:38
AVFormatContext * ctx
Definition: movenc.c:48
#define s(width, name)
Definition: cbs_vp9.c:257
static const AVFilterPad inputs[]
int n
Definition: avisynth_c.h:760
#define L(x)
Definition: vp56_arith.h:36
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
static double process_sample(FoSection *s1, double in)
A list of supported channel layouts.
Definition: formats.h:85
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, char *res, int res_len, int flags)
sample_rate
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
static double chebyshev2_compute_bw_gain_db(double gain)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
void * buf
Definition: avisynth_c.h:766
AVFilter ff_af_anequalizer
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
Rational number (pair of numerator and denominator).
Definition: rational.h:58
const char * name
Filter name.
Definition: avfilter.h:148
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
#define s1
Definition: regdef.h:38
offset must point to two consecutive integers
Definition: opt.h:233
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
static double section_process(FoSection *S, double in)
double denum[4]
internal math functions header
static double chebyshev1_compute_bw_gain_db(double gain)
#define G
Definition: huffyuvdsp.h:33
D(D(float, sse)
Definition: rematrix_init.c:28
#define ui(width, name)
Definition: cbs_mpeg2.c:43
static av_cold int init(AVFilterContext *ctx)
#define OFFSET(x)
#define av_free(p)
static const struct PPFilter filters[]
Definition: postprocess.c:134
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
An instance of a filter.
Definition: avfilter.h:338
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
formats
Definition: signature.h:48
EqualizatorFilter * filters
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:410
double num[4]
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:285
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
for(j=16;j >0;--j)
#define AV_WL32(p, v)
Definition: intreadwrite.h:426