FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "internal.h"
31 
32 typedef struct ThreadData {
33  AVFrame *in, *out;
34 } ThreadData;
35 
36 typedef struct Pair {
37  int a, b;
38 } Pair;
39 
40 typedef struct BiquadContext {
41  double a[3];
42  double b[3];
43  double i1, i2;
44  double o1, o2;
45 } BiquadContext;
46 
47 typedef struct IIRChannel {
48  int nb_ab[2];
49  double *ab[2];
50  double g;
51  double *cache[2];
52  BiquadContext *biquads;
53  int clippings;
54 } IIRChannel;
55 
56 typedef struct AudioIIRContext {
57  const AVClass *class;
58  char *a_str, *b_str, *g_str;
59  double dry_gain, wet_gain;
60  double mix;
61  int normalize;
62  int format;
63  int process;
64  int precision;
65  int response;
66  int w, h;
67  int ir_channel;
68  AVRational rate;
69 
70  AVFrame *video;
71 
72  IIRChannel *iir;
73  int channels;
74  enum AVSampleFormat sample_format;
75 
76  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
77 } AudioIIRContext;
78 
79 static int query_formats(AVFilterContext *ctx)
80 {
81  AudioIIRContext *s = ctx->priv;
82  AVFilterFormats *formats;
83  AVFilterChannelLayouts *layouts;
84  enum AVSampleFormat sample_fmts[] = {
85  AV_SAMPLE_FMT_DBLP,
86  AV_SAMPLE_FMT_NONE
87  };
88  static const enum AVPixelFormat pix_fmts[] = {
89  AV_PIX_FMT_RGB0,
90  AV_PIX_FMT_NONE
91  };
92  int ret;
93 
94  if (s->response) {
95  AVFilterLink *videolink = ctx->outputs[1];
96 
97  formats = ff_make_format_list(pix_fmts);
98  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
99  return ret;
100  }
101 
102  layouts = ff_all_channel_counts();
103  if (!layouts)
104  return AVERROR(ENOMEM);
105  ret = ff_set_common_channel_layouts(ctx, layouts);
106  if (ret < 0)
107  return ret;
108 
109  sample_fmts[0] = s->sample_format;
110  formats = ff_make_format_list(sample_fmts);
111  if (!formats)
112  return AVERROR(ENOMEM);
113  ret = ff_set_common_formats(ctx, formats);
114  if (ret < 0)
115  return ret;
116 
117  formats = ff_all_samplerates();
118  if (!formats)
119  return AVERROR(ENOMEM);
120  return ff_set_common_samplerates(ctx, formats);
121 }
122 
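/*
 * Direct form I processing.  For each output sample the macro below computes
 *
 *     y[n] = sum(k=0..nb_b-1) b[k]*x[n-k] - sum(k=1..nb_a-1) a[k]*y[n-k]
 *
 * on the dry-gain scaled input, keeps the raw result in the output cache,
 * then applies the wet gain and the per-channel gain g, and cross-fades the
 * result with the (dry-gain scaled) input according to the mix option.
 * Integer sample formats additionally clamp the output and count clipped
 * samples.
 */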
123 #define IIR_CH(name, type, min, max, need_clipping) \
124 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
125 { \
126  AudioIIRContext *s = ctx->priv; \
127  const double ig = s->dry_gain; \
128  const double og = s->wet_gain; \
129  const double mix = s->mix; \
130  ThreadData *td = arg; \
131  AVFrame *in = td->in, *out = td->out; \
132  const type *src = (const type *)in->extended_data[ch]; \
133  double *oc = (double *)s->iir[ch].cache[0]; \
134  double *ic = (double *)s->iir[ch].cache[1]; \
135  const int nb_a = s->iir[ch].nb_ab[0]; \
136  const int nb_b = s->iir[ch].nb_ab[1]; \
137  const double *a = s->iir[ch].ab[0]; \
138  const double *b = s->iir[ch].ab[1]; \
139  const double g = s->iir[ch].g; \
140  int *clippings = &s->iir[ch].clippings; \
141  type *dst = (type *)out->extended_data[ch]; \
142  int n; \
143  \
144  for (n = 0; n < in->nb_samples; n++) { \
145  double sample = 0.; \
146  int x; \
147  \
148  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
149  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
150  ic[0] = src[n] * ig; \
151  for (x = 0; x < nb_b; x++) \
152  sample += b[x] * ic[x]; \
153  \
154  for (x = 1; x < nb_a; x++) \
155  sample -= a[x] * oc[x]; \
156  \
157  oc[0] = sample; \
158  sample *= og * g; \
159  sample = sample * mix + ic[0] * (1. - mix); \
160  if (need_clipping && sample < min) { \
161  (*clippings)++; \
162  dst[n] = min; \
163  } else if (need_clipping && sample > max) { \
164  (*clippings)++; \
165  dst[n] = max; \
166  } else { \
167  dst[n] = sample; \
168  } \
169  } \
170  \
171  return 0; \
172 }
173 
174 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
175 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
176 IIR_CH(fltp, float, -1., 1., 0)
177 IIR_CH(dblp, double, -1., 1., 0)
178 
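/*
 * Serial (cascaded) processing: the filter runs as a chain of second-order
 * sections.  Each biquad computes
 *
 *     o0 = b0*x + b1*x1 + b2*x2 + a1*o1 + a2*o2
 *
 * where a1/a2 are the negated denominator coefficients prepared by
 * decompose_zp2biquads(), and every section after the first reads its input
 * from dst[], i.e. from the output of the previous section.  The section
 * state (i1, i2, o1, o2) is stored per biquad so the cascade stays
 * continuous across frames.
 */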
179 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
180 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
181 { \
182  AudioIIRContext *s = ctx->priv; \
183  const double ig = s->dry_gain; \
184  const double og = s->wet_gain; \
185  const double mix = s->mix; \
186  ThreadData *td = arg; \
187  AVFrame *in = td->in, *out = td->out; \
188  const type *src = (const type *)in->extended_data[ch]; \
189  type *dst = (type *)out->extended_data[ch]; \
190  IIRChannel *iir = &s->iir[ch]; \
191  const double g = iir->g; \
192  int *clippings = &iir->clippings; \
193  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
194  int n, i; \
195  \
196  for (i = 0; i < nb_biquads; i++) { \
197  const double a1 = -iir->biquads[i].a[1]; \
198  const double a2 = -iir->biquads[i].a[2]; \
199  const double b0 = iir->biquads[i].b[0]; \
200  const double b1 = iir->biquads[i].b[1]; \
201  const double b2 = iir->biquads[i].b[2]; \
202  double i1 = iir->biquads[i].i1; \
203  double i2 = iir->biquads[i].i2; \
204  double o1 = iir->biquads[i].o1; \
205  double o2 = iir->biquads[i].o2; \
206  \
207  for (n = 0; n < in->nb_samples; n++) { \
208  double sample = ig * (i ? dst[n] : src[n]); \
209  double o0 = sample * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2; \
210  \
211  i2 = i1; \
212  i1 = src[n]; \
213  o2 = o1; \
214  o1 = o0; \
215  o0 *= og * g; \
216  \
217  o0 = o0 * mix + (1. - mix) * sample; \
218  if (need_clipping && o0 < min) { \
219  (*clippings)++; \
220  dst[n] = min; \
221  } else if (need_clipping && o0 > max) { \
222  (*clippings)++; \
223  dst[n] = max; \
224  } else { \
225  dst[n] = o0; \
226  } \
227  } \
228  iir->biquads[i].i1 = i1; \
229  iir->biquads[i].i2 = i2; \
230  iir->biquads[i].o1 = o1; \
231  iir->biquads[i].o2 = o2; \
232  } \
233  \
234  return 0; \
235 }
236 
237 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
238 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
239 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
240 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
241 
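/*
 * Coefficient and gain strings use '|' to separate per-channel lists and
 * spaces to separate the values inside a list; when fewer lists than
 * channels are given, the last list is reused for the remaining channels.
 * With the tf format each value is a single real coefficient, while the
 * zp/pr/pd formats expect complex values parsed with the scanf patterns in
 * format[] below.
 */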
242 static void count_coefficients(char *item_str, int *nb_items)
243 {
244  char *p;
245 
246  if (!item_str)
247  return;
248 
249  *nb_items = 1;
250  for (p = item_str; *p && *p != '|'; p++) {
251  if (*p == ' ')
252  (*nb_items)++;
253  }
254 }
255 
256 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
257 {
258  AudioIIRContext *s = ctx->priv;
259  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
260  int i;
261 
262  p = old_str = av_strdup(item_str);
263  if (!p)
264  return AVERROR(ENOMEM);
265  for (i = 0; i < nb_items; i++) {
266  if (!(arg = av_strtok(p, "|", &saveptr)))
267  arg = prev_arg;
268 
269  if (!arg) {
270  av_freep(&old_str);
271  return AVERROR(EINVAL);
272  }
273 
274  p = NULL;
275  if (sscanf(arg, "%lf", &s->iir[i].g) != 1) {
276  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
277  av_freep(&old_str);
278  return AVERROR(EINVAL);
279  }
280 
281  prev_arg = arg;
282  }
283 
284  av_freep(&old_str);
285 
286  return 0;
287 }
288 
289 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
290 {
291  char *p, *arg, *old_str, *saveptr = NULL;
292  int i;
293 
294  p = old_str = av_strdup(item_str);
295  if (!p)
296  return AVERROR(ENOMEM);
297  for (i = 0; i < nb_items; i++) {
298  if (!(arg = av_strtok(p, " ", &saveptr)))
299  break;
300 
301  p = NULL;
302  if (sscanf(arg, "%lf", &dst[i]) != 1) {
303  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
304  av_freep(&old_str);
305  return AVERROR(EINVAL);
306  }
307  }
308 
309  av_freep(&old_str);
310 
311  return 0;
312 }
313 
314 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
315 {
316  char *p, *arg, *old_str, *saveptr = NULL;
317  int i;
318 
319  p = old_str = av_strdup(item_str);
320  if (!p)
321  return AVERROR(ENOMEM);
322  for (i = 0; i < nb_items; i++) {
323  if (!(arg = av_strtok(p, " ", &saveptr)))
324  break;
325 
326  p = NULL;
327  if (sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
328  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
329  av_freep(&old_str);
330  return AVERROR(EINVAL);
331  }
332  }
333 
334  av_freep(&old_str);
335 
336  return 0;
337 }
338 
339 static const char *format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd" };
340 
341 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
342 {
343  AudioIIRContext *s = ctx->priv;
344  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
345  int i, ret;
346 
347  p = old_str = av_strdup(item_str);
348  if (!p)
349  return AVERROR(ENOMEM);
350  for (i = 0; i < channels; i++) {
351  IIRChannel *iir = &s->iir[i];
352 
353  if (!(arg = av_strtok(p, "|", &saveptr)))
354  arg = prev_arg;
355 
356  if (!arg) {
357  av_freep(&old_str);
358  return AVERROR(EINVAL);
359  }
360 
361  count_coefficients(arg, &iir->nb_ab[ab]);
362 
363  p = NULL;
364  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
365  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
366  if (!iir->ab[ab] || !iir->cache[ab]) {
367  av_freep(&old_str);
368  return AVERROR(ENOMEM);
369  }
370 
371  if (s->format) {
372  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
373  } else {
374  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
375  }
376  if (ret < 0) {
377  av_freep(&old_str);
378  return ret;
379  }
380  prev_arg = arg;
381  }
382 
383  av_freep(&old_str);
384 
385  return 0;
386 }
387 
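/*
 * multiply() and expand() turn a list of complex roots into real polynomial
 * coefficients: expand() starts from the constant polynomial 1 and, for each
 * root r, multiply() folds in the factor (z - r).  coeffs[] holds the result
 * as interleaved real/imaginary parts in ascending powers of z; expand()
 * finally checks that every imaginary part is numerically zero, which only
 * holds when complex roots come in conjugate pairs.
 */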
388 static void multiply(double wre, double wim, int npz, double *coeffs)
389 {
390  double nwre = -wre, nwim = -wim;
391  double cre, cim;
392  int i;
393 
394  for (i = npz; i >= 1; i--) {
395  cre = coeffs[2 * i + 0];
396  cim = coeffs[2 * i + 1];
397 
398  coeffs[2 * i + 0] = (nwre * cre - nwim * cim) + coeffs[2 * (i - 1) + 0];
399  coeffs[2 * i + 1] = (nwre * cim + nwim * cre) + coeffs[2 * (i - 1) + 1];
400  }
401 
402  cre = coeffs[0];
403  cim = coeffs[1];
404  coeffs[0] = nwre * cre - nwim * cim;
405  coeffs[1] = nwre * cim + nwim * cre;
406 }
407 
408 static int expand(AVFilterContext *ctx, double *pz, int nb, double *coeffs)
409 {
410  int i;
411 
412  coeffs[0] = 1.0;
413  coeffs[1] = 0.0;
414 
415  for (i = 0; i < nb; i++) {
416  coeffs[2 * (i + 1) ] = 0.0;
417  coeffs[2 * (i + 1) + 1] = 0.0;
418  }
419 
420  for (i = 0; i < nb; i++)
421  multiply(pz[2 * i], pz[2 * i + 1], nb, coeffs);
422 
423  for (i = 0; i < nb + 1; i++) {
424  if (fabs(coeffs[2 * i + 1]) > FLT_EPSILON) {
425  av_log(ctx, AV_LOG_ERROR, "coeff: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
426  coeffs[2 * i + 1], i);
427  return AVERROR(EINVAL);
428  }
429  }
430 
431  return 0;
432 }
433 
433 
434 static void normalize_coeffs(AVFilterContext *ctx, int ch)
435 {
436  AudioIIRContext *s = ctx->priv;
437  IIRChannel *iir = &s->iir[ch];
438  double sum_den = 0.;
439 
440  if (!s->normalize)
441  return;
442 
443  for (int i = 0; i < iir->nb_ab[1]; i++) {
444  sum_den += iir->ab[1][i];
445  }
446 
447  if (sum_den > 1e-6) {
448  double factor, sum_num = 0.;
449 
450  for (int i = 0; i < iir->nb_ab[0]; i++) {
451  sum_num += iir->ab[0][i];
452  }
453 
454  factor = sum_num / sum_den;
455 
456  for (int i = 0; i < iir->nb_ab[1]; i++) {
457  iir->ab[1][i] *= factor;
458  }
459  }
460 }
461 
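/*
 * Convert zeros/poles back to transfer-function coefficients: the zero and
 * pole lists are expanded into polynomials with expand(), the real parts are
 * copied back highest power first (matching the b[0] + b[1]*z^-1 + ...
 * ordering used by the direct-form code), and the coefficient counts grow by
 * one, since n roots yield n+1 polynomial coefficients.
 */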
462 static int convert_zp2tf(AVFilterContext *ctx, int channels)
463 {
464  AudioIIRContext *s = ctx->priv;
465  int ch, i, j, ret = 0;
466 
467  for (ch = 0; ch < channels; ch++) {
468  IIRChannel *iir = &s->iir[ch];
469  double *topc, *botc;
470 
471  topc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*topc));
472  botc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*botc));
473  if (!topc || !botc) {
474  ret = AVERROR(ENOMEM);
475  goto fail;
476  }
477 
478  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
479  if (ret < 0) {
480  goto fail;
481  }
482 
483  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
484  if (ret < 0) {
485  goto fail;
486  }
487 
488  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
489  iir->ab[1][j] = topc[2 * i];
490  }
491  iir->nb_ab[1]++;
492 
493  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
494  iir->ab[0][j] = botc[2 * i];
495  }
496  iir->nb_ab[0]++;
497 
498  normalize_coeffs(ctx, ch);
499 
500 fail:
501  av_free(topc);
502  av_free(botc);
503  if (ret < 0)
504  break;
505  }
506 
507  return ret;
508 }
509 
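/*
 * Split the zero/pole description into second-order sections for serial
 * processing: each iteration picks the remaining pole with the largest
 * magnitude together with its complex conjugate, pairs it with the nearest
 * remaining zero (and that zero's conjugate), expands both pairs into
 * quadratics and normalizes them into one BiquadContext.  Consumed roots are
 * marked with NAN so they are skipped on later passes; the overall gain g is
 * folded into the first section only.
 */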
510 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
511 {
512  AudioIIRContext *s = ctx->priv;
513  int ch, ret;
514 
515  for (ch = 0; ch < channels; ch++) {
516  IIRChannel *iir = &s->iir[ch];
517  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
518  int current_biquad = 0;
519 
520  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
521  if (!iir->biquads)
522  return AVERROR(ENOMEM);
523 
524  while (nb_biquads--) {
525  Pair outmost_pole = { -1, -1 };
526  Pair nearest_zero = { -1, -1 };
527  double zeros[4] = { 0 };
528  double poles[4] = { 0 };
529  double b[6] = { 0 };
530  double a[6] = { 0 };
531  double min_distance = DBL_MAX;
532  double max_mag = 0;
533  double factor;
534  int i;
535 
536  for (i = 0; i < iir->nb_ab[0]; i++) {
537  double mag;
538 
539  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
540  continue;
541  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
542 
543  if (mag > max_mag) {
544  max_mag = mag;
545  outmost_pole.a = i;
546  }
547  }
548 
549  for (i = 0; i < iir->nb_ab[0]; i++) {
550  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
551  continue;
552 
553  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
554  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
555  outmost_pole.b = i;
556  break;
557  }
558  }
559 
560  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
561 
562  if (outmost_pole.a < 0 || outmost_pole.b < 0)
563  return AVERROR(EINVAL);
564 
565  for (i = 0; i < iir->nb_ab[1]; i++) {
566  double distance;
567 
568  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
569  continue;
570  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
571  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
572 
573  if (distance < min_distance) {
574  min_distance = distance;
575  nearest_zero.a = i;
576  }
577  }
578 
579  for (i = 0; i < iir->nb_ab[1]; i++) {
580  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
581  continue;
582 
583  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
584  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
585  nearest_zero.b = i;
586  break;
587  }
588  }
589 
590  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
591 
592  if (nearest_zero.a < 0 || nearest_zero.b < 0)
593  return AVERROR(EINVAL);
594 
595  poles[0] = iir->ab[0][2 * outmost_pole.a ];
596  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
597 
598  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
599  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
600 
601  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
602  zeros[2] = 0;
603  zeros[3] = 0;
604 
605  poles[2] = 0;
606  poles[3] = 0;
607  } else {
608  poles[2] = iir->ab[0][2 * outmost_pole.b ];
609  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
610 
611  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
612  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
613  }
614 
615  ret = expand(ctx, zeros, 2, b);
616  if (ret < 0)
617  return ret;
618 
619  ret = expand(ctx, poles, 2, a);
620  if (ret < 0)
621  return ret;
622 
623  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
624  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
625  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
626  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
627 
628  iir->biquads[current_biquad].a[0] = 1.;
629  iir->biquads[current_biquad].a[1] = a[2] / a[4];
630  iir->biquads[current_biquad].a[2] = a[0] / a[4];
631  iir->biquads[current_biquad].b[0] = b[4] / a[4];
632  iir->biquads[current_biquad].b[1] = b[2] / a[4];
633  iir->biquads[current_biquad].b[2] = b[0] / a[4];
634 
635  if (s->normalize &&
636  fabs(iir->biquads[current_biquad].b[0] +
637  iir->biquads[current_biquad].b[1] +
638  iir->biquads[current_biquad].b[2]) > 1e-6) {
639  factor = (iir->biquads[current_biquad].a[0] +
640  iir->biquads[current_biquad].a[1] +
641  iir->biquads[current_biquad].a[2]) /
642  (iir->biquads[current_biquad].b[0] +
643  iir->biquads[current_biquad].b[1] +
644  iir->biquads[current_biquad].b[2]);
645 
646  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
647 
648  iir->biquads[current_biquad].b[0] *= factor;
649  iir->biquads[current_biquad].b[1] *= factor;
650  iir->biquads[current_biquad].b[2] *= factor;
651  }
652 
653  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
654  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
655  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
656 
657  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
658  iir->biquads[current_biquad].a[0],
659  iir->biquads[current_biquad].a[1],
660  iir->biquads[current_biquad].a[2],
661  iir->biquads[current_biquad].b[0],
662  iir->biquads[current_biquad].b[1],
663  iir->biquads[current_biquad].b[2]);
664 
665  current_biquad++;
666  }
667  }
668 
669  return 0;
670 }
671 
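/*
 * The pr and pd formats describe zeros/poles in polar form.  The two helpers
 * below convert them in place to Cartesian coordinates, convert_pr2zp()
 * taking the angle in radians and convert_pd2zp() in degrees.
 */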
672 static void convert_pr2zp(AVFilterContext *ctx, int channels)
673 {
674  AudioIIRContext *s = ctx->priv;
675  int ch;
676 
677  for (ch = 0; ch < channels; ch++) {
678  IIRChannel *iir = &s->iir[ch];
679  int n;
680 
681  for (n = 0; n < iir->nb_ab[0]; n++) {
682  double r = iir->ab[0][2*n];
683  double angle = iir->ab[0][2*n+1];
684 
685  iir->ab[0][2*n] = r * cos(angle);
686  iir->ab[0][2*n+1] = r * sin(angle);
687  }
688 
689  for (n = 0; n < iir->nb_ab[1]; n++) {
690  double r = iir->ab[1][2*n];
691  double angle = iir->ab[1][2*n+1];
692 
693  iir->ab[1][2*n] = r * cos(angle);
694  iir->ab[1][2*n+1] = r * sin(angle);
695  }
696  }
697 }
698 
699 static void convert_pd2zp(AVFilterContext *ctx, int channels)
700 {
701  AudioIIRContext *s = ctx->priv;
702  int ch;
703 
704  for (ch = 0; ch < channels; ch++) {
705  IIRChannel *iir = &s->iir[ch];
706  int n;
707 
708  for (n = 0; n < iir->nb_ab[0]; n++) {
709  double r = iir->ab[0][2*n];
710  double angle = M_PI*iir->ab[0][2*n+1]/180.;
711 
712  iir->ab[0][2*n] = r * cos(angle);
713  iir->ab[0][2*n+1] = r * sin(angle);
714  }
715 
716  for (n = 0; n < iir->nb_ab[1]; n++) {
717  double r = iir->ab[1][2*n];
718  double angle = M_PI*iir->ab[1][2*n+1]/180.;
719 
720  iir->ab[1][2*n] = r * cos(angle);
721  iir->ab[1][2*n+1] = r * sin(angle);
722  }
723  }
724 }
725 
726 static void check_stability(AVFilterContext *ctx, int channels)
727 {
728  AudioIIRContext *s = ctx->priv;
729  int ch;
730 
731  for (ch = 0; ch < channels; ch++) {
732  IIRChannel *iir = &s->iir[ch];
733 
734  for (int n = 0; n < iir->nb_ab[0]; n++) {
735  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
736 
737  if (pr >= 1.) {
738  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
739  break;
740  }
741  }
742  }
743 }
744 
745 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
746 {
747  const uint8_t *font;
748  int font_height;
749  int i;
750 
751  font = avpriv_cga_font, font_height = 8;
752 
753  for (i = 0; txt[i]; i++) {
754  int char_y, mask;
755 
756  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
757  for (char_y = 0; char_y < font_height; char_y++) {
758  for (mask = 0x80; mask; mask >>= 1) {
759  if (font[txt[i] * font_height + char_y] & mask)
760  AV_WL32(p, color);
761  p += 4;
762  }
763  p += pic->linesize[0] - 8 * 4;
764  }
765  }
766 }
767 
768 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
769 {
770  int dx = FFABS(x1-x0);
771  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
772  int err = (dx>dy ? dx : -dy) / 2, e2;
773 
774  for (;;) {
775  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
776 
777  if (x0 == x1 && y0 == y1)
778  break;
779 
780  e2 = err;
781 
782  if (e2 >-dx) {
783  err -= dy;
784  x0--;
785  }
786 
787  if (e2 < dy) {
788  err += dx;
789  y0 += sy;
790  }
791  }
792 }
793 
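/*
 * Evaluate the frequency response H(e^jw) at one angular frequency w.  In tf
 * form (format == 0) the two coefficient polynomials are summed directly and
 * divided; in the zeros/poles forms the response is built as the product of
 * (e^jw - zero) factors divided by the product of (e^jw - pole) factors,
 * accumulated one root at a time.
 */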
794 static void get_response(int channel, int format, double w,
795  const double *b, const double *a,
796  int nb_b, int nb_a, double *r, double *i)
797 {
798  double realz, realp;
799  double imagz, imagp;
800  double real, imag;
801  double div;
802 
803  if (format == 0) {
804  realz = 0., realp = 0.;
805  imagz = 0., imagp = 0.;
806  for (int x = 0; x < nb_a; x++) {
807  realz += cos(-x * w) * a[x];
808  imagz += sin(-x * w) * a[x];
809  }
810 
811  for (int x = 0; x < nb_b; x++) {
812  realp += cos(-x * w) * b[x];
813  imagp += sin(-x * w) * b[x];
814  }
815 
816  div = realp * realp + imagp * imagp;
817  real = (realz * realp + imagz * imagp) / div;
818  imag = (imagz * realp - imagp * realz) / div;
819  } else {
820  real = 1;
821  imag = 0;
822  for (int x = 0; x < nb_a; x++) {
823  double ore, oim, re, im;
824 
825  re = cos(w) - a[2 * x];
826  im = sin(w) - a[2 * x + 1];
827 
828  ore = real;
829  oim = imag;
830 
831  real = ore * re - oim * im;
832  imag = ore * im + oim * re;
833  }
834 
835  for (int x = 0; x < nb_b; x++) {
836  double ore, oim, re, im;
837 
838  re = cos(w) - b[2 * x];
839  im = sin(w) - b[2 * x + 1];
840 
841  ore = real;
842  oim = imag;
843  div = re * re + im * im;
844 
845  real = (ore * re + oim * im) / div;
846  imag = (oim * re - ore * im) / div;
847  }
848  }
849 
850  *r = real;
851  *i = imag;
852 }
853 
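/*
 * Render the response video frame: the response is sampled at s->w points on
 * [0, pi], the phase is unwrapped by accumulating 2*pi corrections in
 * temp[], the group delay is derived from the difference of the unwrapped
 * phase between neighbouring points, and the magnitude, phase and delay
 * curves are drawn, with min/max legends added when the frame is larger than
 * 400x100.
 */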
854 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
855 {
856  AudioIIRContext *s = ctx->priv;
857  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
858  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
859  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
860  char text[32];
861  int ch, i;
862 
863  memset(out->data[0], 0, s->h * out->linesize[0]);
864 
865  phase = av_malloc_array(s->w, sizeof(*phase));
866  temp = av_malloc_array(s->w, sizeof(*temp));
867  mag = av_malloc_array(s->w, sizeof(*mag));
868  delay = av_malloc_array(s->w, sizeof(*delay));
869  if (!mag || !phase || !delay || !temp)
870  goto end;
871 
872  ch = av_clip(s->ir_channel, 0, s->channels - 1);
873  for (i = 0; i < s->w; i++) {
874  const double *b = s->iir[ch].ab[0];
875  const double *a = s->iir[ch].ab[1];
876  const int nb_b = s->iir[ch].nb_ab[0];
877  const int nb_a = s->iir[ch].nb_ab[1];
878  double w = i * M_PI / (s->w - 1);
879  double real, imag;
880 
881  get_response(ch, s->format, w, b, a, nb_b, nb_a, &real, &imag);
882 
883  mag[i] = s->iir[ch].g * hypot(real, imag);
884  phase[i] = atan2(imag, real);
885  min = fmin(min, mag[i]);
886  max = fmax(max, mag[i]);
887  }
888 
889  temp[0] = 0.;
890  for (i = 0; i < s->w - 1; i++) {
891  double d = phase[i] - phase[i + 1];
892  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
893  }
894 
895  min_phase = phase[0];
896  max_phase = phase[0];
897  for (i = 1; i < s->w; i++) {
898  temp[i] += temp[i - 1];
899  phase[i] += temp[i];
900  min_phase = fmin(min_phase, phase[i]);
901  max_phase = fmax(max_phase, phase[i]);
902  }
903 
904  for (i = 0; i < s->w - 1; i++) {
905  double div = s->w / (double)sample_rate;
906 
907  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
908  min_delay = fmin(min_delay, delay[i + 1]);
909  max_delay = fmax(max_delay, delay[i + 1]);
910  }
911  delay[0] = delay[1];
912 
913  for (i = 0; i < s->w; i++) {
914  int ymag = mag[i] / max * (s->h - 1);
915  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
916  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
917 
918  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
919  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
920  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
921 
922  if (prev_ymag < 0)
923  prev_ymag = ymag;
924  if (prev_yphase < 0)
925  prev_yphase = yphase;
926  if (prev_ydelay < 0)
927  prev_ydelay = ydelay;
928 
929  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
930  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
931  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
932 
933  prev_ymag = ymag;
934  prev_yphase = yphase;
935  prev_ydelay = ydelay;
936  }
937 
938  if (s->w > 400 && s->h > 100) {
939  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
940  snprintf(text, sizeof(text), "%.2f", max);
941  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
942 
943  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
944  snprintf(text, sizeof(text), "%.2f", min);
945  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
946 
947  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
948  snprintf(text, sizeof(text), "%.2f", max_phase);
949  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
950 
951  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
952  snprintf(text, sizeof(text), "%.2f", min_phase);
953  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
954 
955  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
956  snprintf(text, sizeof(text), "%.2f", max_delay);
957  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
958 
959  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
960  snprintf(text, sizeof(text), "%.2f", min_delay);
961  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
962  }
963 
964 end:
965  av_free(delay);
966  av_free(temp);
967  av_free(phase);
968  av_free(mag);
969 }
970 
971 static int config_output(AVFilterLink *outlink)
972 {
973  AVFilterContext *ctx = outlink->src;
974  AudioIIRContext *s = ctx->priv;
975  AVFilterLink *inlink = ctx->inputs[0];
976  int ch, ret, i;
977 
978  s->channels = inlink->channels;
979  s->iir = av_calloc(s->channels, sizeof(*s->iir));
980  if (!s->iir)
981  return AVERROR(ENOMEM);
982 
983  ret = read_gains(ctx, s->g_str, inlink->channels);
984  if (ret < 0)
985  return ret;
986 
987  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
988  if (ret < 0)
989  return ret;
990 
991  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
992  if (ret < 0)
993  return ret;
994 
995  if (s->format == 2) {
996  convert_pr2zp(ctx, inlink->channels);
997  } else if (s->format == 3) {
998  convert_pd2zp(ctx, inlink->channels);
999  }
1000  if (s->format > 0) {
1001  check_stability(ctx, inlink->channels);
1002  }
1003 
1004  if (s->format == 0)
1005  av_log(ctx, AV_LOG_WARNING, "tf coefficients format is not recommended for a high number of zeros/poles.\n");
1006 
1007  if (s->format > 0 && s->process == 0) {
1008  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1009 
1010  ret = convert_zp2tf(ctx, inlink->channels);
1011  if (ret < 0)
1012  return ret;
1013  } else if (s->format == 0 && s->process == 1) {
1014  av_log(ctx, AV_LOG_ERROR, "Serial cascading is not implemented for transfer function.\n");
1015  return AVERROR_PATCHWELCOME;
1016  } else if (s->format > 0 && s->process == 1) {
1017  if (inlink->format == AV_SAMPLE_FMT_S16P)
1018  av_log(ctx, AV_LOG_WARNING, "Serial cascading is not recommended for i16 precision.\n");
1019 
1020  ret = decompose_zp2biquads(ctx, inlink->channels);
1021  if (ret < 0)
1022  return ret;
1023  }
1024 
1025  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1026  IIRChannel *iir = &s->iir[ch];
1027 
1028  for (i = 1; i < iir->nb_ab[0]; i++) {
1029  iir->ab[0][i] /= iir->ab[0][0];
1030  }
1031 
1032  iir->ab[0][0] = 1.0;
1033  for (i = 0; i < iir->nb_ab[1]; i++) {
1034  iir->ab[1][i] *= iir->g;
1035  }
1036 
1037  normalize_coeffs(ctx, ch);
1038  }
1039 
1040  switch (inlink->format) {
1041  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1042  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1043  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1044  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1045  }
1046 
1047  av_frame_free(&s->video);
1048  if (s->response) {
1049  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1050  if (!s->video)
1051  return AVERROR(ENOMEM);
1052 
1053  draw_response(ctx, s->video, inlink->sample_rate);
1054  }
1055 
1056  return 0;
1057 }
1058 
1059 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1060 {
1061  AVFilterContext *ctx = inlink->dst;
1062  AudioIIRContext *s = ctx->priv;
1063  AVFilterLink *outlink = ctx->outputs[0];
1064  ThreadData td;
1065  AVFrame *out;
1066  int ch, ret;
1067 
1068  if (av_frame_is_writable(in)) {
1069  out = in;
1070  } else {
1071  out = ff_get_audio_buffer(outlink, in->nb_samples);
1072  if (!out) {
1073  av_frame_free(&in);
1074  return AVERROR(ENOMEM);
1075  }
1076  av_frame_copy_props(out, in);
1077  }
1078 
1079  td.in = in;
1080  td.out = out;
1081  ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1082 
1083  for (ch = 0; ch < outlink->channels; ch++) {
1084  if (s->iir[ch].clippings > 0)
1085  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1086  ch, s->iir[ch].clippings);
1087  s->iir[ch].clippings = 0;
1088  }
1089 
1090  if (in != out)
1091  av_frame_free(&in);
1092 
1093  if (s->response) {
1094  AVFilterLink *outlink = ctx->outputs[1];
1095  int64_t old_pts = s->video->pts;
1096  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1097 
1098  if (new_pts > old_pts) {
1099  AVFrame *clone;
1100 
1101  s->video->pts = new_pts;
1102  clone = av_frame_clone(s->video);
1103  if (!clone)
1104  return AVERROR(ENOMEM);
1105  ret = ff_filter_frame(outlink, clone);
1106  if (ret < 0)
1107  return ret;
1108  }
1109  }
1110 
1111  return ff_filter_frame(outlink, out);
1112 }
1113 
1114 static int config_video(AVFilterLink *outlink)
1115 {
1116  AVFilterContext *ctx = outlink->src;
1117  AudioIIRContext *s = ctx->priv;
1118 
1119  outlink->sample_aspect_ratio = (AVRational){1,1};
1120  outlink->w = s->w;
1121  outlink->h = s->h;
1122  outlink->frame_rate = s->rate;
1123  outlink->time_base = av_inv_q(outlink->frame_rate);
1124 
1125  return 0;
1126 }
1127 
1128 static av_cold int init(AVFilterContext *ctx)
1129 {
1130  AudioIIRContext *s = ctx->priv;
1131  AVFilterPad pad, vpad;
1132  int ret;
1133 
1134  if (!s->a_str || !s->b_str || !s->g_str) {
1135  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1136  return AVERROR(EINVAL);
1137  }
1138 
1139  switch (s->precision) {
1140  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1141  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1142  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1143  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1144  default: return AVERROR_BUG;
1145  }
1146 
1147  pad = (AVFilterPad){
1148  .name = av_strdup("default"),
1149  .type = AVMEDIA_TYPE_AUDIO,
1150  .config_props = config_output,
1151  };
1152 
1153  if (!pad.name)
1154  return AVERROR(ENOMEM);
1155 
1156  if (s->response) {
1157  vpad = (AVFilterPad){
1158  .name = av_strdup("filter_response"),
1159  .type = AVMEDIA_TYPE_VIDEO,
1160  .config_props = config_video,
1161  };
1162  if (!vpad.name)
1163  return AVERROR(ENOMEM);
1164  }
1165 
1166  ret = ff_insert_outpad(ctx, 0, &pad);
1167  if (ret < 0)
1168  return ret;
1169 
1170  if (s->response) {
1171  ret = ff_insert_outpad(ctx, 1, &vpad);
1172  if (ret < 0)
1173  return ret;
1174  }
1175 
1176  return 0;
1177 }
1178 
1179 static av_cold void uninit(AVFilterContext *ctx)
1180 {
1181  AudioIIRContext *s = ctx->priv;
1182  int ch;
1183 
1184  if (s->iir) {
1185  for (ch = 0; ch < s->channels; ch++) {
1186  IIRChannel *iir = &s->iir[ch];
1187  av_freep(&iir->ab[0]);
1188  av_freep(&iir->ab[1]);
1189  av_freep(&iir->cache[0]);
1190  av_freep(&iir->cache[1]);
1191  av_freep(&iir->biquads);
1192  }
1193  }
1194  av_freep(&s->iir);
1195 
1196  av_freep(&ctx->output_pads[0].name);
1197  if (s->response)
1198  av_freep(&ctx->output_pads[1].name);
1199  av_frame_free(&s->video);
1200 }
1201 
1202 static const AVFilterPad inputs[] = {
1203  {
1204  .name = "default",
1205  .type = AVMEDIA_TYPE_AUDIO,
1206  .filter_frame = filter_frame,
1207  },
1208  { NULL }
1209 };
1210 
1211 #define OFFSET(x) offsetof(AudioIIRContext, x)
1212 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1213 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1214 
1215 static const AVOption aiir_options[] = {
1216  { "zeros", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1217  { "z", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1218  { "poles", "set A/denominator/poles coefficients", OFFSET(a_str),AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1219  { "p", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1220  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1221  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1222  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1223  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1224  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 3, AF, "format" },
1225  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, 0, 3, AF, "format" },
1226  { "tf", "transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1227  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1228  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1229  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1230  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
1231  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "process" },
1232  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1233  { "s", "serial cascading", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1234  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1235  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1236  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1237  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1238  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1239  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1240  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1241  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1242  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1243  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1244  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1245  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1246  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1247  { NULL },
1248 };
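/*
 * Example usage (illustrative only, not taken from the FFmpeg documentation):
 * apply a simple 2-tap averaging filter given as transfer-function
 * coefficients, processed directly:
 *
 *     ffmpeg -i INPUT -af "aiir=z=0.5 0.5:p=1 0:f=tf:r=d" OUTPUT
 *
 * "z" and "p" accept one list per channel separated by '|', and "k" sets the
 * per-channel gains.
 */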
1249 
1250 AVFILTER_DEFINE_CLASS(aiir);
1251 
1252 AVFilter ff_af_aiir = {
1253  .name = "aiir",
1254  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1255  .priv_size = sizeof(AudioIIRContext),
1256  .priv_class = &aiir_class,
1257  .init = init,
1258  .uninit = uninit,
1259  .query_formats = query_formats,
1260  .inputs = inputs,
1261  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1262  AVFILTER_FLAG_SLICE_THREADS,
1263 };