FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "internal.h"
31 
32 typedef struct ThreadData {
33  AVFrame *in, *out;
34 } ThreadData;
35 
36 typedef struct Pair {
37  int a, b;
38 } Pair;
39 
40 typedef struct BiquadContext {
41  double a[3];
42  double b[3];
43  double w1, w2;
44 } BiquadContext;
45 
46 typedef struct IIRChannel {
47  int nb_ab[2];
48  double *ab[2];
49  double g;
50  double *cache[2];
51  double fir;
52  BiquadContext *biquads;
53  int clippings;
54 } IIRChannel;
55 
56 typedef struct AudioIIRContext {
57  const AVClass *class;
58  char *a_str, *b_str, *g_str;
59  double dry_gain, wet_gain;
60  double mix;
61  int normalize;
62  int format;
63  int process;
64  int precision;
65  int response;
66  int w, h;
67  int ir_channel;
68  AVRational rate;
69 
70  AVFrame *video;
71 
72  IIRChannel *iir;
73  int channels;
74  enum AVSampleFormat sample_format;
75 
76  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
77 } AudioIIRContext;
78 
79 static int query_formats(AVFilterContext *ctx)
80 {
81  AudioIIRContext *s = ctx->priv;
82  AVFilterFormats *formats;
83  AVFilterChannelLayouts *layouts;
84  enum AVSampleFormat sample_fmts[] = {
85  AV_SAMPLE_FMT_DBLP,
86  AV_SAMPLE_FMT_NONE
87  };
88  static const enum AVPixelFormat pix_fmts[] = {
89  AV_PIX_FMT_RGB0,
90  AV_PIX_FMT_NONE
91  };
92  int ret;
93 
94  if (s->response) {
95  AVFilterLink *videolink = ctx->outputs[1];
96 
97  formats = ff_make_format_list(pix_fmts);
98  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
99  return ret;
100  }
101 
102  layouts = ff_all_channel_counts();
103  if (!layouts)
104  return AVERROR(ENOMEM);
105  ret = ff_set_common_channel_layouts(ctx, layouts);
106  if (ret < 0)
107  return ret;
108 
109  sample_fmts[0] = s->sample_format;
110  formats = ff_make_format_list(sample_fmts);
111  if (!formats)
112  return AVERROR(ENOMEM);
113  ret = ff_set_common_formats(ctx, formats);
114  if (ret < 0)
115  return ret;
116 
117  formats = ff_all_samplerates();
118  if (!formats)
119  return AVERROR(ENOMEM);
120  return ff_set_common_samplerates(ctx, formats);
121 }
122 
123 #define IIR_CH(name, type, min, max, need_clipping) \
124 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
125 { \
126  AudioIIRContext *s = ctx->priv; \
127  const double ig = s->dry_gain; \
128  const double og = s->wet_gain; \
129  const double mix = s->mix; \
130  ThreadData *td = arg; \
131  AVFrame *in = td->in, *out = td->out; \
132  const type *src = (const type *)in->extended_data[ch]; \
133  double *oc = (double *)s->iir[ch].cache[0]; \
134  double *ic = (double *)s->iir[ch].cache[1]; \
135  const int nb_a = s->iir[ch].nb_ab[0]; \
136  const int nb_b = s->iir[ch].nb_ab[1]; \
137  const double *a = s->iir[ch].ab[0]; \
138  const double *b = s->iir[ch].ab[1]; \
139  const double g = s->iir[ch].g; \
140  int *clippings = &s->iir[ch].clippings; \
141  type *dst = (type *)out->extended_data[ch]; \
142  int n; \
143  \
144  for (n = 0; n < in->nb_samples; n++) { \
145  double sample = 0.; \
146  int x; \
147  \
148  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
149  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
150  ic[0] = src[n] * ig; \
151  for (x = 0; x < nb_b; x++) \
152  sample += b[x] * ic[x]; \
153  \
154  for (x = 1; x < nb_a; x++) \
155  sample -= a[x] * oc[x]; \
156  \
157  oc[0] = sample; \
158  sample *= og * g; \
159  sample = sample * mix + ic[0] * (1. - mix); \
160  if (need_clipping && sample < min) { \
161  (*clippings)++; \
162  dst[n] = min; \
163  } else if (need_clipping && sample > max) { \
164  (*clippings)++; \
165  dst[n] = max; \
166  } else { \
167  dst[n] = sample; \
168  } \
169  } \
170  \
171  return 0; \
172 }
173 
174 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
175 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
176 IIR_CH(fltp, float, -1., 1., 0)
177 IIR_CH(dblp, double, -1., 1., 0)
178 
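The IIR_CH variants above evaluate the plain direct-form difference equation: the last nb_b inputs and nb_a outputs live in the per-channel cache arrays, the input is pre-scaled by the dry gain, and the result is scaled by the wet gain and the channel gain g before being mixed with the dry signal. Note that a[0] is assumed to be 1 (config_output() divides the denominator through by it). For reference, a minimal standalone sketch of the core recurrence, with hypothetical helper and argument names and no gain/mix/clipping handling:

    /* y[n] = b[0]*x[n] + ... + b[nb_b-1]*x[n-nb_b+1]
     *      - a[1]*y[n-1] - ... - a[nb_a-1]*y[n-nb_a+1]            */
    static double direct_form_step(const double *b, int nb_b,
                                   const double *a, int nb_a,
                                   const double *x_hist,  /* x_hist[0] = newest input   */
                                   const double *y_hist)  /* y_hist[1] = previous output */
    {
        double y = 0.;
        for (int i = 0; i < nb_b; i++)
            y += b[i] * x_hist[i];
        for (int i = 1; i < nb_a; i++)
            y -= a[i] * y_hist[i];
        return y;
    }
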
179 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
180 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
181  int ch, int nb_jobs) \
182 { \
183  AudioIIRContext *s = ctx->priv; \
184  const double ig = s->dry_gain; \
185  const double og = s->wet_gain; \
186  const double mix = s->mix; \
187  const double imix = 1. - mix; \
188  ThreadData *td = arg; \
189  AVFrame *in = td->in, *out = td->out; \
190  const type *src = (const type *)in->extended_data[ch]; \
191  type *dst = (type *)out->extended_data[ch]; \
192  IIRChannel *iir = &s->iir[ch]; \
193  const double g = iir->g; \
194  int *clippings = &iir->clippings; \
195  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
196  int n, i; \
197  \
198  for (i = nb_biquads - 1; i >= 0; i--) { \
199  const double a1 = -iir->biquads[i].a[1]; \
200  const double a2 = -iir->biquads[i].a[2]; \
201  const double b0 = iir->biquads[i].b[0]; \
202  const double b1 = iir->biquads[i].b[1]; \
203  const double b2 = iir->biquads[i].b[2]; \
204  double w1 = iir->biquads[i].w1; \
205  double w2 = iir->biquads[i].w2; \
206  \
207  for (n = 0; n < in->nb_samples; n++) { \
208  double i0 = ig * (i ? dst[n] : src[n]); \
209  double o0 = i0 * b0 + w1; \
210  \
211  w1 = b1 * i0 + w2 + a1 * o0; \
212  w2 = b2 * i0 + a2 * o0; \
213  o0 *= og * g; \
214  \
215  o0 = o0 * mix + imix * i0; \
216  if (need_clipping && o0 < min) { \
217  (*clippings)++; \
218  dst[n] = min; \
219  } else if (need_clipping && o0 > max) { \
220  (*clippings)++; \
221  dst[n] = max; \
222  } else { \
223  dst[n] = o0; \
224  } \
225  } \
226  iir->biquads[i].w1 = w1; \
227  iir->biquads[i].w2 = w2; \
228  } \
229  \
230  return 0; \
231 }
232 
233 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
234 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
235 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
236 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
237 
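Each section in the serial (cascade) variant is a biquad in transposed direct form II: two state variables w1/w2 carry the memory, and the feedback coefficients are negated once before the inner loop (a1 = -a[1], a2 = -a[2]). One step of that structure, written as a standalone sketch with a hypothetical helper name:

    /* Transposed direct form II step; a1 and a2 are the already-negated
     * denominator coefficients, exactly as in SERIAL_IIR_CH above. */
    static double tdf2_step(double in, double b0, double b1, double b2,
                            double a1, double a2, double *w1, double *w2)
    {
        double out = b0 * in + *w1;
        *w1 = b1 * in + a1 * out + *w2;
        *w2 = b2 * in + a2 * out;
        return out;
    }
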
238 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
239 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
240  int ch, int nb_jobs) \
241 { \
242  AudioIIRContext *s = ctx->priv; \
243  const double ig = s->dry_gain; \
244  const double og = s->wet_gain; \
245  const double mix = s->mix; \
246  const double imix = 1. - mix; \
247  ThreadData *td = arg; \
248  AVFrame *in = td->in, *out = td->out; \
249  const type *src = (const type *)in->extended_data[ch]; \
250  type *dst = (type *)out->extended_data[ch]; \
251  IIRChannel *iir = &s->iir[ch]; \
252  const double g = iir->g; \
253  const double fir = iir->fir; \
254  int *clippings = &iir->clippings; \
255  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
256  int n, i; \
257  \
258  for (i = 0; i < nb_biquads; i++) { \
259  const double a1 = -iir->biquads[i].a[1]; \
260  const double a2 = -iir->biquads[i].a[2]; \
261  const double b1 = iir->biquads[i].b[1]; \
262  const double b2 = iir->biquads[i].b[2]; \
263  double w1 = iir->biquads[i].w1; \
264  double w2 = iir->biquads[i].w2; \
265  \
266  for (n = 0; n < in->nb_samples; n++) { \
267  double i0 = ig * src[n]; \
268  double o0 = w1; \
269  \
270  w1 = b1 * i0 + w2 + a1 * o0; \
271  w2 = b2 * i0 + a2 * o0; \
272  o0 *= og * g; \
273  o0 += dst[n]; \
274  \
275  if (need_clipping && o0 < min) { \
276  (*clippings)++; \
277  dst[n] = min; \
278  } else if (need_clipping && o0 > max) { \
279  (*clippings)++; \
280  dst[n] = max; \
281  } else { \
282  dst[n] = o0; \
283  } \
284  } \
285  iir->biquads[i].w1 = w1; \
286  iir->biquads[i].w2 = w2; \
287  } \
288  \
289  for (n = 0; n < in->nb_samples; n++) { \
290  dst[n] += fir * src[n]; \
291  dst[n] = dst[n] * mix + imix * src[n]; \
292  } \
293  \
294  return 0; \
295 }
296 
297 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
298 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
299 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
300 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
301 
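The parallel variant accumulates the section outputs directly into dst[] (each section's b[0] has been zeroed by convert_serial2parallel(), so the first term comes from the state alone) and only adds the straight-through FIR tap and the dry/wet mix at the end. Per sample the result is effectively:

    /* dst[n] = mix * (sum_k og*g*section_k(ig*src[n]) + fir*src[n])
     *          + (1 - mix) * src[n]                                   */

Because dst[] doubles as the accumulator, this path is never run in place; filter_frame() below always allocates a fresh output frame when process == 2.
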
302 static void count_coefficients(char *item_str, int *nb_items)
303 {
304  char *p;
305 
306  if (!item_str)
307  return;
308 
309  *nb_items = 1;
310  for (p = item_str; *p && *p != '|'; p++) {
311  if (*p == ' ')
312  (*nb_items)++;
313  }
314 }
315 
316 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
317 {
318  AudioIIRContext *s = ctx->priv;
319  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
320  int i;
321 
322  p = old_str = av_strdup(item_str);
323  if (!p)
324  return AVERROR(ENOMEM);
325  for (i = 0; i < nb_items; i++) {
326  if (!(arg = av_strtok(p, "|", &saveptr)))
327  arg = prev_arg;
328 
329  if (!arg) {
330  av_freep(&old_str);
331  return AVERROR(EINVAL);
332  }
333 
334  p = NULL;
335  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
336  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
337  av_freep(&old_str);
338  return AVERROR(EINVAL);
339  }
340 
341  prev_arg = arg;
342  }
343 
344  av_freep(&old_str);
345 
346  return 0;
347 }
348 
349 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
350 {
351  char *p, *arg, *old_str, *saveptr = NULL;
352  int i;
353 
354  p = old_str = av_strdup(item_str);
355  if (!p)
356  return AVERROR(ENOMEM);
357  for (i = 0; i < nb_items; i++) {
358  if (!(arg = av_strtok(p, " ", &saveptr)))
359  break;
360 
361  p = NULL;
362  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
363  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
364  av_freep(&old_str);
365  return AVERROR(EINVAL);
366  }
367  }
368 
369  av_freep(&old_str);
370 
371  return 0;
372 }
373 
374 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
375 {
376  char *p, *arg, *old_str, *saveptr = NULL;
377  int i;
378 
379  p = old_str = av_strdup(item_str);
380  if (!p)
381  return AVERROR(ENOMEM);
382  for (i = 0; i < nb_items; i++) {
383  if (!(arg = av_strtok(p, " ", &saveptr)))
384  break;
385 
386  p = NULL;
387  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
388  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
389  av_freep(&old_str);
390  return AVERROR(EINVAL);
391  }
392  }
393 
394  av_freep(&old_str);
395 
396  return 0;
397 }
398 
399 static const char *format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
400 
401 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
402 {
403  AudioIIRContext *s = ctx->priv;
404  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
405  int i, ret;
406 
407  p = old_str = av_strdup(item_str);
408  if (!p)
409  return AVERROR(ENOMEM);
410  for (i = 0; i < channels; i++) {
411  IIRChannel *iir = &s->iir[i];
412 
413  if (!(arg = av_strtok(p, "|", &saveptr)))
414  arg = prev_arg;
415 
416  if (!arg) {
417  av_freep(&old_str);
418  return AVERROR(EINVAL);
419  }
420 
421  count_coefficients(arg, &iir->nb_ab[ab]);
422 
423  p = NULL;
424  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
425  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
426  if (!iir->ab[ab] || !iir->cache[ab]) {
427  av_freep(&old_str);
428  return AVERROR(ENOMEM);
429  }
430 
431  if (s->format > 0) {
432  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
433  } else {
434  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
435  }
436  if (ret < 0) {
437  av_freep(&old_str);
438  return ret;
439  }
440  prev_arg = arg;
441  }
442 
443  av_freep(&old_str);
444 
445  return 0;
446 }
447 
448 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
449 {
450  *RE = re * re2 - im * im2;
451  *IM = re * im2 + re2 * im;
452 }
453 
454 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
455 {
456  coefs[2 * n] = 1.0;
457 
458  for (int i = 1; i <= n; i++) {
459  for (int j = n - i; j < n; j++) {
460  double re, im;
461 
462  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
463  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
464 
465  coefs[2 * j] -= re;
466  coefs[2 * j + 1] -= im;
467  }
468  }
469 
470  for (int i = 0; i < n + 1; i++) {
471  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
472  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
473  coefs[2 * i + 1], i);
474  return AVERROR(EINVAL);
475  }
476  }
477 
478  return 0;
479 }
480 
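expand() multiplies a polynomial back out from its roots: pz holds n complex roots as interleaved re/im pairs, and coefs receives the n+1 complex coefficients in ascending powers of z, starting from the monic leading term coefs[2*n] = 1. The final loop then requires every imaginary part to be numerically zero, which only holds when complex roots come in conjugate pairs. A small worked example under that assumption:

    /* Roots 0.5 + 0.3i and 0.5 - 0.3i (a conjugate pair) expand to
     *   (z - (0.5 + 0.3i)) * (z - (0.5 - 0.3i)) = z^2 - 1.0*z + 0.34
     * so, with re/im interleaved and ascending powers of z,
     *   coefs[] = { 0.34, 0.0,   -1.0, 0.0,   1.0, 0.0 };           */
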
481 static void normalize_coeffs(AVFilterContext *ctx, int ch)
482 {
483  AudioIIRContext *s = ctx->priv;
484  IIRChannel *iir = &s->iir[ch];
485  double sum_den = 0.;
486 
487  if (!s->normalize)
488  return;
489 
490  for (int i = 0; i < iir->nb_ab[1]; i++) {
491  sum_den += iir->ab[1][i];
492  }
493 
494  if (sum_den > 1e-6) {
495  double factor, sum_num = 0.;
496 
497  for (int i = 0; i < iir->nb_ab[0]; i++) {
498  sum_num += iir->ab[0][i];
499  }
500 
501  factor = sum_num / sum_den;
502 
503  for (int i = 0; i < iir->nb_ab[1]; i++) {
504  iir->ab[1][i] *= factor;
505  }
506  }
507 }
508 
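normalize_coeffs() rescales the numerator (ab[1]) so that the sums of numerator and denominator coefficients match; in transfer-function form that pins the response at DC (z = 1) to unity:

    /* After the rescale,
     *   H(1) = (b[0] + b[1] + ...) / (a[0] + a[1] + ...) == 1,
     * so a constant input passes through at its original level. */

The local names sum_num/sum_den are swapped relative to the usual numerator/denominator convention, but the net effect is the DC normalization described above.
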
509 static int convert_zp2tf(AVFilterContext *ctx, int channels)
510 {
511  AudioIIRContext *s = ctx->priv;
512  int ch, i, j, ret = 0;
513 
514  for (ch = 0; ch < channels; ch++) {
515  IIRChannel *iir = &s->iir[ch];
516  double *topc, *botc;
517 
518  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
519  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
520  if (!topc || !botc) {
521  ret = AVERROR(ENOMEM);
522  goto fail;
523  }
524 
525  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
526  if (ret < 0) {
527  goto fail;
528  }
529 
530  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
531  if (ret < 0) {
532  goto fail;
533  }
534 
535  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
536  iir->ab[1][j] = topc[2 * i];
537  }
538  iir->nb_ab[1]++;
539 
540  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
541  iir->ab[0][j] = botc[2 * i];
542  }
543  iir->nb_ab[0]++;
544 
545  normalize_coeffs(ctx, ch);
546 
547 fail:
548  av_free(topc);
549  av_free(botc);
550  if (ret < 0)
551  break;
552  }
553 
554  return ret;
555 }
556 
557 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
558 {
559  AudioIIRContext *s = ctx->priv;
560  int ch, ret;
561 
562  for (ch = 0; ch < channels; ch++) {
563  IIRChannel *iir = &s->iir[ch];
564  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
565  int current_biquad = 0;
566 
567  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
568  if (!iir->biquads)
569  return AVERROR(ENOMEM);
570 
571  while (nb_biquads--) {
572  Pair outmost_pole = { -1, -1 };
573  Pair nearest_zero = { -1, -1 };
574  double zeros[4] = { 0 };
575  double poles[4] = { 0 };
576  double b[6] = { 0 };
577  double a[6] = { 0 };
578  double min_distance = DBL_MAX;
579  double max_mag = 0;
580  double factor;
581  int i;
582 
583  for (i = 0; i < iir->nb_ab[0]; i++) {
584  double mag;
585 
586  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
587  continue;
588  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
589 
590  if (mag > max_mag) {
591  max_mag = mag;
592  outmost_pole.a = i;
593  }
594  }
595 
596  for (i = 0; i < iir->nb_ab[0]; i++) {
597  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
598  continue;
599 
600  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
601  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
602  outmost_pole.b = i;
603  break;
604  }
605  }
606 
607  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
608 
609  if (outmost_pole.a < 0 || outmost_pole.b < 0)
610  return AVERROR(EINVAL);
611 
612  for (i = 0; i < iir->nb_ab[1]; i++) {
613  double distance;
614 
615  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
616  continue;
617  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
618  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
619 
620  if (distance < min_distance) {
621  min_distance = distance;
622  nearest_zero.a = i;
623  }
624  }
625 
626  for (i = 0; i < iir->nb_ab[1]; i++) {
627  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
628  continue;
629 
630  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
631  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
632  nearest_zero.b = i;
633  break;
634  }
635  }
636 
637  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
638 
639  if (nearest_zero.a < 0 || nearest_zero.b < 0)
640  return AVERROR(EINVAL);
641 
642  poles[0] = iir->ab[0][2 * outmost_pole.a ];
643  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
644 
645  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
646  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
647 
648  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
649  zeros[2] = 0;
650  zeros[3] = 0;
651 
652  poles[2] = 0;
653  poles[3] = 0;
654  } else {
655  poles[2] = iir->ab[0][2 * outmost_pole.b ];
656  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
657 
658  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
659  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
660  }
661 
662  ret = expand(ctx, zeros, 2, b);
663  if (ret < 0)
664  return ret;
665 
666  ret = expand(ctx, poles, 2, a);
667  if (ret < 0)
668  return ret;
669 
670  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
671  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
672  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
673  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
674 
675  iir->biquads[current_biquad].a[0] = 1.;
676  iir->biquads[current_biquad].a[1] = a[2] / a[4];
677  iir->biquads[current_biquad].a[2] = a[0] / a[4];
678  iir->biquads[current_biquad].b[0] = b[4] / a[4];
679  iir->biquads[current_biquad].b[1] = b[2] / a[4];
680  iir->biquads[current_biquad].b[2] = b[0] / a[4];
681 
682  if (s->normalize &&
683  fabs(iir->biquads[current_biquad].b[0] +
684  iir->biquads[current_biquad].b[1] +
685  iir->biquads[current_biquad].b[2]) > 1e-6) {
686  factor = (iir->biquads[current_biquad].a[0] +
687  iir->biquads[current_biquad].a[1] +
688  iir->biquads[current_biquad].a[2]) /
689  (iir->biquads[current_biquad].b[0] +
690  iir->biquads[current_biquad].b[1] +
691  iir->biquads[current_biquad].b[2]);
692 
693  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
694 
695  iir->biquads[current_biquad].b[0] *= factor;
696  iir->biquads[current_biquad].b[1] *= factor;
697  iir->biquads[current_biquad].b[2] *= factor;
698  }
699 
700  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
701  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
702  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
703 
704  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
705  iir->biquads[current_biquad].a[0],
706  iir->biquads[current_biquad].a[1],
707  iir->biquads[current_biquad].a[2],
708  iir->biquads[current_biquad].b[0],
709  iir->biquads[current_biquad].b[1],
710  iir->biquads[current_biquad].b[2]);
711 
712  current_biquad++;
713  }
714  }
715 
716  return 0;
717 }
718 
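decompose_zp2biquads() builds the cascade by repeatedly taking the remaining pole with the largest magnitude ("outmost"), finding its complex conjugate, pairing it with the nearest remaining zero (and that zero's conjugate), and expanding each pair into one second-order section; consumed poles and zeros are marked with NAN. Pairing the poles closest to the unit circle with their nearest zeros is the usual heuristic for keeping per-section peak gain down. Each extracted section has the form

    /* H_k(z) = (b0 + b1*z^-1 + b2*z^-2) / (1 + a1*z^-1 + a2*z^-2),
     * obtained from the expanded quadratics by dividing through by the
     * leading denominator coefficient (a[4] above); the overall channel
     * gain g is folded into the b coefficients of the first section only. */

and the cascade of all sections reproduces the original zero/pole description.
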
719 static void biquad_process(double *x, double *y, int length,
720  double b0, double b1, double b2,
721  double a1, double a2)
722 {
723  double w1 = 0., w2 = 0.;
724 
725  a1 = -a1;
726  a2 = -a2;
727 
728  for (int n = 0; n < length; n++) {
729  double out, in = x[n];
730 
731  y[n] = out = in * b0 + w1;
732  w1 = b1 * in + w2 + a1 * out;
733  w2 = b2 * in + a2 * out;
734  }
735 }
736 
737 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
738 {
739  double sum = 0.;
740 
741  for (int i = 0; i < n; i++) {
742  for (int j = i; j < n; j++) {
743  sum = 0.;
744  for (int k = 0; k < i; k++)
745  sum += lu[i * n + k] * lu[k * n + j];
746  lu[i * n + j] = matrix[j * n + i] - sum;
747  }
748  for (int j = i + 1; j < n; j++) {
749  sum = 0.;
750  for (int k = 0; k < i; k++)
751  sum += lu[j * n + k] * lu[k * n + i];
752  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
753  }
754  }
755 
756  for (int i = 0; i < n; i++) {
757  sum = 0.;
758  for (int k = 0; k < i; k++)
759  sum += lu[i * n + k] * y[k];
760  y[i] = vector[i] - sum;
761  }
762 
763  for (int i = n - 1; i >= 0; i--) {
764  sum = 0.;
765  for (int k = i + 1; k < n; k++)
766  sum += lu[i * n + k] * x[k];
767  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
768  }
769 }
770 
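solve() is a small dense linear solver: a Doolittle-style LU factorization (unit lower triangle and upper triangle stored together in lu[]) followed by forward substitution into y and back substitution into x. Note the matrix[j * n + i] access in the factorization: the system actually solved is for the transpose of the row-major matrix passed in, and there is no pivoting. A hypothetical 2x2 usage sketch with made-up values:

    double m[4] = { 2., 1.,          /* row-major; solve() works on m^T */
                    0., 3. };
    double v[2] = { 4., 5. };        /* m^T * x = v                     */
    double y[2], x[2], lu[4];
    solve(m, v, 2, y, x, lu);        /* -> x[0] == 2., x[1] == 1.       */
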
771 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
772 {
773  AudioIIRContext *s = ctx->priv;
774  int ret = 0;
775 
776  for (int ch = 0; ch < channels; ch++) {
777  IIRChannel *iir = &s->iir[ch];
778  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
779  int length = nb_biquads * 2 + 1;
780  double *impulse = av_calloc(length, sizeof(*impulse));
781  double *y = av_calloc(length, sizeof(*y));
782  double *resp = av_calloc(length, sizeof(*resp));
783  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
784  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
785 
 786  if (!impulse || !y || !resp || !M || !W) {
787  av_free(impulse);
788  av_free(y);
789  av_free(resp);
790  av_free(M);
791  av_free(W);
792  return AVERROR(ENOMEM);
793  }
794 
795  impulse[0] = 1.;
796 
797  for (int n = 0; n < nb_biquads; n++) {
798  BiquadContext *biquad = &iir->biquads[n];
799 
800  biquad_process(n ? y : impulse, y, length,
801  biquad->b[0], biquad->b[1], biquad->b[2],
802  biquad->a[1], biquad->a[2]);
803  }
804 
805  for (int n = 0; n < nb_biquads; n++) {
806  BiquadContext *biquad = &iir->biquads[n];
807 
808  biquad_process(impulse, resp, length - 1,
809  1., 0., 0., biquad->a[1], biquad->a[2]);
810 
811  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
812  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
813  memset(resp, 0, length * sizeof(*resp));
814  }
815 
816  solve(M, &y[1], length - 1, &impulse[1], resp, W);
817 
818  iir->fir = y[0];
819 
820  for (int n = 0; n < nb_biquads; n++) {
821  BiquadContext *biquad = &iir->biquads[n];
822 
823  biquad->b[0] = 0.;
824  biquad->b[1] = resp[n * 2 + 0];
825  biquad->b[2] = resp[n * 2 + 1];
826  }
827 
828  av_free(impulse);
829  av_free(y);
830  av_free(resp);
831  av_free(M);
832  av_free(W);
833 
834  if (ret < 0)
835  return ret;
836  }
837 
838  return 0;
839 }
840 
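convert_serial2parallel() turns the cascade produced by decompose_zp2biquads() into an equivalent parallel bank, keeping each section's denominator and re-deriving its numerator. It runs a unit impulse through the cascade, computes each section's all-pole impulse response, and solves the resulting linear system so that a direct gain (iir->fir) plus per-section (b1, b2) numerators reproduce the same response; in effect a numerically computed partial-fraction expansion:

    /* Target parallel form:
     *   H(z) = fir + sum_k (b1_k*z^-1 + b2_k*z^-2)
     *                      / (1 + a1_k*z^-1 + a2_k*z^-2)
     * The unknowns fir, b1_k, b2_k are fixed by matching the first
     * 2*nb_biquads + 1 samples of the cascade's impulse response. */
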
841 static void convert_pr2zp(AVFilterContext *ctx, int channels)
842 {
843  AudioIIRContext *s = ctx->priv;
844  int ch;
845 
846  for (ch = 0; ch < channels; ch++) {
847  IIRChannel *iir = &s->iir[ch];
848  int n;
849 
850  for (n = 0; n < iir->nb_ab[0]; n++) {
851  double r = iir->ab[0][2*n];
852  double angle = iir->ab[0][2*n+1];
853 
854  iir->ab[0][2*n] = r * cos(angle);
855  iir->ab[0][2*n+1] = r * sin(angle);
856  }
857 
858  for (n = 0; n < iir->nb_ab[1]; n++) {
859  double r = iir->ab[1][2*n];
860  double angle = iir->ab[1][2*n+1];
861 
862  iir->ab[1][2*n] = r * cos(angle);
863  iir->ab[1][2*n+1] = r * sin(angle);
864  }
865  }
866 }
867 
868 static void convert_sp2zp(AVFilterContext *ctx, int channels)
869 {
870  AudioIIRContext *s = ctx->priv;
871  int ch;
872 
873  for (ch = 0; ch < channels; ch++) {
874  IIRChannel *iir = &s->iir[ch];
875  int n;
876 
877  for (n = 0; n < iir->nb_ab[0]; n++) {
878  double sr = iir->ab[0][2*n];
879  double si = iir->ab[0][2*n+1];
880 
881  iir->ab[0][2*n] = exp(sr) * cos(si);
882  iir->ab[0][2*n+1] = exp(sr) * sin(si);
883  }
884 
885  for (n = 0; n < iir->nb_ab[1]; n++) {
886  double sr = iir->ab[1][2*n];
887  double si = iir->ab[1][2*n+1];
888 
889  iir->ab[1][2*n] = exp(sr) * cos(si);
890  iir->ab[1][2*n+1] = exp(sr) * sin(si);
891  }
892  }
893 }
894 
895 static double fact(double i)
896 {
897  if (i <= 0.)
898  return 1.;
899  return i * fact(i - 1.);
900 }
901 
902 static double coef_sf2zf(double *a, int N, int n)
903 {
904  double z = 0.;
905 
906  for (int i = 0; i <= N; i++) {
907  double acc = 0.;
908 
909  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
910  acc += ((fact(i) * fact(N - i)) /
911  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
 912  ((k & 1) ? -1. : 1.);
913  }
914 
915  z += a[i] * pow(2., i) * acc;
916  }
917 
918  return z;
919 }
920 
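coef_sf2zf() is the coefficient-domain form of the bilinear transform used by convert_sf2tf() below: substituting s -> 2*(1 - z^-1)/(1 + z^-1) into an analog polynomial of degree N and clearing the (1 + z^-1)^N denominator gives, for the coefficient of z^-n,

    /*   z_n = sum_i a[i] * 2^i * sum_k (-1)^k * C(i, k) * C(N - i, n - k)
     * where C(,) are binomial coefficients; the factorial ratio in the
     * inner loop above is exactly C(i, k) * C(N - i, n - k).             */

The pow(2., i) factor is the bilinear constant (2/T)^i with the sample period T taken as 1.
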
921 static void convert_sf2tf(AVFilterContext *ctx, int channels)
922 {
923  AudioIIRContext *s = ctx->priv;
924  int ch;
925 
926  for (ch = 0; ch < channels; ch++) {
927  IIRChannel *iir = &s->iir[ch];
928  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
929  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
930 
931  if (!temp0 || !temp1)
932  goto next;
933 
934  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
935  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
936 
937  for (int n = 0; n < iir->nb_ab[0]; n++)
938  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
939 
940  for (int n = 0; n < iir->nb_ab[1]; n++)
941  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
942 
943 next:
944  av_free(temp0);
945  av_free(temp1);
946  }
947 }
948 
949 static void convert_pd2zp(AVFilterContext *ctx, int channels)
950 {
951  AudioIIRContext *s = ctx->priv;
952  int ch;
953 
954  for (ch = 0; ch < channels; ch++) {
955  IIRChannel *iir = &s->iir[ch];
956  int n;
957 
958  for (n = 0; n < iir->nb_ab[0]; n++) {
959  double r = iir->ab[0][2*n];
960  double angle = M_PI*iir->ab[0][2*n+1]/180.;
961 
962  iir->ab[0][2*n] = r * cos(angle);
963  iir->ab[0][2*n+1] = r * sin(angle);
964  }
965 
966  for (n = 0; n < iir->nb_ab[1]; n++) {
967  double r = iir->ab[1][2*n];
968  double angle = M_PI*iir->ab[1][2*n+1]/180.;
969 
970  iir->ab[1][2*n] = r * cos(angle);
971  iir->ab[1][2*n+1] = r * sin(angle);
972  }
973  }
974 }
975 
976 static void check_stability(AVFilterContext *ctx, int channels)
977 {
978  AudioIIRContext *s = ctx->priv;
979  int ch;
980 
981  for (ch = 0; ch < channels; ch++) {
982  IIRChannel *iir = &s->iir[ch];
983 
984  for (int n = 0; n < iir->nb_ab[0]; n++) {
985  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
986 
987  if (pr >= 1.) {
988  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
989  break;
990  }
991  }
992  }
993 }
994 
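check_stability() applies the standard criterion for a causal digital IIR filter: every pole must lie strictly inside the unit circle, otherwise the impulse response does not decay.

    /* pole p = re + j*im is acceptable iff |p| = hypot(re, im) < 1 */

Note that the check only warns (AV_LOG_WARNING); an unstable set of poles is still accepted.
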
995 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
996 {
997  const uint8_t *font;
998  int font_height;
999  int i;
1000 
1001  font = avpriv_cga_font, font_height = 8;
1002 
1003  for (i = 0; txt[i]; i++) {
1004  int char_y, mask;
1005 
1006  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1007  for (char_y = 0; char_y < font_height; char_y++) {
1008  for (mask = 0x80; mask; mask >>= 1) {
1009  if (font[txt[i] * font_height + char_y] & mask)
1010  AV_WL32(p, color);
1011  p += 4;
1012  }
1013  p += pic->linesize[0] - 8 * 4;
1014  }
1015  }
1016 }
1017 
1018 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1019 {
1020  int dx = FFABS(x1-x0);
1021  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1022  int err = (dx>dy ? dx : -dy) / 2, e2;
1023 
1024  for (;;) {
1025  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1026 
1027  if (x0 == x1 && y0 == y1)
1028  break;
1029 
1030  e2 = err;
1031 
1032  if (e2 >-dx) {
1033  err -= dy;
1034  x0--;
1035  }
1036 
1037  if (e2 < dy) {
1038  err += dx;
1039  y0 += sy;
1040  }
1041  }
1042 }
1043 
1044 static double distance(double x0, double x1, double y0, double y1)
1045 {
1046  return hypot(x0 - x1, y0 - y1);
1047 }
1048 
1049 static void get_response(int channel, int format, double w,
1050  const double *b, const double *a,
1051  int nb_b, int nb_a, double *magnitude, double *phase)
1052 {
1053  double realz, realp;
1054  double imagz, imagp;
1055  double real, imag;
1056  double div;
1057 
1058  if (format == 0) {
1059  realz = 0., realp = 0.;
1060  imagz = 0., imagp = 0.;
1061  for (int x = 0; x < nb_a; x++) {
1062  realz += cos(-x * w) * a[x];
1063  imagz += sin(-x * w) * a[x];
1064  }
1065 
1066  for (int x = 0; x < nb_b; x++) {
1067  realp += cos(-x * w) * b[x];
1068  imagp += sin(-x * w) * b[x];
1069  }
1070 
1071  div = realp * realp + imagp * imagp;
1072  real = (realz * realp + imagz * imagp) / div;
1073  imag = (imagz * realp - imagp * realz) / div;
1074 
1075  *magnitude = hypot(real, imag);
1076  *phase = atan2(imag, real);
1077  } else {
1078  double p = 1., z = 1.;
1079  double acc = 0.;
1080 
1081  for (int x = 0; x < nb_a; x++) {
1082  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1083  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1084  }
1085 
1086  for (int x = 0; x < nb_b; x++) {
1087  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1088  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1089  }
1090 
1091  *magnitude = z / p;
1092  *phase = acc;
1093  }
1094 }
1095 
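get_response() evaluates the channel's frequency response at z = e^(j*w) for a single normalized frequency w in [0, pi], either from transfer-function coefficients or geometrically from the zeros and poles:

    /* tf form (format == 0):
     *   H(e^jw) = (sum_k num[k]*e^(-j*k*w)) / (sum_k den[k]*e^(-j*k*w))
     * zero/pole form (format != 0):
     *   |H(e^jw)|   = prod_k |e^jw - zero_k| / prod_k |e^jw - pole_k|
     *   arg H(e^jw) = sum_k angle(e^jw - zero_k) - sum_k angle(e^jw - pole_k) */

Be aware that the a/b parameter names here are swapped relative to the rest of the file (the caller passes ab[0], the denominator, as b). draw_response() below scales the magnitude by the channel gain g, unwraps the phase, and derives the delay trace from the phase differences before plotting all three curves.
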
1096 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1097 {
1098  AudioIIRContext *s = ctx->priv;
1099  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1100  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1101  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1102  char text[32];
1103  int ch, i;
1104 
1105  memset(out->data[0], 0, s->h * out->linesize[0]);
1106 
1107  phase = av_malloc_array(s->w, sizeof(*phase));
1108  temp = av_malloc_array(s->w, sizeof(*temp));
1109  mag = av_malloc_array(s->w, sizeof(*mag));
1110  delay = av_malloc_array(s->w, sizeof(*delay));
1111  if (!mag || !phase || !delay || !temp)
1112  goto end;
1113 
1114  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1115  for (i = 0; i < s->w; i++) {
1116  const double *b = s->iir[ch].ab[0];
1117  const double *a = s->iir[ch].ab[1];
1118  const int nb_b = s->iir[ch].nb_ab[0];
1119  const int nb_a = s->iir[ch].nb_ab[1];
1120  double w = i * M_PI / (s->w - 1);
1121  double m, p;
1122 
1123  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1124 
1125  mag[i] = s->iir[ch].g * m;
1126  phase[i] = p;
1127  min = fmin(min, mag[i]);
1128  max = fmax(max, mag[i]);
1129  }
1130 
1131  temp[0] = 0.;
1132  for (i = 0; i < s->w - 1; i++) {
1133  double d = phase[i] - phase[i + 1];
1134  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1135  }
1136 
1137  min_phase = phase[0];
1138  max_phase = phase[0];
1139  for (i = 1; i < s->w; i++) {
1140  temp[i] += temp[i - 1];
1141  phase[i] += temp[i];
1142  min_phase = fmin(min_phase, phase[i]);
1143  max_phase = fmax(max_phase, phase[i]);
1144  }
1145 
1146  for (i = 0; i < s->w - 1; i++) {
1147  double div = s->w / (double)sample_rate;
1148 
1149  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1150  min_delay = fmin(min_delay, delay[i + 1]);
1151  max_delay = fmax(max_delay, delay[i + 1]);
1152  }
1153  delay[0] = delay[1];
1154 
1155  for (i = 0; i < s->w; i++) {
1156  int ymag = mag[i] / max * (s->h - 1);
1157  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1158  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1159 
1160  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1161  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1162  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1163 
1164  if (prev_ymag < 0)
1165  prev_ymag = ymag;
1166  if (prev_yphase < 0)
1167  prev_yphase = yphase;
1168  if (prev_ydelay < 0)
1169  prev_ydelay = ydelay;
1170 
1171  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1172  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1173  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1174 
1175  prev_ymag = ymag;
1176  prev_yphase = yphase;
1177  prev_ydelay = ydelay;
1178  }
1179 
1180  if (s->w > 400 && s->h > 100) {
1181  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1182  snprintf(text, sizeof(text), "%.2f", max);
1183  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1184 
1185  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1186  snprintf(text, sizeof(text), "%.2f", min);
1187  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1188 
1189  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1190  snprintf(text, sizeof(text), "%.2f", max_phase);
1191  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1192 
1193  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1194  snprintf(text, sizeof(text), "%.2f", min_phase);
1195  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1196 
1197  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1198  snprintf(text, sizeof(text), "%.2f", max_delay);
1199  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1200 
1201  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1202  snprintf(text, sizeof(text), "%.2f", min_delay);
1203  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1204  }
1205 
1206 end:
1207  av_free(delay);
1208  av_free(temp);
1209  av_free(phase);
1210  av_free(mag);
1211 }
1212 
1213 static int config_output(AVFilterLink *outlink)
1214 {
1215  AVFilterContext *ctx = outlink->src;
1216  AudioIIRContext *s = ctx->priv;
1217  AVFilterLink *inlink = ctx->inputs[0];
1218  int ch, ret, i;
1219 
1220  s->channels = inlink->channels;
1221  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1222  if (!s->iir)
1223  return AVERROR(ENOMEM);
1224 
1225  ret = read_gains(ctx, s->g_str, inlink->channels);
1226  if (ret < 0)
1227  return ret;
1228 
1229  ret = read_channels(ctx, inlink->channels, s->a_str, 0);
1230  if (ret < 0)
1231  return ret;
1232 
1233  ret = read_channels(ctx, inlink->channels, s->b_str, 1);
1234  if (ret < 0)
1235  return ret;
1236 
1237  if (s->format == -1) {
1238  convert_sf2tf(ctx, inlink->channels);
1239  s->format = 0;
1240  } else if (s->format == 2) {
1241  convert_pr2zp(ctx, inlink->channels);
1242  } else if (s->format == 3) {
1243  convert_pd2zp(ctx, inlink->channels);
1244  } else if (s->format == 4) {
1245  convert_sp2zp(ctx, inlink->channels);
1246  }
1247  if (s->format > 0) {
1248  check_stability(ctx, inlink->channels);
1249  }
1250 
1251  av_frame_free(&s->video);
1252  if (s->response) {
1253  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1254  if (!s->video)
1255  return AVERROR(ENOMEM);
1256 
1257  draw_response(ctx, s->video, inlink->sample_rate);
1258  }
1259 
1260  if (s->format == 0)
 1261  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for a high number of zeros/poles.\n");
1262 
1263  if (s->format > 0 && s->process == 0) {
 1264  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1265 
1266  ret = convert_zp2tf(ctx, inlink->channels);
1267  if (ret < 0)
1268  return ret;
1269  } else if (s->format <= 0 && s->process == 1) {
1270  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1271  return AVERROR_PATCHWELCOME;
1272  } else if (s->format <= 0 && s->process == 2) {
1273  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1274  return AVERROR_PATCHWELCOME;
1275  } else if (s->format > 0 && s->process == 1) {
1276  ret = decompose_zp2biquads(ctx, inlink->channels);
1277  if (ret < 0)
1278  return ret;
1279  } else if (s->format > 0 && s->process == 2) {
1280  if (s->precision > 1)
1281  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1282  ret = decompose_zp2biquads(ctx, inlink->channels);
1283  if (ret < 0)
1284  return ret;
1285  ret = convert_serial2parallel(ctx, inlink->channels);
1286  if (ret < 0)
1287  return ret;
1288  }
1289 
1290  for (ch = 0; s->format == 0 && ch < inlink->channels; ch++) {
1291  IIRChannel *iir = &s->iir[ch];
1292 
1293  for (i = 1; i < iir->nb_ab[0]; i++) {
1294  iir->ab[0][i] /= iir->ab[0][0];
1295  }
1296 
1297  iir->ab[0][0] = 1.0;
1298  for (i = 0; i < iir->nb_ab[1]; i++) {
1299  iir->ab[1][i] *= iir->g;
1300  }
1301 
1302  normalize_coeffs(ctx, ch);
1303  }
1304 
1305  switch (inlink->format) {
1306  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1307  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1308  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1309  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1310  }
1311 
1312  return 0;
1313 }
1314 
1315 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1316 {
1317  AVFilterContext *ctx = inlink->dst;
1318  AudioIIRContext *s = ctx->priv;
1319  AVFilterLink *outlink = ctx->outputs[0];
1320  ThreadData td;
1321  AVFrame *out;
1322  int ch, ret;
1323 
1324  if (av_frame_is_writable(in) && s->process != 2) {
1325  out = in;
1326  } else {
1327  out = ff_get_audio_buffer(outlink, in->nb_samples);
1328  if (!out) {
1329  av_frame_free(&in);
1330  return AVERROR(ENOMEM);
1331  }
1332  av_frame_copy_props(out, in);
1333  }
1334 
1335  td.in = in;
1336  td.out = out;
1337  ctx->internal->execute(ctx, s->iir_channel, &td, NULL, outlink->channels);
1338 
1339  for (ch = 0; ch < outlink->channels; ch++) {
1340  if (s->iir[ch].clippings > 0)
1341  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1342  ch, s->iir[ch].clippings);
1343  s->iir[ch].clippings = 0;
1344  }
1345 
1346  if (in != out)
1347  av_frame_free(&in);
1348 
1349  if (s->response) {
1350  AVFilterLink *outlink = ctx->outputs[1];
1351  int64_t old_pts = s->video->pts;
1352  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1353 
1354  if (new_pts > old_pts) {
1355  AVFrame *clone;
1356 
1357  s->video->pts = new_pts;
1358  clone = av_frame_clone(s->video);
1359  if (!clone)
1360  return AVERROR(ENOMEM);
1361  ret = ff_filter_frame(outlink, clone);
1362  if (ret < 0)
1363  return ret;
1364  }
1365  }
1366 
1367  return ff_filter_frame(outlink, out);
1368 }
1369 
1370 static int config_video(AVFilterLink *outlink)
1371 {
1372  AVFilterContext *ctx = outlink->src;
1373  AudioIIRContext *s = ctx->priv;
1374 
1375  outlink->sample_aspect_ratio = (AVRational){1,1};
1376  outlink->w = s->w;
1377  outlink->h = s->h;
1378  outlink->frame_rate = s->rate;
1379  outlink->time_base = av_inv_q(outlink->frame_rate);
1380 
1381  return 0;
1382 }
1383 
1384 static av_cold int init(AVFilterContext *ctx)
1385 {
1386  AudioIIRContext *s = ctx->priv;
1387  AVFilterPad pad, vpad;
1388  int ret;
1389 
1390  if (!s->a_str || !s->b_str || !s->g_str) {
1391  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1392  return AVERROR(EINVAL);
1393  }
1394 
1395  switch (s->precision) {
1396  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1397  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1398  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1399  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1400  default: return AVERROR_BUG;
1401  }
1402 
1403  pad = (AVFilterPad){
1404  .name = "default",
1405  .type = AVMEDIA_TYPE_AUDIO,
1406  .config_props = config_output,
1407  };
1408 
1409  ret = ff_insert_outpad(ctx, 0, &pad);
1410  if (ret < 0)
1411  return ret;
1412 
1413  if (s->response) {
1414  vpad = (AVFilterPad){
1415  .name = "filter_response",
1416  .type = AVMEDIA_TYPE_VIDEO,
1417  .config_props = config_video,
1418  };
1419 
1420  ret = ff_insert_outpad(ctx, 1, &vpad);
1421  if (ret < 0)
1422  return ret;
1423  }
1424 
1425  return 0;
1426 }
1427 
1428 static av_cold void uninit(AVFilterContext *ctx)
1429 {
1430  AudioIIRContext *s = ctx->priv;
1431  int ch;
1432 
1433  if (s->iir) {
1434  for (ch = 0; ch < s->channels; ch++) {
1435  IIRChannel *iir = &s->iir[ch];
1436  av_freep(&iir->ab[0]);
1437  av_freep(&iir->ab[1]);
1438  av_freep(&iir->cache[0]);
1439  av_freep(&iir->cache[1]);
1440  av_freep(&iir->biquads);
1441  }
1442  }
1443  av_freep(&s->iir);
1444 
1445  av_frame_free(&s->video);
1446 }
1447 
1448 static const AVFilterPad inputs[] = {
1449  {
1450  .name = "default",
1451  .type = AVMEDIA_TYPE_AUDIO,
1452  .filter_frame = filter_frame,
1453  },
1454  { NULL }
1455 };
1456 
1457 #define OFFSET(x) offsetof(AudioIIRContext, x)
1458 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1459 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1460 
1461 static const AVOption aiir_options[] = {
1462  { "zeros", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1463  { "z", "set B/numerator/zeros coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1464  { "poles", "set A/denominator/poles coefficients", OFFSET(a_str),AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1465  { "p", "set A/denominator/poles coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1466  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1467  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1468  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1469  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1470  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -1, 4, AF, "format" },
1471  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -1, 4, AF, "format" },
1472  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "format" },
1473  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "format" },
1474  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "format" },
1475  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "format" },
1476  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "format" },
1477  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, "format" },
1478  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1479  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "process" },
1480  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "process" },
1481  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "process" },
1482  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "process" },
1483  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1484  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, "precision" },
1485  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "precision" },
1486  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "precision" },
1487  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "precision" },
1488  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, "precision" },
1489  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1490  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1491  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1492  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1493  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1494  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1495  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1496  { NULL },
1497 };
1498 
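For reference, the options above map directly to the filter string. A hypothetical invocation with hand-picked Z-plane zeros/poles (the default zp format); the coefficient values here are made up purely for illustration:

    ffmpeg -i in.wav -af "aiir=zeros=1+0i 1-0i:poles=0.99+0.1i 0.99-0.1i:gains=1:process=s" out.wav

With response=1 the filter also produces a video stream showing the magnitude/phase/delay plot, which is why init() above inserts the second output pad only when that option is enabled.
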
1499 AVFILTER_DEFINE_CLASS(aiir);
1500 
1501 AVFilter ff_af_aiir = {
1502  .name = "aiir",
1503  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1504  .priv_size = sizeof(AudioIIRContext),
1505  .priv_class = &aiir_class,
1506  .init = init,
1507  .uninit = uninit,
1508  .query_formats = query_formats,
1509  .inputs = inputs,
1510  .outputs = NULL,
1511  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_SLICE_THREADS,
1512 };
Definition: vf_xfade.c:1587