FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/intreadwrite.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "video.h"
33 
34 typedef struct ThreadData {
35  AVFrame *in, *out;
36 } ThreadData;
37 
38 typedef struct Pair {
39  int a, b;
40 } Pair;
41 
42 typedef struct BiquadContext {
43  double a[3];
44  double b[3];
45  double w1, w2;
46 } BiquadContext;
47 
48 typedef struct IIRChannel {
49  int nb_ab[2];
50  double *ab[2];
51  double g;
52  double *cache[2];
53  double fir;
54  BiquadContext *biquads;
55  int clippings;
56 } IIRChannel;
57 
58 typedef struct AudioIIRContext {
59  const AVClass *class;
60  char *a_str, *b_str, *g_str;
61  double dry_gain, wet_gain;
62  double mix;
63  int normalize;
64  int format;
65  int process;
66  int precision;
67  int response;
68  int w, h;
69  int ir_channel;
70  AVRational rate;
71 
72  AVFrame *video;
73 
74  IIRChannel *iir;
75  int channels;
76  enum AVSampleFormat sample_format;
77 
78  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
79 } AudioIIRContext;
80 
81 static int query_formats(AVFilterContext *ctx)
82 {
83  AudioIIRContext *s = ctx->priv;
84  AVFilterFormats *formats;
85  enum AVSampleFormat sample_fmts[] = {
86  AV_SAMPLE_FMT_DBLP,
87  AV_SAMPLE_FMT_NONE
88  };
89  static const enum AVPixelFormat pix_fmts[] = {
90  AV_PIX_FMT_RGB0,
91  AV_PIX_FMT_NONE
92  };
93  int ret;
94 
95  if (s->response) {
96  AVFilterLink *videolink = ctx->outputs[1];
97 
98  formats = ff_make_format_list(pix_fmts);
99  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
100  return ret;
101  }
102 
103  ret = ff_set_common_all_channel_counts(ctx);
104  if (ret < 0)
105  return ret;
106 
107  sample_fmts[0] = s->sample_format;
108  ret = ff_set_common_formats_from_list(ctx, sample_fmts);
109  if (ret < 0)
110  return ret;
111 
112  return ff_set_common_all_samplerates(ctx);
113 }
114 
115 #define IIR_CH(name, type, min, max, need_clipping) \
116 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
117 { \
118  AudioIIRContext *s = ctx->priv; \
119  const double ig = s->dry_gain; \
120  const double og = s->wet_gain; \
121  const double mix = s->mix; \
122  ThreadData *td = arg; \
123  AVFrame *in = td->in, *out = td->out; \
124  const type *src = (const type *)in->extended_data[ch]; \
125  double *oc = (double *)s->iir[ch].cache[0]; \
126  double *ic = (double *)s->iir[ch].cache[1]; \
127  const int nb_a = s->iir[ch].nb_ab[0]; \
128  const int nb_b = s->iir[ch].nb_ab[1]; \
129  const double *a = s->iir[ch].ab[0]; \
130  const double *b = s->iir[ch].ab[1]; \
131  const double g = s->iir[ch].g; \
132  int *clippings = &s->iir[ch].clippings; \
133  type *dst = (type *)out->extended_data[ch]; \
134  int n; \
135  \
136  for (n = 0; n < in->nb_samples; n++) { \
137  double sample = 0.; \
138  int x; \
139  \
140  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
141  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
142  ic[0] = src[n] * ig; \
143  for (x = 0; x < nb_b; x++) \
144  sample += b[x] * ic[x]; \
145  \
146  for (x = 1; x < nb_a; x++) \
147  sample -= a[x] * oc[x]; \
148  \
149  oc[0] = sample; \
150  sample *= og * g; \
151  sample = sample * mix + ic[0] * (1. - mix); \
152  if (need_clipping && sample < min) { \
153  (*clippings)++; \
154  dst[n] = min; \
155  } else if (need_clipping && sample > max) { \
156  (*clippings)++; \
157  dst[n] = max; \
158  } else { \
159  dst[n] = sample; \
160  } \
161  } \
162  \
163  return 0; \
164 }
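/*
 * The IIR_CH() kernels above evaluate the direct-form difference equation
 *
 *     y[n] = sum_{k=0..nb_b-1} b[k]*x[n-k]  -  sum_{k=1..nb_a-1} a[k]*y[n-k]
 *
 * keeping the input history in cache[1] (ic) and the output history in
 * cache[0] (oc), then apply the wet gain, the per-channel gain g and the
 * dry/wet mix. Worked example (illustrative only): for the first-order
 * lowpass b = {0.1}, a = {1, -0.9}, an impulse x = {1, 0, 0, ...} yields
 * y = {0.1, 0.09, 0.081, ...}, i.e. y[n] = 0.1 * 0.9^n.
 */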
165 
166 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
167 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
168 IIR_CH(fltp, float, -1., 1., 0)
169 IIR_CH(dblp, double, -1., 1., 0)
170 
171 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
172 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
173  int ch, int nb_jobs) \
174 { \
175  AudioIIRContext *s = ctx->priv; \
176  const double ig = s->dry_gain; \
177  const double og = s->wet_gain; \
178  const double mix = s->mix; \
179  const double imix = 1. - mix; \
180  ThreadData *td = arg; \
181  AVFrame *in = td->in, *out = td->out; \
182  const type *src = (const type *)in->extended_data[ch]; \
183  type *dst = (type *)out->extended_data[ch]; \
184  IIRChannel *iir = &s->iir[ch]; \
185  const double g = iir->g; \
186  int *clippings = &iir->clippings; \
187  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
188  int n, i; \
189  \
190  for (i = nb_biquads - 1; i >= 0; i--) { \
191  const double a1 = -iir->biquads[i].a[1]; \
192  const double a2 = -iir->biquads[i].a[2]; \
193  const double b0 = iir->biquads[i].b[0]; \
194  const double b1 = iir->biquads[i].b[1]; \
195  const double b2 = iir->biquads[i].b[2]; \
196  double w1 = iir->biquads[i].w1; \
197  double w2 = iir->biquads[i].w2; \
198  \
199  for (n = 0; n < in->nb_samples; n++) { \
200  double i0 = ig * (i ? dst[n] : src[n]); \
201  double o0 = i0 * b0 + w1; \
202  \
203  w1 = b1 * i0 + w2 + a1 * o0; \
204  w2 = b2 * i0 + a2 * o0; \
205  o0 *= og * g; \
206  \
207  o0 = o0 * mix + imix * i0; \
208  if (need_clipping && o0 < min) { \
209  (*clippings)++; \
210  dst[n] = min; \
211  } else if (need_clipping && o0 > max) { \
212  (*clippings)++; \
213  dst[n] = max; \
214  } else { \
215  dst[n] = o0; \
216  } \
217  } \
218  iir->biquads[i].w1 = w1; \
219  iir->biquads[i].w2 = w2; \
220  } \
221  \
222  return 0; \
223 }
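/*
 * Each second-order section above uses the transposed direct form II
 * recurrence (note that a1 and a2 are sign-flipped when they are loaded):
 *
 *     y[n] = b0*x[n] + w1
 *     w1   = b1*x[n] + w2 + a1*y[n]
 *     w2   = b2*x[n]      + a2*y[n]
 *
 * A minimal single-section sketch of the same step, for illustration only
 * (biquad_tdf2_step is a hypothetical helper, not part of this filter;
 * a1 and a2 are expected already sign-flipped, as in the macro above):
 *
 *     static double biquad_tdf2_step(double x, const double b[3],
 *                                    double a1, double a2,
 *                                    double *w1, double *w2)
 *     {
 *         double y = b[0] * x + *w1;
 *         *w1 = b[1] * x + *w2 + a1 * y;
 *         *w2 = b[2] * x + a2 * y;
 *         return y;
 *     }
 */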
224 
225 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
226 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
227 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
228 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
229 
230 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
231 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
232  int ch, int nb_jobs) \
233 { \
234  AudioIIRContext *s = ctx->priv; \
235  const double ig = s->dry_gain; \
236  const double og = s->wet_gain; \
237  const double mix = s->mix; \
238  const double imix = 1. - mix; \
239  ThreadData *td = arg; \
240  AVFrame *in = td->in, *out = td->out; \
241  const type *src = (const type *)in->extended_data[ch]; \
242  type *dst = (type *)out->extended_data[ch]; \
243  IIRChannel *iir = &s->iir[ch]; \
244  const double g = iir->g; \
245  const double fir = iir->fir; \
246  int *clippings = &iir->clippings; \
247  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
248  int n, i; \
249  \
250  for (i = 0; i < nb_biquads; i++) { \
251  const double a1 = -iir->biquads[i].a[1]; \
252  const double a2 = -iir->biquads[i].a[2]; \
253  const double b1 = iir->biquads[i].b[1]; \
254  const double b2 = iir->biquads[i].b[2]; \
255  double w1 = iir->biquads[i].w1; \
256  double w2 = iir->biquads[i].w2; \
257  \
258  for (n = 0; n < in->nb_samples; n++) { \
259  double i0 = ig * src[n]; \
260  double o0 = w1; \
261  \
262  w1 = b1 * i0 + w2 + a1 * o0; \
263  w2 = b2 * i0 + a2 * o0; \
264  o0 *= og * g; \
265  o0 += dst[n]; \
266  \
267  if (need_clipping && o0 < min) { \
268  (*clippings)++; \
269  dst[n] = min; \
270  } else if (need_clipping && o0 > max) { \
271  (*clippings)++; \
272  dst[n] = max; \
273  } else { \
274  dst[n] = o0; \
275  } \
276  } \
277  iir->biquads[i].w1 = w1; \
278  iir->biquads[i].w2 = w2; \
279  } \
280  \
281  for (n = 0; n < in->nb_samples; n++) { \
282  dst[n] += fir * src[n]; \
283  dst[n] = dst[n] * mix + imix * src[n]; \
284  } \
285  \
286  return 0; \
287 }
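/*
 * The parallel kernels realize H(z) as a sum of sections instead of a cascade:
 *
 *     H(z) = fir + sum_i (b1_i*z^-1 + b2_i*z^-2) / (1 + a1_i*z^-1 + a2_i*z^-2)
 *
 * where a1_i/a2_i are the stored section coefficients (sign-flipped inside the
 * loop, as in the serial kernels). Every section is fed the same input, the
 * section outputs are accumulated into dst[], and the direct feed-through term
 * `fir` plus the dry/wet mix are applied in the final loop. The per-section b0
 * is forced to zero by convert_serial2parallel() below, which is why it does
 * not appear here.
 */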
288 
289 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
290 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
291 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
292 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
293 
294 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
295 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
296  int ch, int nb_jobs) \
297 { \
298  AudioIIRContext *s = ctx->priv; \
299  const double ig = s->dry_gain; \
300  const double og = s->wet_gain; \
301  const double mix = s->mix; \
302  ThreadData *td = arg; \
303  AVFrame *in = td->in, *out = td->out; \
304  const type *src = (const type *)in->extended_data[ch]; \
305  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
306  const int nb_stages = s->iir[ch].nb_ab[1]; \
307  const double *v = s->iir[ch].ab[0]; \
308  const double *k = s->iir[ch].ab[1]; \
309  const double g = s->iir[ch].g; \
310  int *clippings = &s->iir[ch].clippings; \
311  type *dst = (type *)out->extended_data[ch]; \
312  int n; \
313  \
314  for (n = 0; n < in->nb_samples; n++) { \
315  const double in = src[n] * ig; \
316  double out = 0.; \
317  \
318  n1 = in; \
319  for (int i = nb_stages - 1; i >= 0; i--) { \
320  n0 = n1 - k[i] * x[i]; \
321  p0 = n0 * k[i] + x[i]; \
322  out += p0 * v[i+1]; \
323  x[i] = p0; \
324  n1 = n0; \
325  } \
326  \
327  out += n1 * v[0]; \
328  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
329  x[0] = n1; \
330  out *= og * g; \
331  out = out * mix + in * (1. - mix); \
332  if (need_clipping && out < min) { \
333  (*clippings)++; \
334  dst[n] = min; \
335  } else if (need_clipping && out > max) { \
336  (*clippings)++; \
337  dst[n] = max; \
338  } else { \
339  dst[n] = out; \
340  } \
341  } \
342  \
343  return 0; \
344 }
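/*
 * The lattice-ladder kernels implement an all-pole lattice with ladder taps:
 * k[] holds the reflection coefficients (one per stage) and v[] the ladder
 * coefficients (one per stage plus one for the input branch). Per stage,
 * going from the last stage to the first:
 *
 *     n0   = n1 - k[i]*x[i]      (backward lattice recursion)
 *     p0   = n0*k[i] + x[i]      (updated state, stored back into x[i])
 *     out += p0 * v[i+1]         (ladder tap)
 *
 * and finally out += n1 * v[0]. This is why config_output() requires
 * nb_ab[0] == nb_ab[1] + 1 (one more ladder coefficient than reflection
 * coefficients) for format "ll".
 */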
345 
346 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
347 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
348 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
349 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
350 
351 static void count_coefficients(char *item_str, int *nb_items)
352 {
353  char *p;
354 
355  if (!item_str)
356  return;
357 
358  *nb_items = 1;
359  for (p = item_str; *p && *p != '|'; p++) {
360  if (*p == ' ')
361  (*nb_items)++;
362  }
363 }
364 
365 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
366 {
367  AudioIIRContext *s = ctx->priv;
368  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
369  int i;
370 
371  p = old_str = av_strdup(item_str);
372  if (!p)
373  return AVERROR(ENOMEM);
374  for (i = 0; i < nb_items; i++) {
375  if (!(arg = av_strtok(p, "|", &saveptr)))
376  arg = prev_arg;
377 
378  if (!arg) {
379  av_freep(&old_str);
380  return AVERROR(EINVAL);
381  }
382 
383  p = NULL;
384  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
385  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
386  av_freep(&old_str);
387  return AVERROR(EINVAL);
388  }
389 
390  prev_arg = arg;
391  }
392 
393  av_freep(&old_str);
394 
395  return 0;
396 }
397 
398 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
399 {
400  char *p, *arg, *old_str, *saveptr = NULL;
401  int i;
402 
403  p = old_str = av_strdup(item_str);
404  if (!p)
405  return AVERROR(ENOMEM);
406  for (i = 0; i < nb_items; i++) {
407  if (!(arg = av_strtok(p, " ", &saveptr)))
408  break;
409 
410  p = NULL;
411  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
412  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
413  av_freep(&old_str);
414  return AVERROR(EINVAL);
415  }
416  }
417 
418  av_freep(&old_str);
419 
420  return 0;
421 }
422 
423 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
424 {
425  char *p, *arg, *old_str, *saveptr = NULL;
426  int i;
427 
428  p = old_str = av_strdup(item_str);
429  if (!p)
430  return AVERROR(ENOMEM);
431  for (i = 0; i < nb_items; i++) {
432  if (!(arg = av_strtok(p, " ", &saveptr)))
433  break;
434 
435  p = NULL;
436  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
437  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
438  av_freep(&old_str);
439  return AVERROR(EINVAL);
440  }
441  }
442 
443  av_freep(&old_str);
444 
445  return 0;
446 }
447 
448 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
449 
450 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
451 {
452  AudioIIRContext *s = ctx->priv;
453  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
454  int i, ret;
455 
456  p = old_str = av_strdup(item_str);
457  if (!p)
458  return AVERROR(ENOMEM);
459  for (i = 0; i < channels; i++) {
460  IIRChannel *iir = &s->iir[i];
461 
462  if (!(arg = av_strtok(p, "|", &saveptr)))
463  arg = prev_arg;
464 
465  if (!arg) {
466  av_freep(&old_str);
467  return AVERROR(EINVAL);
468  }
469 
470  count_coefficients(arg, &iir->nb_ab[ab]);
471 
472  p = NULL;
473  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
474  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
475  if (!iir->ab[ab] || !iir->cache[ab]) {
476  av_freep(&old_str);
477  return AVERROR(ENOMEM);
478  }
479 
480  if (s->format > 0) {
481  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
482  } else {
483  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
484  }
485  if (ret < 0) {
486  av_freep(&old_str);
487  return ret;
488  }
489  prev_arg = arg;
490  }
491 
492  av_freep(&old_str);
493 
494  return 0;
495 }
496 
497 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
498 {
499  *RE = re * re2 - im * im2;
500  *IM = re * im2 + re2 * im;
501 }
502 
503 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
504 {
505  coefs[2 * n] = 1.0;
506 
507  for (int i = 1; i <= n; i++) {
508  for (int j = n - i; j < n; j++) {
509  double re, im;
510 
511  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
512  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
513 
514  coefs[2 * j] -= re;
515  coefs[2 * j + 1] -= im;
516  }
517  }
518 
519  for (int i = 0; i < n + 1; i++) {
520  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
521  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
522  coefs[2 * i + 1], i);
523  return AVERROR(EINVAL);
524  }
525  }
526 
527  return 0;
528 }
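/*
 * expand() multiplies out prod_i (z - r_i) for the n complex roots stored in
 * pz[] (interleaved re/im) and writes the polynomial coefficients to coefs[],
 * also interleaved, with coefs[2*i] holding the (real) coefficient of z^i.
 * Worked example (illustrative): the conjugate pair 0.5 +/- 0.5i expands to
 *
 *     (z - 0.5 - 0.5i)(z - 0.5 + 0.5i) = z^2 - z + 0.5
 *
 * so coefs becomes {0.5, 0, -1, 0, 1, 0}. A non-negligible imaginary part in
 * any coefficient means the supplied roots were not conjugate pairs, which is
 * rejected with the error logged above.
 */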
529 
530 static void normalize_coeffs(AVFilterContext *ctx, int ch)
531 {
532  AudioIIRContext *s = ctx->priv;
533  IIRChannel *iir = &s->iir[ch];
534  double sum_den = 0.;
535 
536  if (!s->normalize)
537  return;
538 
539  for (int i = 0; i < iir->nb_ab[1]; i++) {
540  sum_den += iir->ab[1][i];
541  }
542 
543  if (sum_den > 1e-6) {
544  double factor, sum_num = 0.;
545 
546  for (int i = 0; i < iir->nb_ab[0]; i++) {
547  sum_num += iir->ab[0][i];
548  }
549 
550  factor = sum_num / sum_den;
551 
552  for (int i = 0; i < iir->nb_ab[1]; i++) {
553  iir->ab[1][i] *= factor;
554  }
555  }
556 }
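/*
 * When the normalize option is enabled, normalize_coeffs() rescales the
 * numerator (ab[1]) so that the transfer function has unity gain at DC:
 * H(1) = sum(b) / sum(a) = 1. With the first-order example b = {0.2},
 * a = {1, -0.9}, sum(b) = 0.2 and sum(a) = 0.1, so b is scaled by 0.5 and
 * becomes {0.1}, giving 0 dB at 0 Hz.
 */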
557 
558 static int convert_zp2tf(AVFilterContext *ctx, int channels)
559 {
560  AudioIIRContext *s = ctx->priv;
561  int ch, i, j, ret = 0;
562 
563  for (ch = 0; ch < channels; ch++) {
564  IIRChannel *iir = &s->iir[ch];
565  double *topc, *botc;
566 
567  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
568  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
569  if (!topc || !botc) {
570  ret = AVERROR(ENOMEM);
571  goto fail;
572  }
573 
574  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
575  if (ret < 0) {
576  goto fail;
577  }
578 
579  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
580  if (ret < 0) {
581  goto fail;
582  }
583 
584  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
585  iir->ab[1][j] = topc[2 * i];
586  }
587  iir->nb_ab[1]++;
588 
589  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
590  iir->ab[0][j] = botc[2 * i];
591  }
592  iir->nb_ab[0]++;
593 
594  normalize_coeffs(ctx, ch);
595 
596 fail:
597  av_free(topc);
598  av_free(botc);
599  if (ret < 0)
600  break;
601  }
602 
603  return ret;
604 }
605 
606 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
607 {
608  AudioIIRContext *s = ctx->priv;
609  int ch, ret;
610 
611  for (ch = 0; ch < channels; ch++) {
612  IIRChannel *iir = &s->iir[ch];
613  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
614  int current_biquad = 0;
615 
616  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
617  if (!iir->biquads)
618  return AVERROR(ENOMEM);
619 
620  while (nb_biquads--) {
621  Pair outmost_pole = { -1, -1 };
622  Pair nearest_zero = { -1, -1 };
623  double zeros[4] = { 0 };
624  double poles[4] = { 0 };
625  double b[6] = { 0 };
626  double a[6] = { 0 };
627  double min_distance = DBL_MAX;
628  double max_mag = 0;
629  double factor;
630  int i;
631 
632  for (i = 0; i < iir->nb_ab[0]; i++) {
633  double mag;
634 
635  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
636  continue;
637  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
638 
639  if (mag > max_mag) {
640  max_mag = mag;
641  outmost_pole.a = i;
642  }
643  }
644 
645  for (i = 0; i < iir->nb_ab[0]; i++) {
646  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
647  continue;
648 
649  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
650  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
651  outmost_pole.b = i;
652  break;
653  }
654  }
655 
656  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
657 
658  if (outmost_pole.a < 0 || outmost_pole.b < 0)
659  return AVERROR(EINVAL);
660 
661  for (i = 0; i < iir->nb_ab[1]; i++) {
662  double distance;
663 
664  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
665  continue;
666  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
667  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
668 
669  if (distance < min_distance) {
670  min_distance = distance;
671  nearest_zero.a = i;
672  }
673  }
674 
675  for (i = 0; i < iir->nb_ab[1]; i++) {
676  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
677  continue;
678 
679  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
680  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
681  nearest_zero.b = i;
682  break;
683  }
684  }
685 
686  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
687 
688  if (nearest_zero.a < 0 || nearest_zero.b < 0)
689  return AVERROR(EINVAL);
690 
691  poles[0] = iir->ab[0][2 * outmost_pole.a ];
692  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
693 
694  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
695  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
696 
697  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
698  zeros[2] = 0;
699  zeros[3] = 0;
700 
701  poles[2] = 0;
702  poles[3] = 0;
703  } else {
704  poles[2] = iir->ab[0][2 * outmost_pole.b ];
705  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
706 
707  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
708  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
709  }
710 
711  ret = expand(ctx, zeros, 2, b);
712  if (ret < 0)
713  return ret;
714 
715  ret = expand(ctx, poles, 2, a);
716  if (ret < 0)
717  return ret;
718 
719  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
720  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
721  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
722  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
723 
724  iir->biquads[current_biquad].a[0] = 1.;
725  iir->biquads[current_biquad].a[1] = a[2] / a[4];
726  iir->biquads[current_biquad].a[2] = a[0] / a[4];
727  iir->biquads[current_biquad].b[0] = b[4] / a[4];
728  iir->biquads[current_biquad].b[1] = b[2] / a[4];
729  iir->biquads[current_biquad].b[2] = b[0] / a[4];
730 
731  if (s->normalize &&
732  fabs(iir->biquads[current_biquad].b[0] +
733  iir->biquads[current_biquad].b[1] +
734  iir->biquads[current_biquad].b[2]) > 1e-6) {
735  factor = (iir->biquads[current_biquad].a[0] +
736  iir->biquads[current_biquad].a[1] +
737  iir->biquads[current_biquad].a[2]) /
738  (iir->biquads[current_biquad].b[0] +
739  iir->biquads[current_biquad].b[1] +
740  iir->biquads[current_biquad].b[2]);
741 
742  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
743 
744  iir->biquads[current_biquad].b[0] *= factor;
745  iir->biquads[current_biquad].b[1] *= factor;
746  iir->biquads[current_biquad].b[2] *= factor;
747  }
748 
749  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
750  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
751  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
752 
753  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
754  iir->biquads[current_biquad].a[0],
755  iir->biquads[current_biquad].a[1],
756  iir->biquads[current_biquad].a[2],
757  iir->biquads[current_biquad].b[0],
758  iir->biquads[current_biquad].b[1],
759  iir->biquads[current_biquad].b[2]);
760 
761  current_biquad++;
762  }
763  }
764 
765  return 0;
766 }
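/*
 * decompose_zp2biquads() greedily pairs the remaining pole with the largest
 * magnitude with its complex conjugate and with the closest zero pair,
 * expands each pair into a second-order numerator/denominator, normalizes the
 * section so that a0 = 1, optionally rescales the numerator for unity DC gain,
 * and folds the overall channel gain g into the first section only. Consumed
 * poles and zeros are marked with NAN so they are skipped in later iterations.
 */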
767 
768 static void biquad_process(double *x, double *y, int length,
769  double b0, double b1, double b2,
770  double a1, double a2)
771 {
772  double w1 = 0., w2 = 0.;
773 
774  a1 = -a1;
775  a2 = -a2;
776 
777  for (int n = 0; n < length; n++) {
778  double out, in = x[n];
779 
780  y[n] = out = in * b0 + w1;
781  w1 = b1 * in + w2 + a1 * out;
782  w2 = b2 * in + a2 * out;
783  }
784 }
785 
786 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
787 {
788  double sum = 0.;
789 
790  for (int i = 0; i < n; i++) {
791  for (int j = i; j < n; j++) {
792  sum = 0.;
793  for (int k = 0; k < i; k++)
794  sum += lu[i * n + k] * lu[k * n + j];
795  lu[i * n + j] = matrix[j * n + i] - sum;
796  }
797  for (int j = i + 1; j < n; j++) {
798  sum = 0.;
799  for (int k = 0; k < i; k++)
800  sum += lu[j * n + k] * lu[k * n + i];
801  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
802  }
803  }
804 
805  for (int i = 0; i < n; i++) {
806  sum = 0.;
807  for (int k = 0; k < i; k++)
808  sum += lu[i * n + k] * y[k];
809  y[i] = vector[i] - sum;
810  }
811 
812  for (int i = n - 1; i >= 0; i--) {
813  sum = 0.;
814  for (int k = i + 1; k < n; k++)
815  sum += lu[i * n + k] * x[k];
816  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
817  }
818 }
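/*
 * solve() performs an LU decomposition (Doolittle-style, unit lower diagonal
 * stored implicitly in lu[]) followed by forward and back substitution.
 * Because the input is read as matrix[j * n + i], i.e. column-major, the
 * system effectively solved is transpose(matrix) * x = vector; y and lu are
 * caller-provided scratch buffers of size n and n*n.
 */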
819 
820 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
821 {
822  AudioIIRContext *s = ctx->priv;
823  int ret = 0;
824 
825  for (int ch = 0; ch < channels; ch++) {
826  IIRChannel *iir = &s->iir[ch];
827  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
828  int length = nb_biquads * 2 + 1;
829  double *impulse = av_calloc(length, sizeof(*impulse));
830  double *y = av_calloc(length, sizeof(*y));
831  double *resp = av_calloc(length, sizeof(*resp));
832  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
833  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
834 
835  if (!impulse || !y || !resp || !M || !W) {
836  av_free(impulse);
837  av_free(y);
838  av_free(resp);
839  av_free(M);
840  av_free(W);
841  return AVERROR(ENOMEM);
842  }
843 
844  impulse[0] = 1.;
845 
846  for (int n = 0; n < nb_biquads; n++) {
847  BiquadContext *biquad = &iir->biquads[n];
848 
849  biquad_process(n ? y : impulse, y, length,
850  biquad->b[0], biquad->b[1], biquad->b[2],
851  biquad->a[1], biquad->a[2]);
852  }
853 
854  for (int n = 0; n < nb_biquads; n++) {
855  BiquadContext *biquad = &iir->biquads[n];
856 
857  biquad_process(impulse, resp, length - 1,
858  1., 0., 0., biquad->a[1], biquad->a[2]);
859 
860  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
861  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
862  memset(resp, 0, length * sizeof(*resp));
863  }
864 
865  solve(M, &y[1], length - 1, &impulse[1], resp, W);
866 
867  iir->fir = y[0];
868 
869  for (int n = 0; n < nb_biquads; n++) {
870  BiquadContext *biquad = &iir->biquads[n];
871 
872  biquad->b[0] = 0.;
873  biquad->b[1] = resp[n * 2 + 0];
874  biquad->b[2] = resp[n * 2 + 1];
875  }
876 
877  av_free(impulse);
878  av_free(y);
879  av_free(resp);
880  av_free(M);
881  av_free(W);
882 
883  if (ret < 0)
884  return ret;
885  }
886 
887  return 0;
888 }
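/*
 * convert_serial2parallel() re-fits the cascade of biquads produced by
 * decompose_zp2biquads() as a parallel bank: it computes the impulse response
 * of the serial cascade (length 2*nb_biquads + 1), then the impulse response
 * of each all-pole section 1 / (1 + a1*z^-1 + a2*z^-2), assembles these into
 * the matrix M, and solves the resulting linear system so that a direct
 * feed-through tap (iir->fir = y[0]) plus per-section numerators b1, b2
 * (with b0 = 0) reproduce the same overall impulse response.
 */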
889 
890 static void convert_pr2zp(AVFilterContext *ctx, int channels)
891 {
892  AudioIIRContext *s = ctx->priv;
893  int ch;
894 
895  for (ch = 0; ch < channels; ch++) {
896  IIRChannel *iir = &s->iir[ch];
897  int n;
898 
899  for (n = 0; n < iir->nb_ab[0]; n++) {
900  double r = iir->ab[0][2*n];
901  double angle = iir->ab[0][2*n+1];
902 
903  iir->ab[0][2*n] = r * cos(angle);
904  iir->ab[0][2*n+1] = r * sin(angle);
905  }
906 
907  for (n = 0; n < iir->nb_ab[1]; n++) {
908  double r = iir->ab[1][2*n];
909  double angle = iir->ab[1][2*n+1];
910 
911  iir->ab[1][2*n] = r * cos(angle);
912  iir->ab[1][2*n+1] = r * sin(angle);
913  }
914  }
915 }
916 
917 static void convert_sp2zp(AVFilterContext *ctx, int channels)
918 {
919  AudioIIRContext *s = ctx->priv;
920  int ch;
921 
922  for (ch = 0; ch < channels; ch++) {
923  IIRChannel *iir = &s->iir[ch];
924  int n;
925 
926  for (n = 0; n < iir->nb_ab[0]; n++) {
927  double sr = iir->ab[0][2*n];
928  double si = iir->ab[0][2*n+1];
929 
930  iir->ab[0][2*n] = exp(sr) * cos(si);
931  iir->ab[0][2*n+1] = exp(sr) * sin(si);
932  }
933 
934  for (n = 0; n < iir->nb_ab[1]; n++) {
935  double sr = iir->ab[1][2*n];
936  double si = iir->ab[1][2*n+1];
937 
938  iir->ab[1][2*n] = exp(sr) * cos(si);
939  iir->ab[1][2*n+1] = exp(sr) * sin(si);
940  }
941  }
942 }
943 
944 static double fact(double i)
945 {
946  if (i <= 0.)
947  return 1.;
948  return i * fact(i - 1.);
949 }
950 
951 static double coef_sf2zf(double *a, int N, int n)
952 {
953  double z = 0.;
954 
955  for (int i = 0; i <= N; i++) {
956  double acc = 0.;
957 
958  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
959  acc += ((fact(i) * fact(N - i)) /
960  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
961  ((k & 1) ? -1. : 1.);
962  }
963 
964  z += a[i] * pow(2., i) * acc;
965  }
966 
967  return z;
968 }
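/*
 * coef_sf2zf() maps one analog ("sf") polynomial coefficient set into the
 * digital domain with a bilinear-transform style substitution
 * s -> 2*(1 - z^-1)/(1 + z^-1): after multiplying the common factor
 * (1 + z^-1)^N through, the returned value is the coefficient of z^-n in
 *
 *     sum_i a[i] * [2*(1 - z^-1)]^i * (1 + z^-1)^(N - i)
 *
 * expanded with binomial coefficients C(i, k) and C(N - i, n - k); the
 * (k & 1) factor supplies the alternating signs of (1 - z^-1)^i and fact()
 * the factorials. convert_sf2tf() below applies this to both the numerator
 * and the denominator of each channel.
 */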
969 
970 static void convert_sf2tf(AVFilterContext *ctx, int channels)
971 {
972  AudioIIRContext *s = ctx->priv;
973  int ch;
974 
975  for (ch = 0; ch < channels; ch++) {
976  IIRChannel *iir = &s->iir[ch];
977  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
978  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
979 
980  if (!temp0 || !temp1)
981  goto next;
982 
983  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
984  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
985 
986  for (int n = 0; n < iir->nb_ab[0]; n++)
987  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
988 
989  for (int n = 0; n < iir->nb_ab[1]; n++)
990  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
991 
992 next:
993  av_free(temp0);
994  av_free(temp1);
995  }
996 }
997 
998 static void convert_pd2zp(AVFilterContext *ctx, int channels)
999 {
1000  AudioIIRContext *s = ctx->priv;
1001  int ch;
1002 
1003  for (ch = 0; ch < channels; ch++) {
1004  IIRChannel *iir = &s->iir[ch];
1005  int n;
1006 
1007  for (n = 0; n < iir->nb_ab[0]; n++) {
1008  double r = iir->ab[0][2*n];
1009  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1010 
1011  iir->ab[0][2*n] = r * cos(angle);
1012  iir->ab[0][2*n+1] = r * sin(angle);
1013  }
1014 
1015  for (n = 0; n < iir->nb_ab[1]; n++) {
1016  double r = iir->ab[1][2*n];
1017  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1018 
1019  iir->ab[1][2*n] = r * cos(angle);
1020  iir->ab[1][2*n+1] = r * sin(angle);
1021  }
1022  }
1023 }
1024 
1025 static void check_stability(AVFilterContext *ctx, int channels)
1026 {
1027  AudioIIRContext *s = ctx->priv;
1028  int ch;
1029 
1030  for (ch = 0; ch < channels; ch++) {
1031  IIRChannel *iir = &s->iir[ch];
1032 
1033  for (int n = 0; n < iir->nb_ab[0]; n++) {
1034  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1035 
1036  if (pr >= 1.) {
1037  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1038  break;
1039  }
1040  }
1041  }
1042 }
1043 
1044 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1045 {
1046  const uint8_t *font;
1047  int font_height;
1048  int i;
1049 
1050  font = avpriv_cga_font, font_height = 8;
1051 
1052  for (i = 0; txt[i]; i++) {
1053  int char_y, mask;
1054 
1055  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1056  for (char_y = 0; char_y < font_height; char_y++) {
1057  for (mask = 0x80; mask; mask >>= 1) {
1058  if (font[txt[i] * font_height + char_y] & mask)
1059  AV_WL32(p, color);
1060  p += 4;
1061  }
1062  p += pic->linesize[0] - 8 * 4;
1063  }
1064  }
1065 }
1066 
1067 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1068 {
1069  int dx = FFABS(x1-x0);
1070  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1071  int err = (dx>dy ? dx : -dy) / 2, e2;
1072 
1073  for (;;) {
1074  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1075 
1076  if (x0 == x1 && y0 == y1)
1077  break;
1078 
1079  e2 = err;
1080 
1081  if (e2 >-dx) {
1082  err -= dy;
1083  x0--;
1084  }
1085 
1086  if (e2 < dy) {
1087  err += dx;
1088  y0 += sy;
1089  }
1090  }
1091 }
1092 
1093 static double distance(double x0, double x1, double y0, double y1)
1094 {
1095  return hypot(x0 - x1, y0 - y1);
1096 }
1097 
1098 static void get_response(int channel, int format, double w,
1099  const double *b, const double *a,
1100  int nb_b, int nb_a, double *magnitude, double *phase)
1101 {
1102  double realz, realp;
1103  double imagz, imagp;
1104  double real, imag;
1105  double div;
1106 
1107  if (format == 0) {
1108  realz = 0., realp = 0.;
1109  imagz = 0., imagp = 0.;
1110  for (int x = 0; x < nb_a; x++) {
1111  realz += cos(-x * w) * a[x];
1112  imagz += sin(-x * w) * a[x];
1113  }
1114 
1115  for (int x = 0; x < nb_b; x++) {
1116  realp += cos(-x * w) * b[x];
1117  imagp += sin(-x * w) * b[x];
1118  }
1119 
1120  div = realp * realp + imagp * imagp;
1121  real = (realz * realp + imagz * imagp) / div;
1122  imag = (imagz * realp - imagp * realz) / div;
1123 
1124  *magnitude = hypot(real, imag);
1125  *phase = atan2(imag, real);
1126  } else {
1127  double p = 1., z = 1.;
1128  double acc = 0.;
1129 
1130  for (int x = 0; x < nb_a; x++) {
1131  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1132  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1133  }
1134 
1135  for (int x = 0; x < nb_b; x++) {
1136  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1137  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1138  }
1139 
1140  *magnitude = z / p;
1141  *phase = acc;
1142  }
1143 }
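/*
 * get_response() evaluates the frequency response at the normalized frequency
 * w (radians/sample). For transfer-function coefficients it computes
 * H(e^jw) = B(e^jw) / A(e^jw) directly and returns |H| and arg(H); for the
 * zero/pole formats it uses the geometric form
 *
 *     |H(e^jw)|   = prod_i |e^jw - z_i| / prod_i |e^jw - p_i|
 *     arg H(e^jw) = sum_i arg(e^jw - z_i) - sum_i arg(e^jw - p_i)
 *
 * where z_i and p_i are the zeros and poles. Note that, as called from
 * draw_response(), the parameter named `b` receives ab[0] (the poles/A side)
 * and `a` receives ab[1] (the zeros/B side).
 */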
1144 
1145 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1146 {
1147  AudioIIRContext *s = ctx->priv;
1148  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1149  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1150  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1151  char text[32];
1152  int ch, i;
1153 
1154  memset(out->data[0], 0, s->h * out->linesize[0]);
1155 
1156  phase = av_malloc_array(s->w, sizeof(*phase));
1157  temp = av_malloc_array(s->w, sizeof(*temp));
1158  mag = av_malloc_array(s->w, sizeof(*mag));
1159  delay = av_malloc_array(s->w, sizeof(*delay));
1160  if (!mag || !phase || !delay || !temp)
1161  goto end;
1162 
1163  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1164  for (i = 0; i < s->w; i++) {
1165  const double *b = s->iir[ch].ab[0];
1166  const double *a = s->iir[ch].ab[1];
1167  const int nb_b = s->iir[ch].nb_ab[0];
1168  const int nb_a = s->iir[ch].nb_ab[1];
1169  double w = i * M_PI / (s->w - 1);
1170  double m, p;
1171 
1172  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1173 
1174  mag[i] = s->iir[ch].g * m;
1175  phase[i] = p;
1176  min = fmin(min, mag[i]);
1177  max = fmax(max, mag[i]);
1178  }
1179 
1180  temp[0] = 0.;
1181  for (i = 0; i < s->w - 1; i++) {
1182  double d = phase[i] - phase[i + 1];
1183  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1184  }
1185 
1186  min_phase = phase[0];
1187  max_phase = phase[0];
1188  for (i = 1; i < s->w; i++) {
1189  temp[i] += temp[i - 1];
1190  phase[i] += temp[i];
1191  min_phase = fmin(min_phase, phase[i]);
1192  max_phase = fmax(max_phase, phase[i]);
1193  }
1194 
1195  for (i = 0; i < s->w - 1; i++) {
1196  double div = s->w / (double)sample_rate;
1197 
1198  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1199  min_delay = fmin(min_delay, delay[i + 1]);
1200  max_delay = fmax(max_delay, delay[i + 1]);
1201  }
1202  delay[0] = delay[1];
1203 
1204  for (i = 0; i < s->w; i++) {
1205  int ymag = mag[i] / max * (s->h - 1);
1206  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1207  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1208 
1209  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1210  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1211  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1212 
1213  if (prev_ymag < 0)
1214  prev_ymag = ymag;
1215  if (prev_yphase < 0)
1216  prev_yphase = yphase;
1217  if (prev_ydelay < 0)
1218  prev_ydelay = ydelay;
1219 
1220  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1221  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1222  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1223 
1224  prev_ymag = ymag;
1225  prev_yphase = yphase;
1226  prev_ydelay = ydelay;
1227  }
1228 
1229  if (s->w > 400 && s->h > 100) {
1230  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1231  snprintf(text, sizeof(text), "%.2f", max);
1232  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1233 
1234  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1235  snprintf(text, sizeof(text), "%.2f", min);
1236  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1237 
1238  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1239  snprintf(text, sizeof(text), "%.2f", max_phase);
1240  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1241 
1242  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1243  snprintf(text, sizeof(text), "%.2f", min_phase);
1244  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1245 
1246  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1247  snprintf(text, sizeof(text), "%.2f", max_delay);
1248  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1249 
1250  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1251  snprintf(text, sizeof(text), "%.2f", min_delay);
1252  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1253  }
1254 
1255 end:
1256  av_free(delay);
1257  av_free(temp);
1258  av_free(phase);
1259  av_free(mag);
1260 }
1261 
1262 static int config_output(AVFilterLink *outlink)
1263 {
1264  AVFilterContext *ctx = outlink->src;
1265  AudioIIRContext *s = ctx->priv;
1266  AVFilterLink *inlink = ctx->inputs[0];
1267  int ch, ret, i;
1268 
1269  s->channels = inlink->ch_layout.nb_channels;
1270  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1271  if (!s->iir)
1272  return AVERROR(ENOMEM);
1273 
1274  ret = read_gains(ctx, s->g_str, inlink->ch_layout.nb_channels);
1275  if (ret < 0)
1276  return ret;
1277 
1278  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->a_str, 0);
1279  if (ret < 0)
1280  return ret;
1281 
1282  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->b_str, 1);
1283  if (ret < 0)
1284  return ret;
1285 
1286  if (s->format == -1) {
1287  convert_sf2tf(ctx, inlink->ch_layout.nb_channels);
1288  s->format = 0;
1289  } else if (s->format == 2) {
1290  convert_pr2zp(ctx, inlink->ch_layout.nb_channels);
1291  } else if (s->format == 3) {
1292  convert_pd2zp(ctx, inlink->ch_layout.nb_channels);
1293  } else if (s->format == 4) {
1294  convert_sp2zp(ctx, inlink->ch_layout.nb_channels);
1295  }
1296  if (s->format > 0) {
1297  check_stability(ctx, inlink->ch_layout.nb_channels);
1298  }
1299 
1300  av_frame_free(&s->video);
1301  if (s->response) {
1302  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1303  if (!s->video)
1304  return AVERROR(ENOMEM);
1305 
1306  draw_response(ctx, s->video, inlink->sample_rate);
1307  }
1308 
1309  if (s->format == 0)
1310  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
1311 
1312  if (s->format > 0 && s->process == 0) {
1313  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1314 
1315  ret = convert_zp2tf(ctx, inlink->ch_layout.nb_channels);
1316  if (ret < 0)
1317  return ret;
1318  } else if (s->format == -2 && s->process > 0) {
1319  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1320  return AVERROR_PATCHWELCOME;
1321  } else if (s->format <= 0 && s->process == 1) {
1322  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1323  return AVERROR_PATCHWELCOME;
1324  } else if (s->format <= 0 && s->process == 2) {
1325  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1326  return AVERROR_PATCHWELCOME;
1327  } else if (s->format > 0 && s->process == 1) {
1328  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1329  if (ret < 0)
1330  return ret;
1331  } else if (s->format > 0 && s->process == 2) {
1332  if (s->precision > 1)
1333  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1334  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1335  if (ret < 0)
1336  return ret;
1337  ret = convert_serial2parallel(ctx, inlink->ch_layout.nb_channels);
1338  if (ret < 0)
1339  return ret;
1340  }
1341 
1342  for (ch = 0; s->format == -2 && ch < inlink->ch_layout.nb_channels; ch++) {
1343  IIRChannel *iir = &s->iir[ch];
1344 
1345  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1346  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1347  return AVERROR(EINVAL);
1348  }
1349  }
1350 
1351  for (ch = 0; s->format == 0 && ch < inlink->ch_layout.nb_channels; ch++) {
1352  IIRChannel *iir = &s->iir[ch];
1353 
1354  for (i = 1; i < iir->nb_ab[0]; i++) {
1355  iir->ab[0][i] /= iir->ab[0][0];
1356  }
1357 
1358  iir->ab[0][0] = 1.0;
1359  for (i = 0; i < iir->nb_ab[1]; i++) {
1360  iir->ab[1][i] *= iir->g;
1361  }
1362 
1363  normalize_coeffs(ctx, ch);
1364  }
1365 
1366  switch (inlink->format) {
1367  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1368  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1369  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1370  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1371  }
1372 
1373  if (s->format == -2) {
1374  switch (inlink->format) {
1375  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1376  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1377  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1378  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1379  }
1380  }
1381 
1382  return 0;
1383 }
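/*
 * Summary of the format/process combinations accepted by config_output():
 *   - ll (-2): lattice-ladder, direct processing only;
 *   - sf (-1): converted to tf first, then treated like tf;
 *   - tf  (0): direct processing only (serial and parallel are rejected);
 *   - zp/pr/pd/sp (1..4): converted to Z-plane zeros/poles and checked for
 *     stability; with process=d they are expanded back to tf coefficients
 *     (with a warning), with process=s decomposed into serial biquads, and
 *     with process=p additionally converted to a parallel biquad bank.
 */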
1384 
1385 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1386 {
1387  AVFilterContext *ctx = inlink->dst;
1388  AudioIIRContext *s = ctx->priv;
1389  AVFilterLink *outlink = ctx->outputs[0];
1390  ThreadData td;
1391  AVFrame *out;
1392  int ch, ret;
1393 
1394  if (av_frame_is_writable(in) && s->process != 2) {
1395  out = in;
1396  } else {
1397  out = ff_get_audio_buffer(outlink, in->nb_samples);
1398  if (!out) {
1399  av_frame_free(&in);
1400  return AVERROR(ENOMEM);
1401  }
1402  av_frame_copy_props(out, in);
1403  }
1404 
1405  td.in = in;
1406  td.out = out;
1407  ff_filter_execute(ctx, s->iir_channel, &td, NULL, outlink->ch_layout.nb_channels);
1408 
1409  for (ch = 0; ch < outlink->ch_layout.nb_channels; ch++) {
1410  if (s->iir[ch].clippings > 0)
1411  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1412  ch, s->iir[ch].clippings);
1413  s->iir[ch].clippings = 0;
1414  }
1415 
1416  if (in != out)
1417  av_frame_free(&in);
1418 
1419  if (s->response) {
1420  AVFilterLink *outlink = ctx->outputs[1];
1421  int64_t old_pts = s->video->pts;
1422  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1423 
1424  if (new_pts > old_pts) {
1425  AVFrame *clone;
1426 
1427  s->video->pts = new_pts;
1428  clone = av_frame_clone(s->video);
1429  if (!clone)
1430  return AVERROR(ENOMEM);
1431  ret = ff_filter_frame(outlink, clone);
1432  if (ret < 0)
1433  return ret;
1434  }
1435  }
1436 
1437  return ff_filter_frame(outlink, out);
1438 }
1439 
1440 static int config_video(AVFilterLink *outlink)
1441 {
1442  AVFilterContext *ctx = outlink->src;
1443  AudioIIRContext *s = ctx->priv;
1444 
1445  outlink->sample_aspect_ratio = (AVRational){1,1};
1446  outlink->w = s->w;
1447  outlink->h = s->h;
1448  outlink->frame_rate = s->rate;
1449  outlink->time_base = av_inv_q(outlink->frame_rate);
1450 
1451  return 0;
1452 }
1453 
1454 static av_cold int init(AVFilterContext *ctx)
1455 {
1456  AudioIIRContext *s = ctx->priv;
1457  AVFilterPad pad, vpad;
1458  int ret;
1459 
1460  if (!s->a_str || !s->b_str || !s->g_str) {
1461  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1462  return AVERROR(EINVAL);
1463  }
1464 
1465  switch (s->precision) {
1466  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1467  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1468  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1469  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1470  default: return AVERROR_BUG;
1471  }
1472 
1473  pad = (AVFilterPad){
1474  .name = "default",
1475  .type = AVMEDIA_TYPE_AUDIO,
1476  .config_props = config_output,
1477  };
1478 
1479  ret = ff_append_outpad(ctx, &pad);
1480  if (ret < 0)
1481  return ret;
1482 
1483  if (s->response) {
1484  vpad = (AVFilterPad){
1485  .name = "filter_response",
1486  .type = AVMEDIA_TYPE_VIDEO,
1487  .config_props = config_video,
1488  };
1489 
1490  ret = ff_append_outpad(ctx, &vpad);
1491  if (ret < 0)
1492  return ret;
1493  }
1494 
1495  return 0;
1496 }
1497 
1498 static av_cold void uninit(AVFilterContext *ctx)
1499 {
1500  AudioIIRContext *s = ctx->priv;
1501  int ch;
1502 
1503  if (s->iir) {
1504  for (ch = 0; ch < s->channels; ch++) {
1505  IIRChannel *iir = &s->iir[ch];
1506  av_freep(&iir->ab[0]);
1507  av_freep(&iir->ab[1]);
1508  av_freep(&iir->cache[0]);
1509  av_freep(&iir->cache[1]);
1510  av_freep(&iir->biquads);
1511  }
1512  }
1513  av_freep(&s->iir);
1514 
1515  av_frame_free(&s->video);
1516 }
1517 
1518 static const AVFilterPad inputs[] = {
1519  {
1520  .name = "default",
1521  .type = AVMEDIA_TYPE_AUDIO,
1522  .filter_frame = filter_frame,
1523  },
1524 };
1525 
1526 #define OFFSET(x) offsetof(AudioIIRContext, x)
1527 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1528 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1529 
1530 static const AVOption aiir_options[] = {
1531  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1532  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1533  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1534  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1535  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1536  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1537  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1538  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1539  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1540  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1541  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, .unit = "format" },
1542  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, .unit = "format" },
1543  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "format" },
1544  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "format" },
1545  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "format" },
1546  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "format" },
1547  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, .unit = "format" },
1548  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1549  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1550  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "process" },
1551  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "process" },
1552  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "process" },
1553  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1554  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1555  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
1556  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
1557  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
1558  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "precision" },
1559  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1560  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1561  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1562  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1563  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1564  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1565  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1566  { NULL },
1567 };
1568 
1569 AVFILTER_DEFINE_CLASS(aiir);
1570 
1571 const AVFilter ff_af_aiir = {
1572  .name = "aiir",
1573  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1574  .priv_size = sizeof(AudioIIRContext),
1575  .priv_class = &aiir_class,
1576  .init = init,
1577  .uninit = uninit,
1578  FILTER_INPUTS(inputs),
1579  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1580  AVFILTER_FLAG_SLICE_THREADS,
1581  FILTER_QUERY_FUNC(query_formats),
1582 };
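/*
 * Example invocation (illustrative only; see doc/filters.texi for the
 * authoritative option documentation). A one-pole lowpass
 * y[n] = 0.1*x[n] + 0.9*y[n-1], given as digital transfer-function
 * coefficients and processed directly:
 *
 *     ffmpeg -i in.wav -af "aiir=zeros=0.1:poles=1 -0.9:gains=1:format=tf:process=d" out.wav
 */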
coef_sf2zf
static double coef_sf2zf(double *a, int N, int n)
Definition: af_aiir.c:951
Pair
Definition: af_aiir.c:38
M
#define M(a, b)
Definition: vp3dsp.c:48
AudioIIRContext::format
int format
Definition: af_aiir.c:64
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_clip
#define av_clip
Definition: common.h:99
mix
static int mix(int c0, int c1)
Definition: 4xm.c:716
r
const char * r
Definition: vf_curves.c:127
acc
int acc
Definition: yuv2rgb.c:553
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:436
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:424
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:55
color
Definition: vf_paletteuse.c:512
IIRChannel::clippings
int clippings
Definition: af_aiir.c:55
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1015
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:248
AF
#define AF
Definition: af_aiir.c:1527
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1518
matrix
Definition: vc1dsp.c:43
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:49
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:43
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1530
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:820
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
RE
#define RE(x, ch)
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:450
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:69
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:54
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:346
b
#define b
Definition: input.c:41
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:115
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:65
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:398
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:78
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1025
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:822
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1262
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:60
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:786
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:527
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1440
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:72
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:60
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
BiquadContext
Definition: af_aiir.c:42
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2035
fail
#define fail()
Definition: checkasm.h:179
W
@ W
Definition: vf_addroi.c:27
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aiir.c:81
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
AudioIIRContext::h
int h
Definition: af_aiir.c:68
a1
#define a1
Definition: regdef.h:47
AudioIIRContext::process
int process
Definition: af_aiir.c:65
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1385
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:558
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:237
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:679
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:874
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
ctx
AVFormatContext * ctx
Definition: movenc.c:49
channels
channels
Definition: aptx.h:31
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:593
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:52
NAN
#define NAN
Definition: mathematics.h:115
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:171
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:73
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:79
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:245
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:70
ff_set_common_all_channel_counts
int ff_set_common_all_channel_counts(AVFilterContext *ctx)
Equivalent to ff_set_common_channel_layouts(ctx, ff_all_channel_counts())
Definition: formats.c:804
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:530
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1454
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:917
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
BiquadContext::w1
double w1
Definition: af_aiir.c:45
BiquadContext::w2
double w2
Definition: af_aiir.c:45
format
static const char *const format[]
Definition: af_aiir.c:448
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1067
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
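As a reminder of what this wrapper is for, a trivial example: hypot(re, im) is the numerically safe way to compute sqrt(re*re + im*im), e.g. the magnitude of a complex frequency-response value:

    #include <math.h>

    /* Magnitude of the complex number re + j*im without intermediate
     * overflow or underflow. */
    static double complex_magnitude(double re, double im)
    {
        return hypot(re, im);
    }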
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:645
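A sketch of the common libavfilter pattern this check supports (a generic illustration, not a copy of this filter's filter_frame()): process in place when the input buffers are writable, otherwise allocate a fresh output frame and carry the metadata over:

    #include <libavutil/frame.h>
    #include "avfilter.h"
    #include "audio.h"

    /* Hypothetical helper: return a frame that is safe to write into. */
    static AVFrame *writable_output(AVFilterLink *outlink, AVFrame *in)
    {
        AVFrame *out;

        if (av_frame_is_writable(in))
            return in;                          /* modify in place */

        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out)
            return NULL;                        /* out-of-memory path */
        av_frame_copy_props(out, in);           /* keep pts, metadata, ... */
        return out;
    }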
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:423
IM
#define IM(x, ch)
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
xga_font_data.h
fact
static double fact(double i)
Definition: af_aiir.c:944
IIRChannel::g
double g
Definition: af_aiir.c:51
Pair::b
int b
Definition: af_aiir.c:39
M_PI
#define M_PI
Definition: mathematics.h:67
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
internal.h
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:60
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:454
VF
#define VF
Definition: af_aiir.c:1528
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:76
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1526
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:50
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ThreadData
Used for passing data between threads.
Definition: af_aiir.c:34
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
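A one-line illustration (the conversion itself is an assumed use case): inverting a rational turns a rate into the corresponding time base:

    #include <libavutil/rational.h>

    /* 25/1 frames per second becomes a 1/25 second time base. */
    static AVRational rate_to_time_base(AVRational rate)
    {
        return av_inv_q(rate);
    }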
AudioIIRContext::precision
int precision
Definition: af_aiir.c:66
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:970
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
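A short, generic sketch (the array's purpose is illustrative): av_calloc() multiplies with overflow checking and zero-fills, so it is a natural choice for per-channel coefficient caches:

    #include <libavutil/mem.h>

    /* Hypothetical helper: allocate n zero-initialized doubles.
     * Returns NULL on overflow or out-of-memory; free with av_freep(). */
    static double *alloc_doubles(size_t n)
    {
        return av_calloc(n, sizeof(double));
    }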
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:61
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1098
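For context, the quantity such a routine evaluates for transfer-function coefficients is H(e^{jw}) = (sum_k b[k]*e^{-jkw}) / (sum_k a[k]*e^{-jkw}). The sketch below computes magnitude and phase directly from that definition; it is a generic illustration under that assumption, not the body of get_response():

    #include <math.h>

    /* Evaluate |H(e^{jw})| and arg H(e^{jw}) for b[0..nb_b-1], a[0..nb_a-1]. */
    static void tf_response(const double *b, int nb_b,
                            const double *a, int nb_a,
                            double w, double *mag, double *phase)
    {
        double br = 0., bi = 0., ar = 0., ai = 0.;

        for (int k = 0; k < nb_b; k++) {
            br += b[k] * cos(-k * w);
            bi += b[k] * sin(-k * w);
        }
        for (int k = 0; k < nb_a; k++) {
            ar += a[k] * cos(-k * w);
            ai += a[k] * sin(-k * w);
        }

        /* complex division (br + j*bi) / (ar + j*ai) */
        {
            const double den = ar * ar + ai * ai;
            const double re  = (br * ar + bi * ai) / den;
            const double im  = (bi * ar - br * ai) / den;

            *mag   = hypot(re, im);
            *phase = atan2(im, re);
        }
    }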
AVFilter
Filter definition.
Definition: avfilter.h:166
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:497
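The operation behind a helper with this signature is ordinary complex multiplication; a self-contained version, written independently of the file's own code, is:

    /* (re + j*im) * (re2 + j*im2) -> *RE + j*(*IM) */
    static void complex_mul(double re, double im, double re2, double im2,
                            double *RE, double *IM)
    {
        *RE = re * re2 - im * im2;
        *IM = re * im2 + im * re2;
    }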
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:230
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:61
Pair::a
int a
Definition: af_aiir.c:39
fmax
double fmax(double, double)
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:890
BiquadContext::b
double b[3]
Definition: af_aiir.c:44
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1145
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:67
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1093
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
AudioIIRContext::channels
int channels
Definition: af_aiir.c:75
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:606
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:294
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:351
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:510
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:768
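For readers unfamiliar with second-order sections, a generic direct-form-I biquad with a0 normalized to 1 implements the recurrence y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]. The sketch below illustrates that recurrence; it is not a copy of biquad_process():

    /* Generic direct-form-I biquad, a0 assumed normalized to 1. */
    static void biquad_df1(const double *x, double *y, int length,
                           double b0, double b1, double b2,
                           double a1, double a2)
    {
        double xn1 = 0., xn2 = 0., yn1 = 0., yn2 = 0.;

        for (int n = 0; n < length; n++) {
            const double out = b0 * x[n] + b1 * xn1 + b2 * xn2
                             - a1 * yn1 - a2 * yn2;
            xn2 = xn1; xn1 = x[n];
            yn2 = yn1; yn1 = out;
            y[n] = out;
        }
    }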
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:503
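The underlying idea, shown here for real roots only (complex roots, which such a routine has to handle, need the same recurrence in complex arithmetic): start from P(z) = 1 and repeatedly multiply by (z - r[k]). This is an independent sketch, not the file's expand():

    /* Expand a polynomial from its real roots r[0..n-1];
     * coef[i] receives the coefficient of z^i (coef must hold n+1 values). */
    static void expand_real_roots(const double *r, int n, double *coef)
    {
        coef[0] = 1.;
        for (int i = 1; i <= n; i++)
            coef[i] = 0.;

        for (int k = 0; k < n; k++) {
            /* multiply the current polynomial by (z - r[k]) */
            for (int i = k + 1; i >= 1; i--)
                coef[i] = coef[i - 1] - r[k] * coef[i];
            coef[0] = -r[k] * coef[0];
        }
    }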
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:138
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:58
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:74
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:419
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
IIRChannel::fir
double fir
Definition: af_aiir.c:53
ff_af_aiir
const AVFilter ff_af_aiir
Definition: af_aiir.c:1571
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:63
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1498
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:365
IIRChannel
Definition: af_aiir.c:48
AudioIIRContext::mix
double mix
Definition: af_aiir.c:62
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:998
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1044
AudioIIRContext::w
int w
Definition: af_aiir.c:68