FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/intreadwrite.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/xga_font_data.h"
27 #include "audio.h"
28 #include "avfilter.h"
29 #include "formats.h"
30 #include "internal.h"
31 #include "video.h"
32 
33 typedef struct ThreadData {
34  AVFrame *in, *out;
35 } ThreadData;
36 
37 typedef struct Pair {
38  int a, b;
39 } Pair;
40 
41 typedef struct BiquadContext {
42  double a[3];
43  double b[3];
44  double w1, w2;
45 } BiquadContext;
46 
47 typedef struct IIRChannel {
48  int nb_ab[2];
49  double *ab[2];
50  double g;
51  double *cache[2];
52  double fir;
53  BiquadContext *biquads;
54  int clippings;
55 } IIRChannel;
56 
57 typedef struct AudioIIRContext {
58  const AVClass *class;
59  char *a_str, *b_str, *g_str;
60  double dry_gain, wet_gain;
61  double mix;
62  int normalize;
63  int format;
64  int process;
65  int precision;
66  int response;
67  int w, h;
68  int ir_channel;
69  AVRational rate;
70 
71  AVFrame *video;
72 
73  IIRChannel *iir;
74  int channels;
75  enum AVSampleFormat sample_format;
76 
77  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
78 } AudioIIRContext;
79 
80 static int query_formats(AVFilterContext *ctx)
81 {
82  AudioIIRContext *s = ctx->priv;
83  AVFilterFormats *formats;
84  enum AVSampleFormat sample_fmts[] = {
85  AV_SAMPLE_FMT_DBLP,
86  AV_SAMPLE_FMT_NONE
87  };
88  static const enum AVPixelFormat pix_fmts[] = {
89  AV_PIX_FMT_RGB0,
90  AV_PIX_FMT_NONE
91  };
92  int ret;
93 
94  if (s->response) {
95  AVFilterLink *videolink = ctx->outputs[1];
96 
97  formats = ff_make_format_list(pix_fmts);
98  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
99  return ret;
100  }
101 
102  ret = ff_set_common_all_channel_counts(ctx);
103  if (ret < 0)
104  return ret;
105 
106  sample_fmts[0] = s->sample_format;
107  ret = ff_set_common_formats_from_list(ctx, sample_fmts);
108  if (ret < 0)
109  return ret;
110 
111  return ff_set_common_all_samplerates(ctx);
112 }
113 
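/* Direct processing: apply the full IIR difference equation per channel,
 * with past inputs kept in cache[1] and past outputs in cache[0].
 * The macro is instantiated once per supported sample format. */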
114 #define IIR_CH(name, type, min, max, need_clipping) \
115 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
116 { \
117  AudioIIRContext *s = ctx->priv; \
118  const double ig = s->dry_gain; \
119  const double og = s->wet_gain; \
120  const double mix = s->mix; \
121  ThreadData *td = arg; \
122  AVFrame *in = td->in, *out = td->out; \
123  const type *src = (const type *)in->extended_data[ch]; \
124  double *oc = (double *)s->iir[ch].cache[0]; \
125  double *ic = (double *)s->iir[ch].cache[1]; \
126  const int nb_a = s->iir[ch].nb_ab[0]; \
127  const int nb_b = s->iir[ch].nb_ab[1]; \
128  const double *a = s->iir[ch].ab[0]; \
129  const double *b = s->iir[ch].ab[1]; \
130  const double g = s->iir[ch].g; \
131  int *clippings = &s->iir[ch].clippings; \
132  type *dst = (type *)out->extended_data[ch]; \
133  int n; \
134  \
135  for (n = 0; n < in->nb_samples; n++) { \
136  double sample = 0.; \
137  int x; \
138  \
139  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
140  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
141  ic[0] = src[n] * ig; \
142  for (x = 0; x < nb_b; x++) \
143  sample += b[x] * ic[x]; \
144  \
145  for (x = 1; x < nb_a; x++) \
146  sample -= a[x] * oc[x]; \
147  \
148  oc[0] = sample; \
149  sample *= og * g; \
150  sample = sample * mix + ic[0] * (1. - mix); \
151  if (need_clipping && sample < min) { \
152  (*clippings)++; \
153  dst[n] = min; \
154  } else if (need_clipping && sample > max) { \
155  (*clippings)++; \
156  dst[n] = max; \
157  } else { \
158  dst[n] = sample; \
159  } \
160  } \
161  \
162  return 0; \
163 }
164 
165 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
166 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
167 IIR_CH(fltp, float, -1., 1., 0)
168 IIR_CH(dblp, double, -1., 1., 0)
169 
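/* Serial processing: the filter is applied as a cascade of second-order
 * sections (biquads) in transposed direct form II. */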
170 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
171 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
172  int ch, int nb_jobs) \
173 { \
174  AudioIIRContext *s = ctx->priv; \
175  const double ig = s->dry_gain; \
176  const double og = s->wet_gain; \
177  const double mix = s->mix; \
178  const double imix = 1. - mix; \
179  ThreadData *td = arg; \
180  AVFrame *in = td->in, *out = td->out; \
181  const type *src = (const type *)in->extended_data[ch]; \
182  type *dst = (type *)out->extended_data[ch]; \
183  IIRChannel *iir = &s->iir[ch]; \
184  const double g = iir->g; \
185  int *clippings = &iir->clippings; \
186  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
187  int n, i; \
188  \
189  for (i = nb_biquads - 1; i >= 0; i--) { \
190  const double a1 = -iir->biquads[i].a[1]; \
191  const double a2 = -iir->biquads[i].a[2]; \
192  const double b0 = iir->biquads[i].b[0]; \
193  const double b1 = iir->biquads[i].b[1]; \
194  const double b2 = iir->biquads[i].b[2]; \
195  double w1 = iir->biquads[i].w1; \
196  double w2 = iir->biquads[i].w2; \
197  \
198  for (n = 0; n < in->nb_samples; n++) { \
199  double i0 = ig * (i ? dst[n] : src[n]); \
200  double o0 = i0 * b0 + w1; \
201  \
202  w1 = b1 * i0 + w2 + a1 * o0; \
203  w2 = b2 * i0 + a2 * o0; \
204  o0 *= og * g; \
205  \
206  o0 = o0 * mix + imix * i0; \
207  if (need_clipping && o0 < min) { \
208  (*clippings)++; \
209  dst[n] = min; \
210  } else if (need_clipping && o0 > max) { \
211  (*clippings)++; \
212  dst[n] = max; \
213  } else { \
214  dst[n] = o0; \
215  } \
216  } \
217  iir->biquads[i].w1 = w1; \
218  iir->biquads[i].w2 = w2; \
219  } \
220  \
221  return 0; \
222 }
223 
224 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
225 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
226 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
227 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
228 
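/* Parallel processing: every biquad filters the original input and its
 * output is accumulated into dst[]; a final pass adds the direct FIR term
 * and applies the dry/wet mix. */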
229 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
230 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
231  int ch, int nb_jobs) \
232 { \
233  AudioIIRContext *s = ctx->priv; \
234  const double ig = s->dry_gain; \
235  const double og = s->wet_gain; \
236  const double mix = s->mix; \
237  const double imix = 1. - mix; \
238  ThreadData *td = arg; \
239  AVFrame *in = td->in, *out = td->out; \
240  const type *src = (const type *)in->extended_data[ch]; \
241  type *dst = (type *)out->extended_data[ch]; \
242  IIRChannel *iir = &s->iir[ch]; \
243  const double g = iir->g; \
244  const double fir = iir->fir; \
245  int *clippings = &iir->clippings; \
246  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
247  int n, i; \
248  \
249  for (i = 0; i < nb_biquads; i++) { \
250  const double a1 = -iir->biquads[i].a[1]; \
251  const double a2 = -iir->biquads[i].a[2]; \
252  const double b1 = iir->biquads[i].b[1]; \
253  const double b2 = iir->biquads[i].b[2]; \
254  double w1 = iir->biquads[i].w1; \
255  double w2 = iir->biquads[i].w2; \
256  \
257  for (n = 0; n < in->nb_samples; n++) { \
258  double i0 = ig * src[n]; \
259  double o0 = w1; \
260  \
261  w1 = b1 * i0 + w2 + a1 * o0; \
262  w2 = b2 * i0 + a2 * o0; \
263  o0 *= og * g; \
264  o0 += dst[n]; \
265  \
266  if (need_clipping && o0 < min) { \
267  (*clippings)++; \
268  dst[n] = min; \
269  } else if (need_clipping && o0 > max) { \
270  (*clippings)++; \
271  dst[n] = max; \
272  } else { \
273  dst[n] = o0; \
274  } \
275  } \
276  iir->biquads[i].w1 = w1; \
277  iir->biquads[i].w2 = w2; \
278  } \
279  \
280  for (n = 0; n < in->nb_samples; n++) { \
281  dst[n] += fir * src[n]; \
282  dst[n] = dst[n] * mix + imix * src[n]; \
283  } \
284  \
285  return 0; \
286 }
287 
288 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
289 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
290 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
291 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
292 
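/* Lattice-ladder processing: k[] holds the reflection coefficients,
 * v[] the ladder coefficients, and x[] the per-stage state. */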
293 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
294 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
295  int ch, int nb_jobs) \
296 { \
297  AudioIIRContext *s = ctx->priv; \
298  const double ig = s->dry_gain; \
299  const double og = s->wet_gain; \
300  const double mix = s->mix; \
301  ThreadData *td = arg; \
302  AVFrame *in = td->in, *out = td->out; \
303  const type *src = (const type *)in->extended_data[ch]; \
304  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
305  const int nb_stages = s->iir[ch].nb_ab[1]; \
306  const double *v = s->iir[ch].ab[0]; \
307  const double *k = s->iir[ch].ab[1]; \
308  const double g = s->iir[ch].g; \
309  int *clippings = &s->iir[ch].clippings; \
310  type *dst = (type *)out->extended_data[ch]; \
311  int n; \
312  \
313  for (n = 0; n < in->nb_samples; n++) { \
314  const double in = src[n] * ig; \
315  double out = 0.; \
316  \
317  n1 = in; \
318  for (int i = nb_stages - 1; i >= 0; i--) { \
319  n0 = n1 - k[i] * x[i]; \
320  p0 = n0 * k[i] + x[i]; \
321  out += p0 * v[i+1]; \
322  x[i] = p0; \
323  n1 = n0; \
324  } \
325  \
326  out += n1 * v[0]; \
327  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
328  x[0] = n1; \
329  out *= og * g; \
330  out = out * mix + in * (1. - mix); \
331  if (need_clipping && out < min) { \
332  (*clippings)++; \
333  dst[n] = min; \
334  } else if (need_clipping && out > max) { \
335  (*clippings)++; \
336  dst[n] = max; \
337  } else { \
338  dst[n] = out; \
339  } \
340  } \
341  \
342  return 0; \
343 }
344 
345 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
346 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
347 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
348 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
349 
350 static void count_coefficients(char *item_str, int *nb_items)
351 {
352  char *p;
353 
354  if (!item_str)
355  return;
356 
357  *nb_items = 1;
358  for (p = item_str; *p && *p != '|'; p++) {
359  if (*p == ' ')
360  (*nb_items)++;
361  }
362 }
363 
364 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
365 {
366  AudioIIRContext *s = ctx->priv;
367  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
368  int i;
369 
370  p = old_str = av_strdup(item_str);
371  if (!p)
372  return AVERROR(ENOMEM);
373  for (i = 0; i < nb_items; i++) {
374  if (!(arg = av_strtok(p, "|", &saveptr)))
375  arg = prev_arg;
376 
377  if (!arg) {
378  av_freep(&old_str);
379  return AVERROR(EINVAL);
380  }
381 
382  p = NULL;
383  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
384  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
385  av_freep(&old_str);
386  return AVERROR(EINVAL);
387  }
388 
389  prev_arg = arg;
390  }
391 
392  av_freep(&old_str);
393 
394  return 0;
395 }
396 
397 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
398 {
399  char *p, *arg, *old_str, *saveptr = NULL;
400  int i;
401 
402  p = old_str = av_strdup(item_str);
403  if (!p)
404  return AVERROR(ENOMEM);
405  for (i = 0; i < nb_items; i++) {
406  if (!(arg = av_strtok(p, " ", &saveptr)))
407  break;
408 
409  p = NULL;
410  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
411  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
412  av_freep(&old_str);
413  return AVERROR(EINVAL);
414  }
415  }
416 
417  av_freep(&old_str);
418 
419  return 0;
420 }
421 
422 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
423 {
424  char *p, *arg, *old_str, *saveptr = NULL;
425  int i;
426 
427  p = old_str = av_strdup(item_str);
428  if (!p)
429  return AVERROR(ENOMEM);
430  for (i = 0; i < nb_items; i++) {
431  if (!(arg = av_strtok(p, " ", &saveptr)))
432  break;
433 
434  p = NULL;
435  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
436  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
437  av_freep(&old_str);
438  return AVERROR(EINVAL);
439  }
440  }
441 
442  av_freep(&old_str);
443 
444  return 0;
445 }
446 
447 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
448 
449 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
450 {
451  AudioIIRContext *s = ctx->priv;
452  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
453  int i, ret;
454 
455  p = old_str = av_strdup(item_str);
456  if (!p)
457  return AVERROR(ENOMEM);
458  for (i = 0; i < channels; i++) {
459  IIRChannel *iir = &s->iir[i];
460 
461  if (!(arg = av_strtok(p, "|", &saveptr)))
462  arg = prev_arg;
463 
464  if (!arg) {
465  av_freep(&old_str);
466  return AVERROR(EINVAL);
467  }
468 
469  count_coefficients(arg, &iir->nb_ab[ab]);
470 
471  p = NULL;
472  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
473  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
474  if (!iir->ab[ab] || !iir->cache[ab]) {
475  av_freep(&old_str);
476  return AVERROR(ENOMEM);
477  }
478 
479  if (s->format > 0) {
480  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
481  } else {
482  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
483  }
484  if (ret < 0) {
485  av_freep(&old_str);
486  return ret;
487  }
488  prev_arg = arg;
489  }
490 
491  av_freep(&old_str);
492 
493  return 0;
494 }
495 
496 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
497 {
498  *RE = re * re2 - im * im2;
499  *IM = re * im2 + re2 * im;
500 }
501 
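/* Expand n complex roots (re/im pairs in pz) into polynomial coefficients;
 * fails if any resulting coefficient has a non-negligible imaginary part,
 * i.e. the roots do not come in conjugate pairs. */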
502 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
503 {
504  coefs[2 * n] = 1.0;
505 
506  for (int i = 1; i <= n; i++) {
507  for (int j = n - i; j < n; j++) {
508  double re, im;
509 
510  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
511  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
512 
513  coefs[2 * j] -= re;
514  coefs[2 * j + 1] -= im;
515  }
516  }
517 
518  for (int i = 0; i < n + 1; i++) {
519  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
520  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
521  coefs[2 * i + 1], i);
522  return AVERROR(EINVAL);
523  }
524  }
525 
526  return 0;
527 }
528 
529 static void normalize_coeffs(AVFilterContext *ctx, int ch)
530 {
531  AudioIIRContext *s = ctx->priv;
532  IIRChannel *iir = &s->iir[ch];
533  double sum_den = 0.;
534 
535  if (!s->normalize)
536  return;
537 
538  for (int i = 0; i < iir->nb_ab[1]; i++) {
539  sum_den += iir->ab[1][i];
540  }
541 
542  if (sum_den > 1e-6) {
543  double factor, sum_num = 0.;
544 
545  for (int i = 0; i < iir->nb_ab[0]; i++) {
546  sum_num += iir->ab[0][i];
547  }
548 
549  factor = sum_num / sum_den;
550 
551  for (int i = 0; i < iir->nb_ab[1]; i++) {
552  iir->ab[1][i] *= factor;
553  }
554  }
555 }
556 
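/* Convert zero/pole pairs into transfer-function coefficients by expanding
 * both root sets into polynomials. */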
557 static int convert_zp2tf(AVFilterContext *ctx, int channels)
558 {
559  AudioIIRContext *s = ctx->priv;
560  int ch, i, j, ret = 0;
561 
562  for (ch = 0; ch < channels; ch++) {
563  IIRChannel *iir = &s->iir[ch];
564  double *topc, *botc;
565 
566  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
567  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
568  if (!topc || !botc) {
569  ret = AVERROR(ENOMEM);
570  goto fail;
571  }
572 
573  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
574  if (ret < 0) {
575  goto fail;
576  }
577 
578  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
579  if (ret < 0) {
580  goto fail;
581  }
582 
583  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
584  iir->ab[1][j] = topc[2 * i];
585  }
586  iir->nb_ab[1]++;
587 
588  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
589  iir->ab[0][j] = botc[2 * i];
590  }
591  iir->nb_ab[0]++;
592 
593  normalize_coeffs(ctx, ch);
594 
595 fail:
596  av_free(topc);
597  av_free(botc);
598  if (ret < 0)
599  break;
600  }
601 
602  return ret;
603 }
604 
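/* Decompose zeros/poles into biquads: repeatedly pick the remaining pole
 * with the largest magnitude together with its conjugate, pair it with the
 * nearest zero and its conjugate, and expand the pair into one second-order
 * section. Consumed poles/zeros are marked with NAN. */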
605 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
606 {
607  AudioIIRContext *s = ctx->priv;
608  int ch, ret;
609 
610  for (ch = 0; ch < channels; ch++) {
611  IIRChannel *iir = &s->iir[ch];
612  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
613  int current_biquad = 0;
614 
615  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
616  if (!iir->biquads)
617  return AVERROR(ENOMEM);
618 
619  while (nb_biquads--) {
620  Pair outmost_pole = { -1, -1 };
621  Pair nearest_zero = { -1, -1 };
622  double zeros[4] = { 0 };
623  double poles[4] = { 0 };
624  double b[6] = { 0 };
625  double a[6] = { 0 };
626  double min_distance = DBL_MAX;
627  double max_mag = 0;
628  double factor;
629  int i;
630 
631  for (i = 0; i < iir->nb_ab[0]; i++) {
632  double mag;
633 
634  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
635  continue;
636  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
637 
638  if (mag > max_mag) {
639  max_mag = mag;
640  outmost_pole.a = i;
641  }
642  }
643 
644  for (i = 0; i < iir->nb_ab[0]; i++) {
645  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
646  continue;
647 
648  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
649  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
650  outmost_pole.b = i;
651  break;
652  }
653  }
654 
655  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
656 
657  if (outmost_pole.a < 0 || outmost_pole.b < 0)
658  return AVERROR(EINVAL);
659 
660  for (i = 0; i < iir->nb_ab[1]; i++) {
661  double distance;
662 
663  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
664  continue;
665  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
666  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
667 
668  if (distance < min_distance) {
669  min_distance = distance;
670  nearest_zero.a = i;
671  }
672  }
673 
674  for (i = 0; i < iir->nb_ab[1]; i++) {
675  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
676  continue;
677 
678  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
679  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
680  nearest_zero.b = i;
681  break;
682  }
683  }
684 
685  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
686 
687  if (nearest_zero.a < 0 || nearest_zero.b < 0)
688  return AVERROR(EINVAL);
689 
690  poles[0] = iir->ab[0][2 * outmost_pole.a ];
691  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
692 
693  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
694  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
695 
696  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
697  zeros[2] = 0;
698  zeros[3] = 0;
699 
700  poles[2] = 0;
701  poles[3] = 0;
702  } else {
703  poles[2] = iir->ab[0][2 * outmost_pole.b ];
704  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
705 
706  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
707  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
708  }
709 
710  ret = expand(ctx, zeros, 2, b);
711  if (ret < 0)
712  return ret;
713 
714  ret = expand(ctx, poles, 2, a);
715  if (ret < 0)
716  return ret;
717 
718  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
719  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
720  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
721  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
722 
723  iir->biquads[current_biquad].a[0] = 1.;
724  iir->biquads[current_biquad].a[1] = a[2] / a[4];
725  iir->biquads[current_biquad].a[2] = a[0] / a[4];
726  iir->biquads[current_biquad].b[0] = b[4] / a[4];
727  iir->biquads[current_biquad].b[1] = b[2] / a[4];
728  iir->biquads[current_biquad].b[2] = b[0] / a[4];
729 
730  if (s->normalize &&
731  fabs(iir->biquads[current_biquad].b[0] +
732  iir->biquads[current_biquad].b[1] +
733  iir->biquads[current_biquad].b[2]) > 1e-6) {
734  factor = (iir->biquads[current_biquad].a[0] +
735  iir->biquads[current_biquad].a[1] +
736  iir->biquads[current_biquad].a[2]) /
737  (iir->biquads[current_biquad].b[0] +
738  iir->biquads[current_biquad].b[1] +
739  iir->biquads[current_biquad].b[2]);
740 
741  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
742 
743  iir->biquads[current_biquad].b[0] *= factor;
744  iir->biquads[current_biquad].b[1] *= factor;
745  iir->biquads[current_biquad].b[2] *= factor;
746  }
747 
748  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
749  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
750  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
751 
752  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
753  iir->biquads[current_biquad].a[0],
754  iir->biquads[current_biquad].a[1],
755  iir->biquads[current_biquad].a[2],
756  iir->biquads[current_biquad].b[0],
757  iir->biquads[current_biquad].b[1],
758  iir->biquads[current_biquad].b[2]);
759 
760  current_biquad++;
761  }
762  }
763 
764  return 0;
765 }
766 
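/* Run a single biquad (transposed direct form II) over x[] into y[]. */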
767 static void biquad_process(double *x, double *y, int length,
768  double b0, double b1, double b2,
769  double a1, double a2)
770 {
771  double w1 = 0., w2 = 0.;
772 
773  a1 = -a1;
774  a2 = -a2;
775 
776  for (int n = 0; n < length; n++) {
777  double out, in = x[n];
778 
779  y[n] = out = in * b0 + w1;
780  w1 = b1 * in + w2 + a1 * out;
781  w2 = b2 * in + a2 * out;
782  }
783 }
784 
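/* Solve matrix * x = vector via LU decomposition; y holds the intermediate
 * forward-substitution result and lu the decomposition itself. */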
785 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
786 {
787  double sum = 0.;
788 
789  for (int i = 0; i < n; i++) {
790  for (int j = i; j < n; j++) {
791  sum = 0.;
792  for (int k = 0; k < i; k++)
793  sum += lu[i * n + k] * lu[k * n + j];
794  lu[i * n + j] = matrix[j * n + i] - sum;
795  }
796  for (int j = i + 1; j < n; j++) {
797  sum = 0.;
798  for (int k = 0; k < i; k++)
799  sum += lu[j * n + k] * lu[k * n + i];
800  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
801  }
802  }
803 
804  for (int i = 0; i < n; i++) {
805  sum = 0.;
806  for (int k = 0; k < i; k++)
807  sum += lu[i * n + k] * y[k];
808  y[i] = vector[i] - sum;
809  }
810 
811  for (int i = n - 1; i >= 0; i--) {
812  sum = 0.;
813  for (int k = i + 1; k < n; k++)
814  sum += lu[i * n + k] * x[k];
815  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
816  }
817 }
818 
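/* Convert the serial (cascaded) biquads into a parallel bank: compute the
 * cascade's impulse response, then solve a linear system so that a sum of
 * two-tap numerators over the same denominators, plus a direct FIR term,
 * reproduces it. */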
819 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
820 {
821  AudioIIRContext *s = ctx->priv;
822  int ret = 0;
823 
824  for (int ch = 0; ch < channels; ch++) {
825  IIRChannel *iir = &s->iir[ch];
826  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
827  int length = nb_biquads * 2 + 1;
828  double *impulse = av_calloc(length, sizeof(*impulse));
829  double *y = av_calloc(length, sizeof(*y));
830  double *resp = av_calloc(length, sizeof(*resp));
831  double *M = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*M));
832  double *W = av_calloc((length - 1) * 2 * nb_biquads, sizeof(*W));
833 
834  if (!impulse || !y || !resp || !M || !W) {
835  av_free(impulse);
836  av_free(y);
837  av_free(resp);
838  av_free(M);
839  av_free(W);
840  return AVERROR(ENOMEM);
841  }
842 
843  impulse[0] = 1.;
844 
845  for (int n = 0; n < nb_biquads; n++) {
846  BiquadContext *biquad = &iir->biquads[n];
847 
848  biquad_process(n ? y : impulse, y, length,
849  biquad->b[0], biquad->b[1], biquad->b[2],
850  biquad->a[1], biquad->a[2]);
851  }
852 
853  for (int n = 0; n < nb_biquads; n++) {
854  BiquadContext *biquad = &iir->biquads[n];
855 
856  biquad_process(impulse, resp, length - 1,
857  1., 0., 0., biquad->a[1], biquad->a[2]);
858 
859  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
860  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
861  memset(resp, 0, length * sizeof(*resp));
862  }
863 
864  solve(M, &y[1], length - 1, &impulse[1], resp, W);
865 
866  iir->fir = y[0];
867 
868  for (int n = 0; n < nb_biquads; n++) {
869  BiquadContext *biquad = &iir->biquads[n];
870 
871  biquad->b[0] = 0.;
872  biquad->b[1] = resp[n * 2 + 0];
873  biquad->b[2] = resp[n * 2 + 1];
874  }
875 
876  av_free(impulse);
877  av_free(y);
878  av_free(resp);
879  av_free(M);
880  av_free(W);
881 
882  if (ret < 0)
883  return ret;
884  }
885 
886  return 0;
887 }
888 
889 static void convert_pr2zp(AVFilterContext *ctx, int channels)
890 {
891  AudioIIRContext *s = ctx->priv;
892  int ch;
893 
894  for (ch = 0; ch < channels; ch++) {
895  IIRChannel *iir = &s->iir[ch];
896  int n;
897 
898  for (n = 0; n < iir->nb_ab[0]; n++) {
899  double r = iir->ab[0][2*n];
900  double angle = iir->ab[0][2*n+1];
901 
902  iir->ab[0][2*n] = r * cos(angle);
903  iir->ab[0][2*n+1] = r * sin(angle);
904  }
905 
906  for (n = 0; n < iir->nb_ab[1]; n++) {
907  double r = iir->ab[1][2*n];
908  double angle = iir->ab[1][2*n+1];
909 
910  iir->ab[1][2*n] = r * cos(angle);
911  iir->ab[1][2*n+1] = r * sin(angle);
912  }
913  }
914 }
915 
916 static void convert_sp2zp(AVFilterContext *ctx, int channels)
917 {
918  AudioIIRContext *s = ctx->priv;
919  int ch;
920 
921  for (ch = 0; ch < channels; ch++) {
922  IIRChannel *iir = &s->iir[ch];
923  int n;
924 
925  for (n = 0; n < iir->nb_ab[0]; n++) {
926  double sr = iir->ab[0][2*n];
927  double si = iir->ab[0][2*n+1];
928 
929  iir->ab[0][2*n] = exp(sr) * cos(si);
930  iir->ab[0][2*n+1] = exp(sr) * sin(si);
931  }
932 
933  for (n = 0; n < iir->nb_ab[1]; n++) {
934  double sr = iir->ab[1][2*n];
935  double si = iir->ab[1][2*n+1];
936 
937  iir->ab[1][2*n] = exp(sr) * cos(si);
938  iir->ab[1][2*n+1] = exp(sr) * sin(si);
939  }
940  }
941 }
942 
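/* fact(): plain factorial helper. coef_sf2zf(): map analog (s-plane)
 * transfer-function coefficients to their digital (z-plane) equivalents;
 * the formula appears to implement a bilinear-transform expansion. */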
943 static double fact(double i)
944 {
945  if (i <= 0.)
946  return 1.;
947  return i * fact(i - 1.);
948 }
949 
950 static double coef_sf2zf(double *a, int N, int n)
951 {
952  double z = 0.;
953 
954  for (int i = 0; i <= N; i++) {
955  double acc = 0.;
956 
957  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
958  acc += ((fact(i) * fact(N - i)) /
959  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
960  ((k & 1) ? -1. : 1.);
961  }
962 
963  z += a[i] * pow(2., i) * acc;
964  }
965 
966  return z;
967 }
968 
969 static void convert_sf2tf(AVFilterContext *ctx, int channels)
970 {
971  AudioIIRContext *s = ctx->priv;
972  int ch;
973 
974  for (ch = 0; ch < channels; ch++) {
975  IIRChannel *iir = &s->iir[ch];
976  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
977  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
978 
979  if (!temp0 || !temp1)
980  goto next;
981 
982  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
983  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
984 
985  for (int n = 0; n < iir->nb_ab[0]; n++)
986  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
987 
988  for (int n = 0; n < iir->nb_ab[1]; n++)
989  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
990 
991 next:
992  av_free(temp0);
993  av_free(temp1);
994  }
995 }
996 
997 static void convert_pd2zp(AVFilterContext *ctx, int channels)
998 {
999  AudioIIRContext *s = ctx->priv;
1000  int ch;
1001 
1002  for (ch = 0; ch < channels; ch++) {
1003  IIRChannel *iir = &s->iir[ch];
1004  int n;
1005 
1006  for (n = 0; n < iir->nb_ab[0]; n++) {
1007  double r = iir->ab[0][2*n];
1008  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1009 
1010  iir->ab[0][2*n] = r * cos(angle);
1011  iir->ab[0][2*n+1] = r * sin(angle);
1012  }
1013 
1014  for (n = 0; n < iir->nb_ab[1]; n++) {
1015  double r = iir->ab[1][2*n];
1016  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1017 
1018  iir->ab[1][2*n] = r * cos(angle);
1019  iir->ab[1][2*n+1] = r * sin(angle);
1020  }
1021  }
1022 }
1023 
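/* Warn about poles that lie on or outside the unit circle (|p| >= 1). */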
1024 static void check_stability(AVFilterContext *ctx, int channels)
1025 {
1026  AudioIIRContext *s = ctx->priv;
1027  int ch;
1028 
1029  for (ch = 0; ch < channels; ch++) {
1030  IIRChannel *iir = &s->iir[ch];
1031 
1032  for (int n = 0; n < iir->nb_ab[0]; n++) {
1033  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1034 
1035  if (pr >= 1.) {
1036  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1037  break;
1038  }
1039  }
1040  }
1041 }
1042 
1043 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1044 {
1045  const uint8_t *font;
1046  int font_height;
1047  int i;
1048 
1049  font = avpriv_cga_font, font_height = 8;
1050 
1051  for (i = 0; txt[i]; i++) {
1052  int char_y, mask;
1053 
1054  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1055  for (char_y = 0; char_y < font_height; char_y++) {
1056  for (mask = 0x80; mask; mask >>= 1) {
1057  if (font[txt[i] * font_height + char_y] & mask)
1058  AV_WL32(p, color);
1059  p += 4;
1060  }
1061  p += pic->linesize[0] - 8 * 4;
1062  }
1063  }
1064 }
1065 
1066 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1067 {
1068  int dx = FFABS(x1-x0);
1069  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1070  int err = (dx>dy ? dx : -dy) / 2, e2;
1071 
1072  for (;;) {
1073  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1074 
1075  if (x0 == x1 && y0 == y1)
1076  break;
1077 
1078  e2 = err;
1079 
1080  if (e2 >-dx) {
1081  err -= dy;
1082  x0--;
1083  }
1084 
1085  if (e2 < dy) {
1086  err += dx;
1087  y0 += sy;
1088  }
1089  }
1090 }
1091 
1092 static double distance(double x0, double x1, double y0, double y1)
1093 {
1094  return hypot(x0 - x1, y0 - y1);
1095 }
1096 
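/* Evaluate the frequency response at angular frequency w, either from the
 * expanded coefficients (format 0) or directly from the zero/pole
 * locations. */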
1097 static void get_response(int channel, int format, double w,
1098  const double *b, const double *a,
1099  int nb_b, int nb_a, double *magnitude, double *phase)
1100 {
1101  double realz, realp;
1102  double imagz, imagp;
1103  double real, imag;
1104  double div;
1105 
1106  if (format == 0) {
1107  realz = 0., realp = 0.;
1108  imagz = 0., imagp = 0.;
1109  for (int x = 0; x < nb_a; x++) {
1110  realz += cos(-x * w) * a[x];
1111  imagz += sin(-x * w) * a[x];
1112  }
1113 
1114  for (int x = 0; x < nb_b; x++) {
1115  realp += cos(-x * w) * b[x];
1116  imagp += sin(-x * w) * b[x];
1117  }
1118 
1119  div = realp * realp + imagp * imagp;
1120  real = (realz * realp + imagz * imagp) / div;
1121  imag = (imagz * realp - imagp * realz) / div;
1122 
1123  *magnitude = hypot(real, imag);
1124  *phase = atan2(imag, real);
1125  } else {
1126  double p = 1., z = 1.;
1127  double acc = 0.;
1128 
1129  for (int x = 0; x < nb_a; x++) {
1130  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1131  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1132  }
1133 
1134  for (int x = 0; x < nb_b; x++) {
1135  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1136  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1137  }
1138 
1139  *magnitude = z / p;
1140  *phase = acc;
1141  }
1142 }
1143 
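/* Render magnitude, phase and group-delay curves of the selected channel
 * into the response video frame, with min/max labels when the frame is
 * large enough. */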
1144 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1145 {
1146  AudioIIRContext *s = ctx->priv;
1147  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1148  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1149  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1150  char text[32];
1151  int ch, i;
1152 
1153  memset(out->data[0], 0, s->h * out->linesize[0]);
1154 
1155  phase = av_malloc_array(s->w, sizeof(*phase));
1156  temp = av_malloc_array(s->w, sizeof(*temp));
1157  mag = av_malloc_array(s->w, sizeof(*mag));
1158  delay = av_malloc_array(s->w, sizeof(*delay));
1159  if (!mag || !phase || !delay || !temp)
1160  goto end;
1161 
1162  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1163  for (i = 0; i < s->w; i++) {
1164  const double *b = s->iir[ch].ab[0];
1165  const double *a = s->iir[ch].ab[1];
1166  const int nb_b = s->iir[ch].nb_ab[0];
1167  const int nb_a = s->iir[ch].nb_ab[1];
1168  double w = i * M_PI / (s->w - 1);
1169  double m, p;
1170 
1171  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1172 
1173  mag[i] = s->iir[ch].g * m;
1174  phase[i] = p;
1175  min = fmin(min, mag[i]);
1176  max = fmax(max, mag[i]);
1177  }
1178 
1179  temp[0] = 0.;
1180  for (i = 0; i < s->w - 1; i++) {
1181  double d = phase[i] - phase[i + 1];
1182  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1183  }
1184 
1185  min_phase = phase[0];
1186  max_phase = phase[0];
1187  for (i = 1; i < s->w; i++) {
1188  temp[i] += temp[i - 1];
1189  phase[i] += temp[i];
1190  min_phase = fmin(min_phase, phase[i]);
1191  max_phase = fmax(max_phase, phase[i]);
1192  }
1193 
1194  for (i = 0; i < s->w - 1; i++) {
1195  double div = s->w / (double)sample_rate;
1196 
1197  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1198  min_delay = fmin(min_delay, delay[i + 1]);
1199  max_delay = fmax(max_delay, delay[i + 1]);
1200  }
1201  delay[0] = delay[1];
1202 
1203  for (i = 0; i < s->w; i++) {
1204  int ymag = mag[i] / max * (s->h - 1);
1205  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1206  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1207 
1208  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1209  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1210  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1211 
1212  if (prev_ymag < 0)
1213  prev_ymag = ymag;
1214  if (prev_yphase < 0)
1215  prev_yphase = yphase;
1216  if (prev_ydelay < 0)
1217  prev_ydelay = ydelay;
1218 
1219  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1220  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1221  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1222 
1223  prev_ymag = ymag;
1224  prev_yphase = yphase;
1225  prev_ydelay = ydelay;
1226  }
1227 
1228  if (s->w > 400 && s->h > 100) {
1229  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1230  snprintf(text, sizeof(text), "%.2f", max);
1231  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1232 
1233  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1234  snprintf(text, sizeof(text), "%.2f", min);
1235  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1236 
1237  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1238  snprintf(text, sizeof(text), "%.2f", max_phase);
1239  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1240 
1241  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1242  snprintf(text, sizeof(text), "%.2f", min_phase);
1243  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1244 
1245  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1246  snprintf(text, sizeof(text), "%.2f", max_delay);
1247  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1248 
1249  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1250  snprintf(text, sizeof(text), "%.2f", min_delay);
1251  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1252  }
1253 
1254 end:
1255  av_free(delay);
1256  av_free(temp);
1257  av_free(phase);
1258  av_free(mag);
1259 }
1260 
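/* Output configuration: parse gains and coefficients, convert the selected
 * coefficient format to the internal representation, optionally draw the
 * response video, and select the processing function for the sample format
 * and processing mode. */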
1261 static int config_output(AVFilterLink *outlink)
1262 {
1263  AVFilterContext *ctx = outlink->src;
1264  AudioIIRContext *s = ctx->priv;
1265  AVFilterLink *inlink = ctx->inputs[0];
1266  int ch, ret, i;
1267 
1268  s->channels = inlink->ch_layout.nb_channels;
1269  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1270  if (!s->iir)
1271  return AVERROR(ENOMEM);
1272 
1273  ret = read_gains(ctx, s->g_str, inlink->ch_layout.nb_channels);
1274  if (ret < 0)
1275  return ret;
1276 
1277  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->a_str, 0);
1278  if (ret < 0)
1279  return ret;
1280 
1281  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->b_str, 1);
1282  if (ret < 0)
1283  return ret;
1284 
1285  if (s->format == -1) {
1286  convert_sf2tf(ctx, inlink->ch_layout.nb_channels);
1287  s->format = 0;
1288  } else if (s->format == 2) {
1289  convert_pr2zp(ctx, inlink->ch_layout.nb_channels);
1290  } else if (s->format == 3) {
1291  convert_pd2zp(ctx, inlink->ch_layout.nb_channels);
1292  } else if (s->format == 4) {
1293  convert_sp2zp(ctx, inlink->ch_layout.nb_channels);
1294  }
1295  if (s->format > 0) {
1296  check_stability(ctx, inlink->ch_layout.nb_channels);
1297  }
1298 
1299  av_frame_free(&s->video);
1300  if (s->response) {
1301  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1302  if (!s->video)
1303  return AVERROR(ENOMEM);
1304 
1305  draw_response(ctx, s->video, inlink->sample_rate);
1306  }
1307 
1308  if (s->format == 0)
1309  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
1310 
1311  if (s->format > 0 && s->process == 0) {
1312  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1313 
1314  ret = convert_zp2tf(ctx, inlink->ch_layout.nb_channels);
1315  if (ret < 0)
1316  return ret;
1317  } else if (s->format == -2 && s->process > 0) {
1318  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1319  return AVERROR_PATCHWELCOME;
1320  } else if (s->format <= 0 && s->process == 1) {
1321  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1322  return AVERROR_PATCHWELCOME;
1323  } else if (s->format <= 0 && s->process == 2) {
1324  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1325  return AVERROR_PATCHWELCOME;
1326  } else if (s->format > 0 && s->process == 1) {
1327  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1328  if (ret < 0)
1329  return ret;
1330  } else if (s->format > 0 && s->process == 2) {
1331  if (s->precision > 1)
1332  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1333  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1334  if (ret < 0)
1335  return ret;
1336  ret = convert_serial2parallel(ctx, inlink->ch_layout.nb_channels);
1337  if (ret < 0)
1338  return ret;
1339  }
1340 
1341  for (ch = 0; s->format == -2 && ch < inlink->ch_layout.nb_channels; ch++) {
1342  IIRChannel *iir = &s->iir[ch];
1343 
1344  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1345  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1346  return AVERROR(EINVAL);
1347  }
1348  }
1349 
1350  for (ch = 0; s->format == 0 && ch < inlink->ch_layout.nb_channels; ch++) {
1351  IIRChannel *iir = &s->iir[ch];
1352 
1353  for (i = 1; i < iir->nb_ab[0]; i++) {
1354  iir->ab[0][i] /= iir->ab[0][0];
1355  }
1356 
1357  iir->ab[0][0] = 1.0;
1358  for (i = 0; i < iir->nb_ab[1]; i++) {
1359  iir->ab[1][i] *= iir->g;
1360  }
1361 
1362  normalize_coeffs(ctx, ch);
1363  }
1364 
1365  switch (inlink->format) {
1366  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1367  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1368  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1369  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1370  }
1371 
1372  if (s->format == -2) {
1373  switch (inlink->format) {
1374  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1375  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1376  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1377  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1378  }
1379  }
1380 
1381  return 0;
1382 }
1383 
1384 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1385 {
1386  AVFilterContext *ctx = inlink->dst;
1387  AudioIIRContext *s = ctx->priv;
1388  AVFilterLink *outlink = ctx->outputs[0];
1389  ThreadData td;
1390  AVFrame *out;
1391  int ch, ret;
1392 
1393  if (av_frame_is_writable(in) && s->process != 2) {
1394  out = in;
1395  } else {
1396  out = ff_get_audio_buffer(outlink, in->nb_samples);
1397  if (!out) {
1398  av_frame_free(&in);
1399  return AVERROR(ENOMEM);
1400  }
1401  av_frame_copy_props(out, in);
1402  }
1403 
1404  td.in = in;
1405  td.out = out;
1406  ff_filter_execute(ctx, s->iir_channel, &td, NULL, outlink->ch_layout.nb_channels);
1407 
1408  for (ch = 0; ch < outlink->ch_layout.nb_channels; ch++) {
1409  if (s->iir[ch].clippings > 0)
1410  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1411  ch, s->iir[ch].clippings);
1412  s->iir[ch].clippings = 0;
1413  }
1414 
1415  if (in != out)
1416  av_frame_free(&in);
1417 
1418  if (s->response) {
1419  AVFilterLink *outlink = ctx->outputs[1];
1420  int64_t old_pts = s->video->pts;
1421  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1422 
1423  if (new_pts > old_pts) {
1424  AVFrame *clone;
1425 
1426  s->video->pts = new_pts;
1427  clone = av_frame_clone(s->video);
1428  if (!clone)
1429  return AVERROR(ENOMEM);
1430  ret = ff_filter_frame(outlink, clone);
1431  if (ret < 0)
1432  return ret;
1433  }
1434  }
1435 
1436  return ff_filter_frame(outlink, out);
1437 }
1438 
1439 static int config_video(AVFilterLink *outlink)
1440 {
1441  AVFilterContext *ctx = outlink->src;
1442  AudioIIRContext *s = ctx->priv;
1443 
1444  outlink->sample_aspect_ratio = (AVRational){1,1};
1445  outlink->w = s->w;
1446  outlink->h = s->h;
1447  outlink->frame_rate = s->rate;
1448  outlink->time_base = av_inv_q(outlink->frame_rate);
1449 
1450  return 0;
1451 }
1452 
1453 static av_cold int init(AVFilterContext *ctx)
1454 {
1455  AudioIIRContext *s = ctx->priv;
1456  AVFilterPad pad, vpad;
1457  int ret;
1458 
1459  if (!s->a_str || !s->b_str || !s->g_str) {
1460  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1461  return AVERROR(EINVAL);
1462  }
1463 
1464  switch (s->precision) {
1465  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1466  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1467  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1468  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1469  default: return AVERROR_BUG;
1470  }
1471 
1472  pad = (AVFilterPad){
1473  .name = "default",
1474  .type = AVMEDIA_TYPE_AUDIO,
1475  .config_props = config_output,
1476  };
1477 
1478  ret = ff_append_outpad(ctx, &pad);
1479  if (ret < 0)
1480  return ret;
1481 
1482  if (s->response) {
1483  vpad = (AVFilterPad){
1484  .name = "filter_response",
1485  .type = AVMEDIA_TYPE_VIDEO,
1486  .config_props = config_video,
1487  };
1488 
1489  ret = ff_append_outpad(ctx, &vpad);
1490  if (ret < 0)
1491  return ret;
1492  }
1493 
1494  return 0;
1495 }
1496 
1497 static av_cold void uninit(AVFilterContext *ctx)
1498 {
1499  AudioIIRContext *s = ctx->priv;
1500  int ch;
1501 
1502  if (s->iir) {
1503  for (ch = 0; ch < s->channels; ch++) {
1504  IIRChannel *iir = &s->iir[ch];
1505  av_freep(&iir->ab[0]);
1506  av_freep(&iir->ab[1]);
1507  av_freep(&iir->cache[0]);
1508  av_freep(&iir->cache[1]);
1509  av_freep(&iir->biquads);
1510  }
1511  }
1512  av_freep(&s->iir);
1513 
1514  av_frame_free(&s->video);
1515 }
1516 
1517 static const AVFilterPad inputs[] = {
1518  {
1519  .name = "default",
1520  .type = AVMEDIA_TYPE_AUDIO,
1521  .filter_frame = filter_frame,
1522  },
1523 };
1524 
1525 #define OFFSET(x) offsetof(AudioIIRContext, x)
1526 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1527 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1528 
1529 static const AVOption aiir_options[] = {
1530  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1531  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1532  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1533  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1534  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1535  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1536  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1537  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1538  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1539  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1540  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, .unit = "format" },
1541  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, .unit = "format" },
1542  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "format" },
1543  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "format" },
1544  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "format" },
1545  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "format" },
1546  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, .unit = "format" },
1547  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1548  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1549  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "process" },
1550  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "process" },
1551  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "process" },
1552  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1553  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1554  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
1555  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
1556  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
1557  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "precision" },
1558  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1559  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1560  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1561  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1562  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1563  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1564  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1565  { NULL },
1566 };
1567 
1568 AVFILTER_DEFINE_CLASS(aiir);
1569 
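/* Illustrative usage (not part of the original source): a one-pole low-pass
 * y[n] = 0.1*x[n] + 0.9*y[n-1], given as transfer-function coefficients and
 * processed directly:
 *   ffmpeg -i in.wav -af "aiir=zeros=0.1:poles=1 -0.9:format=tf:process=d" out.wav
 */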
1570 const AVFilter ff_af_aiir = {
1571  .name = "aiir",
1572  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1573  .priv_size = sizeof(AudioIIRContext),
1574  .priv_class = &aiir_class,
1575  .init = init,
1576  .uninit = uninit,
1577  FILTER_INPUTS(inputs),
1578  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1579  AVFILTER_FLAG_SLICE_THREADS,
1580  FILTER_QUERY_FUNC(query_formats),
1581 };
coef_sf2zf
static double coef_sf2zf(double *a, int N, int n)
Definition: af_aiir.c:950
Pair
Definition: af_aiir.c:37
M
#define M(a, b)
Definition: vp3dsp.c:48
AudioIIRContext::format
int format
Definition: af_aiir.c:63
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:112
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:97
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:154
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_clip
#define av_clip
Definition: common.h:98
mix
static int mix(int c0, int c1)
Definition: 4xm.c:715
r
const char * r
Definition: vf_curves.c:126
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:435
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:424
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:511
IIRChannel::clippings
int clippings
Definition: af_aiir.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:248
AF
#define AF
Definition: af_aiir.c:1526
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1517
matrix
Definition: vc1dsp.c:42
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:48
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:42
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1529
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:819
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
RE
#define RE(x, ch)
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:449
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:68
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:53
w
uint8_t w
Definition: llviddspenc.c:38
AVOption
AVOption.
Definition: opt.h:346
b
#define b
Definition: input.c:41
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:114
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:159
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:65
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:397
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:77
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1024
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:821
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1261
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:59
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:785
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:170
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1439
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:71
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:153
sample_rate
sample_rate
Definition: ffmpeg_filter.c:425
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:59
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
BiquadContext
Definition: af_aiir.c:41
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2035
fail
#define fail()
Definition: checkasm.h:179
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_aiir.c:80
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
AudioIIRContext::h
int h
Definition: af_aiir.c:67
a1
#define a1
Definition: regdef.h:47
AudioIIRContext::process
int process
Definition: af_aiir.c:64
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1384
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:557
mask
static const uint16_t mask[17]
Definition: lzw.c:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:237
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:678
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:873
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:31
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:521
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:51
NAN
#define NAN
Definition: mathematics.h:115
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:182
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:170
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:78
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:637
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:245
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:69
ff_set_common_all_channel_counts
int ff_set_common_all_channel_counts(AVFilterContext *ctx)
Equivalent to ff_set_common_channel_layouts(ctx, ff_all_channel_counts())
Definition: formats.c:803
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:529
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1453
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:916
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
BiquadContext::w1
double w1
Definition: af_aiir.c:44
BiquadContext::w2
double w2
Definition: af_aiir.c:44
format
static const char *const format[]
Definition: af_aiir.c:447
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1066
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
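hypot() computes sqrt(x*x + y*y) while avoiding overflow in the intermediate squares, which makes it the natural choice for complex magnitudes; a small hedged example pairing it with atan2() for the phase:
#include <math.h>

static void to_polar_sketch(double re, double im, double *mag, double *phase)
{
    *mag   = hypot(re, im);    /* |re + j*im| */
    *phase = atan2(im, re);    /* arg(re + j*im) in radians */
}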
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:573
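A hedged sketch of the usual audio-filter pattern these two helpers support: process in place when the input frame is writable, otherwise allocate a fresh output buffer and copy the metadata across. ff_get_audio_buffer() and the helper name are assumptions about typical usage, not a copy of this filter's filter_frame().
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "audio.h"
#include "avfilter.h"

static int pick_output_frame_sketch(AVFilterLink *outlink, AVFrame *in, AVFrame **out)
{
    if (av_frame_is_writable(in)) {
        *out = in;                                   /* filter in place */
    } else {
        *out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!*out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(*out, in);               /* keep pts, metadata, ... */
    }
    return 0;
}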
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:422
IM
#define IM(x, ch)
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:2036
a
The reader does not expect b to be semantically signed here, and if the code is later changed, by perhaps adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
xga_font_data.h
N
#define N
Definition: af_mcompand.c:53
fact
static double fact(double i)
Definition: af_aiir.c:943
IIRChannel::g
double g
Definition: af_aiir.c:50
Pair::b
int b
Definition: af_aiir.c:38
M_PI
#define M_PI
Definition: mathematics.h:67
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
internal.h
W
@ W
Definition: vf_addroi.c:27
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:59
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:420
VF
#define VF
Definition: af_aiir.c:1527
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:75
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1525
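A hedged, hypothetical illustration of how OFFSET() is normally combined with an AVOption entry; the option name, range and FLAGS macro below are not taken from this filter's real options table.
#include "libavutil/opt.h"

#define FLAGS (AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)   /* hypothetical */

static const AVOption example_options[] = {
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, { .dbl = 1 }, 0, 1, FLAGS },
    { NULL }
};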
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:49
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
a2
#define a2
Definition: regdef.h:48
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ThreadData
Used for passing data between threads.
Definition: af_aiir.c:33
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
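av_inv_q() simply swaps numerator and denominator; a hedged one-line example of the common frame-rate to time-base conversion:
#include "libavutil/rational.h"

static AVRational rate_to_time_base_sketch(AVRational frame_rate)
{
    return av_inv_q(frame_rate);   /* e.g. {25, 1} becomes {1, 25} */
}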
AudioIIRContext::precision
int precision
Definition: af_aiir.c:65
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:969
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:60
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1097
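get_response()'s body is not reproduced in this index, but for the transfer-function case its signature matches the standard evaluation of H(z) = sum_k b[k] z^-k / sum_k a[k] z^-k at z = e^(j*w). A hedged sketch of that computation:
#include <math.h>

static void tf_response_sketch(double w, const double *b, const double *a,
                               int nb_b, int nb_a, double *magnitude, double *phase)
{
    double num_re = 0., num_im = 0., den_re = 0., den_im = 0.;

    for (int k = 0; k < nb_b; k++) {
        num_re += b[k] * cos(-k * w);
        num_im += b[k] * sin(-k * w);
    }
    for (int k = 0; k < nb_a; k++) {
        den_re += a[k] * cos(-k * w);
        den_im += a[k] * sin(-k * w);
    }

    *magnitude = hypot(num_re, num_im) / hypot(den_re, den_im);
    *phase     = atan2(num_im, num_re) - atan2(den_im, den_re);
}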
AVFilter
Filter definition.
Definition: avfilter.h:166
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:496
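The body of cmul() is not shown here, but a complex multiply with this signature conventionally computes (re + j*im) * (re2 + j*im2); a hedged sketch:
static void cmul_sketch(double re, double im, double re2, double im2,
                        double *RE, double *IM)
{
    *RE = re * re2 - im * im2;   /* real part */
    *IM = re * im2 + im * re2;   /* imaginary part */
}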
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:229
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:60
Pair::a
int a
Definition: af_aiir.c:38
fmax
double fmax(double, double)
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:889
BiquadContext::b
double b[3]
Definition: af_aiir.c:43
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1144
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:66
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1092
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
AudioIIRContext::channels
int channels
Definition: af_aiir.c:74
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:605
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
factor
static const int factor[16]
Definition: vf_pp7.c:78
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:293
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:350
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:510
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:767
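A hedged sketch of a second-order section with the parameter order shown above, written as the textbook direct form I difference equation y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]; the filter's actual state handling (see BiquadContext::w1/w2) may use a different realization.
static void biquad_process_sketch(double *x, double *y, int length,
                                  double b0, double b1, double b2,
                                  double a1, double a2)
{
    double xm1 = 0., xm2 = 0., ym1 = 0., ym2 = 0.;   /* zero initial state */

    for (int n = 0; n < length; n++) {
        double in  = x[n];
        double out = b0 * in + b1 * xm1 + b2 * xm2 - a1 * ym1 - a2 * ym2;

        xm2 = xm1; xm1 = in;
        ym2 = ym1; ym1 = out;
        y[n] = out;
    }
}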
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:502
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:137
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:57
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:73
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:2034
IIRChannel::fir
double fir
Definition: af_aiir.c:52
ff_af_aiir
const AVFilter ff_af_aiir
Definition: af_aiir.c:1570
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:134
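A hedged sketch of how a slice-threaded audio filter typically fans work out through ff_filter_execute(), one job per channel, capped at the available worker threads; the wrapper function is illustrative, not this filter's filter_frame().
static int run_channels_sketch(AVFilterContext *ctx, AVFrame *in, AVFrame *out)
{
    AudioIIRContext *s = ctx->priv;
    ThreadData td = { .in = in, .out = out };

    /* one job per channel; iir_channel() receives the channel index as jobnr */
    return ff_filter_execute(ctx, s->iir_channel, &td, NULL,
                             FFMIN(s->channels, ff_filter_get_nb_threads(ctx)));
}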
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:62
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1497
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:364
IIRChannel
Definition: af_aiir.c:47
AudioIIRContext::mix
double mix
Definition: af_aiir.c:61
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:997
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1043
min
float min
Definition: vorbis_enc_data.h:429
AudioIIRContext::w
int w
Definition: af_aiir.c:67