FFmpeg: libavfilter/af_afir.c
/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * An arbitrary audio FIR filter
 */
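
/*
 * Typical use (illustrative command line, not part of this file): the first
 * input is the audio to be filtered, each additional input supplies one
 * impulse response (IR) stream, e.g.
 *
 *   ffmpeg -i input.wav -i ir.wav -lavfi afir output.wav
 *
 * The filter convolves the input with the selected IR using partitioned,
 * FFT-based convolution so that long IRs remain affordable.
 */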

#include <float.h>

#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/tx.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/rational.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "af_afirdsp.h"

#define MAX_IR_STREAMS 32

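/*
 * Per-segment state for the non-uniform partitioned convolution: each
 * segment covers nb_partitions partitions of part_size samples and owns
 * its own (i)RDFT contexts, frequency-domain coefficients and scratch
 * buffers for every channel.
 */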
typedef struct AudioFIRSegment {
    int nb_partitions;
    int part_size;
    int block_size;
    int fft_length;
    int coeff_size;
    int input_size;
    int input_offset;

    int *output_offset;
    int *part_index;

    AVFrame *sumin;
    AVFrame *sumout;
    AVFrame *blockout;
    AVFrame *tempin;
    AVFrame *tempout;
    AVFrame *buffer;
    AVFrame *coeff;
    AVFrame *input;
    AVFrame *output;

    AVTXContext **ctx, **tx, **itx;
    av_tx_fn ctx_fn, tx_fn, itx_fn;
} AudioFIRSegment;

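/*
 * Filter-wide state: user options (gains, IR normalization, partition
 * sizes, IR selection), per-IR segment plans, the loaded and normalized
 * IR frames, and the crossfade buffers used when switching IRs at runtime.
 */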
typedef struct AudioFIRContext {
    const AVClass *class;

    float wet_gain;
    float dry_gain;
    float length;
    int gtype;
    float ir_norm;
    float ir_link;
    float ir_gain;
    int ir_format;
    int ir_load;
    float max_ir_len;
    int response;
    int w, h;
    AVRational frame_rate;
    int ir_channel;
    int minp;
    int maxp;
    int nb_irs;
    int prev_selir;
    int selir;
    int precision;
    int format;

    int eof_coeffs[MAX_IR_STREAMS];
    int have_coeffs[MAX_IR_STREAMS];
    int nb_taps[MAX_IR_STREAMS];
    int nb_segments[MAX_IR_STREAMS];
    int max_offset[MAX_IR_STREAMS];
    int nb_channels;
    int one2many;
    int prev_is_disabled;
    int *loading;
    double *ch_gain;

    AudioFIRSegment seg[MAX_IR_STREAMS][1024];

    AVFrame *in;
    AVFrame *xfade[2];
    AVFrame *fadein[2];
    AVFrame *ir[MAX_IR_STREAMS];
    AVFrame *norm_ir[MAX_IR_STREAMS];
    int min_part_size;
    int max_part_size;
    int64_t pts;

    AudioFIRDSPContext afirdsp;
    AVFloatDSPContext *fdsp;
} AudioFIRContext;

#define DEPTH 32
#include "afir_template.c"

#undef DEPTH
#define DEPTH 64
#include "afir_template.c"

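/*
 * Process one channel of the output frame in min_part_size quanta,
 * dispatching to the float or double template. While an IR switch is in
 * progress, loading[ch] advances by one quantum per block so the
 * crossfade between the previous and the new IR tracks the output.
 */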
static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
{
    AudioFIRContext *s = ctx->priv;
    const int min_part_size = s->min_part_size;
    const int prev_selir = s->prev_selir;
    const int selir = s->selir;

    for (int offset = 0; offset < out->nb_samples; offset += min_part_size) {
        switch (s->format) {
        case AV_SAMPLE_FMT_FLTP:
            fir_quantums_float(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
            break;
        case AV_SAMPLE_FMT_DBLP:
            fir_quantums_double(ctx, s, out, min_part_size, ch, offset, prev_selir, selir);
            break;
        }

        if (selir != prev_selir && s->loading[ch] != 0)
            s->loading[ch] += min_part_size;
    }

    return 0;
}

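/* Slice-threading worker: each job filters an even share of the channels. */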
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AVFrame *out = arg;
    const int start = (out->ch_layout.nb_channels * jobnr) / nb_jobs;
    const int end = (out->ch_layout.nb_channels * (jobnr+1)) / nb_jobs;

    for (int ch = start; ch < end; ch++)
        fir_channel(ctx, out, ch);

    return 0;
}

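/*
 * Filter one input frame: allocate a matching output buffer, run
 * fir_channels() across the available threads and hand the result to the
 * next filter. The input frame is consumed (freed) here.
 */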
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFrame *out;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    out->pts = s->pts = in->pts;

    s->in = in;
    ff_filter_execute(ctx, fir_channels, out, NULL,
                      FFMIN(outlink->ch_layout.nb_channels, ff_filter_get_nb_threads(ctx)));
    s->prev_is_disabled = ctx->is_disabled;

    av_frame_free(&in);
    s->in = NULL;

    return ff_filter_frame(outlink, out);
}

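/*
 * Allocate the transforms and buffers for one segment of the partition
 * plan. Forward and inverse RDFTs of length 2*part_size are created per
 * channel; the 1/sqrt(2*part_size) scale on each side combines to the
 * usual 1/(2*part_size) round-trip normalization.
 */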
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int selir,
                        int offset, int nb_partitions, int part_size, int index)
{
    AudioFIRContext *s = ctx->priv;
    const size_t cpu_align = av_cpu_max_align();
    union { double d; float f; } cscale, scale, iscale;
    enum AVTXType tx_type;
    int ret;

    seg->tx  = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->tx));
    seg->ctx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->ctx));
    seg->itx = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->itx));
    if (!seg->tx || !seg->ctx || !seg->itx)
        return AVERROR(ENOMEM);

    seg->fft_length    = (part_size + 1) * 2;
    seg->part_size     = part_size;
    seg->coeff_size    = FFALIGN(seg->part_size + 1, cpu_align);
    seg->block_size    = FFMAX(seg->coeff_size * 2, FFALIGN(seg->fft_length, cpu_align));
    seg->nb_partitions = nb_partitions;
    seg->input_size    = offset + s->min_part_size;
    seg->input_offset  = offset;

    seg->part_index    = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->part_index));
    seg->output_offset = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*seg->output_offset));
    if (!seg->part_index || !seg->output_offset)
        return AVERROR(ENOMEM);

    switch (s->format) {
    case AV_SAMPLE_FMT_FLTP:
        cscale.f = 1.f;
        scale.f  = 1.f / sqrtf(2.f * part_size);
        iscale.f = 1.f / sqrtf(2.f * part_size);
        tx_type  = AV_TX_FLOAT_RDFT;
        break;
    case AV_SAMPLE_FMT_DBLP:
        cscale.d = 1.0;
        scale.d  = 1.0 / sqrt(2.0 * part_size);
        iscale.d = 1.0 / sqrt(2.0 * part_size);
        tx_type  = AV_TX_DOUBLE_RDFT;
        break;
    default:
        av_assert1(0);
    }

    for (int ch = 0; ch < ctx->inputs[0]->ch_layout.nb_channels && part_size >= 1; ch++) {
        ret = av_tx_init(&seg->ctx[ch], &seg->ctx_fn, tx_type,
                         0, 2 * part_size, &cscale, 0);
        if (ret < 0)
            return ret;

        ret = av_tx_init(&seg->tx[ch], &seg->tx_fn, tx_type,
                         0, 2 * part_size, &scale, 0);
        if (ret < 0)
            return ret;
        ret = av_tx_init(&seg->itx[ch], &seg->itx_fn, tx_type,
                         1, 2 * part_size, &iscale, 0);
        if (ret < 0)
            return ret;
    }

    seg->sumin    = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
    seg->sumout   = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
    seg->blockout = ff_get_audio_buffer(ctx->inputs[0], seg->block_size * seg->nb_partitions);
    seg->tempin   = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
    seg->tempout  = ff_get_audio_buffer(ctx->inputs[0], seg->block_size);
    seg->buffer   = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
    seg->input    = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
    seg->output   = ff_get_audio_buffer(ctx->inputs[0], seg->part_size * 5);
    if (!seg->buffer || !seg->sumin || !seg->sumout || !seg->blockout ||
        !seg->input || !seg->output || !seg->tempin || !seg->tempout)
        return AVERROR(ENOMEM);

    return 0;
}

static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
{
    AudioFIRContext *s = ctx->priv;

    if (seg->ctx) {
        for (int ch = 0; ch < s->nb_channels; ch++)
            av_tx_uninit(&seg->ctx[ch]);
    }
    av_freep(&seg->ctx);

    if (seg->tx) {
        for (int ch = 0; ch < s->nb_channels; ch++)
            av_tx_uninit(&seg->tx[ch]);
    }
    av_freep(&seg->tx);

    if (seg->itx) {
        for (int ch = 0; ch < s->nb_channels; ch++)
            av_tx_uninit(&seg->itx[ch]);
    }
    av_freep(&seg->itx);

    av_freep(&seg->output_offset);
    av_freep(&seg->part_index);

    av_frame_free(&seg->tempin);
    av_frame_free(&seg->tempout);
    av_frame_free(&seg->blockout);
    av_frame_free(&seg->sumin);
    av_frame_free(&seg->sumout);
    av_frame_free(&seg->buffer);
    av_frame_free(&seg->input);
    av_frame_free(&seg->output);
    seg->input_size = 0;

    for (int i = 0; i < MAX_IR_STREAMS; i++)
        av_frame_free(&seg->coeff);
}

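/*
 * Build the partition plan for the selected IR and convert it to
 * frequency-domain coefficients. Partition sizes start at minp (rounded
 * down to a power of two) and double until maxp; each growing size adds
 * one partition (two for the very first segment), and once the maximum
 * size is reached the remaining taps all go into partitions of that size.
 * For example (illustrative numbers, not from this file): 48000 taps with
 * minp=8192 and maxp=32768 give segments of 2x8192, 1x16384 and 1x32768.
 */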
static int convert_coeffs(AVFilterContext *ctx, int selir)
{
    AudioFIRContext *s = ctx->priv;
    int ret, nb_taps, cur_nb_taps;

    if (!s->nb_taps[selir]) {
        int part_size, max_part_size;
        int left, offset = 0;

        s->nb_taps[selir] = ff_inlink_queued_samples(ctx->inputs[1 + selir]);
        if (s->nb_taps[selir] <= 0)
            return AVERROR(EINVAL);

        if (s->minp > s->maxp)
            s->maxp = s->minp;

        if (s->nb_segments[selir])
            goto skip;

        left = s->nb_taps[selir];
        part_size = 1 << av_log2(s->minp);
        max_part_size = 1 << av_log2(s->maxp);

        for (int i = 0; left > 0; i++) {
            int step = (part_size == max_part_size) ? INT_MAX : 1 + (i == 0);
            int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);

            s->nb_segments[selir] = i + 1;
            ret = init_segment(ctx, &s->seg[selir][i], selir, offset, nb_partitions, part_size, i);
            if (ret < 0)
                return ret;
            offset += nb_partitions * part_size;
            s->max_offset[selir] = offset;
            left -= nb_partitions * part_size;
            part_size *= 2;
            part_size = FFMIN(part_size, max_part_size);
        }
    }

skip:
    if (!s->ir[selir]) {
        ret = ff_inlink_consume_samples(ctx->inputs[1 + selir], s->nb_taps[selir], s->nb_taps[selir], &s->ir[selir]);
        if (ret < 0)
            return ret;
        if (ret == 0)
            return AVERROR_BUG;
    }

    cur_nb_taps = s->ir[selir]->nb_samples;
    nb_taps = cur_nb_taps;

    if (!s->norm_ir[selir] || s->norm_ir[selir]->nb_samples < nb_taps) {
        av_frame_free(&s->norm_ir[selir]);
        s->norm_ir[selir] = ff_get_audio_buffer(ctx->inputs[0], FFALIGN(nb_taps, 8));
        if (!s->norm_ir[selir])
            return AVERROR(ENOMEM);
    }

    av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
    av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments[selir]);

    switch (s->format) {
    case AV_SAMPLE_FMT_FLTP:
        for (int ch = 0; ch < s->nb_channels; ch++) {
            const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];

            s->ch_gain[ch] = ir_gain_float(ctx, s, nb_taps, tsrc);
        }

        if (s->ir_link) {
            float gain = +INFINITY;

            for (int ch = 0; ch < s->nb_channels; ch++)
                gain = fminf(gain, s->ch_gain[ch]);

            for (int ch = 0; ch < s->nb_channels; ch++)
                s->ch_gain[ch] = gain;
        }

        for (int ch = 0; ch < s->nb_channels; ch++) {
            const float *tsrc = (const float *)s->ir[selir]->extended_data[!s->one2many * ch];
            float *time = (float *)s->norm_ir[selir]->extended_data[ch];

            memcpy(time, tsrc, sizeof(*time) * nb_taps);
            for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
                time[i] = 0;

            ir_scale_float(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);

            for (int n = 0; n < s->nb_segments[selir]; n++) {
                AudioFIRSegment *seg = &s->seg[selir][n];

                if (!seg->coeff)
                    seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
                if (!seg->coeff)
                    return AVERROR(ENOMEM);

                for (int i = 0; i < seg->nb_partitions; i++)
                    convert_channel_float(ctx, s, ch, seg, i, selir);
            }
        }
        break;
    case AV_SAMPLE_FMT_DBLP:
        for (int ch = 0; ch < s->nb_channels; ch++) {
            const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];

            s->ch_gain[ch] = ir_gain_double(ctx, s, nb_taps, tsrc);
        }

        if (s->ir_link) {
            double gain = +INFINITY;

            for (int ch = 0; ch < s->nb_channels; ch++)
                gain = fmin(gain, s->ch_gain[ch]);

            for (int ch = 0; ch < s->nb_channels; ch++)
                s->ch_gain[ch] = gain;
        }

        for (int ch = 0; ch < s->nb_channels; ch++) {
            const double *tsrc = (const double *)s->ir[selir]->extended_data[!s->one2many * ch];
            double *time = (double *)s->norm_ir[selir]->extended_data[ch];

            memcpy(time, tsrc, sizeof(*time) * nb_taps);
            for (int i = FFMAX(1, s->length * nb_taps); i < nb_taps; i++)
                time[i] = 0;

            ir_scale_double(ctx, s, nb_taps, ch, time, s->ch_gain[ch]);

            for (int n = 0; n < s->nb_segments[selir]; n++) {
                AudioFIRSegment *seg = &s->seg[selir][n];

                if (!seg->coeff)
                    seg->coeff = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->coeff_size * 2);
                if (!seg->coeff)
                    return AVERROR(ENOMEM);

                for (int i = 0; i < seg->nb_partitions; i++)
                    convert_channel_double(ctx, s, ch, seg, i, selir);
            }
        }
        break;
    }

    s->have_coeffs[selir] = 1;

    return 0;
}

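/*
 * Check a queued IR stream: reject IRs longer than max_ir_len seconds and
 * mark eof_coeffs once no further IR samples can arrive on that input.
 */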
static int check_ir(AVFilterLink *link, int selir)
{
    AVFilterContext *ctx = link->dst;
    AudioFIRContext *s = ctx->priv;
    int nb_taps, max_nb_taps;

    nb_taps = ff_inlink_queued_samples(link);
    max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
    if (nb_taps > max_nb_taps) {
        av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
        return AVERROR(EINVAL);
    }

    if (ff_inlink_check_available_samples(link, nb_taps + 1) == 1)
        s->eof_coeffs[selir] = 1;

    return 0;
}

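/*
 * Filter activation callback: wait for the IR inputs to finish and convert
 * their coefficients (all at init, or lazily with irload=access), then
 * consume the main input in multiples of min_part_size, filter it, and
 * forward EOF and frame requests between the links.
 */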
static int activate(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, status, available, wanted;
    AVFrame *in = NULL;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    for (int i = 0; i < s->nb_irs; i++) {
        const int selir = i;

        if (s->ir_load && selir != s->selir)
            continue;

        if (!s->eof_coeffs[selir]) {
            ret = check_ir(ctx->inputs[1 + selir], selir);
            if (ret < 0)
                return ret;

            if (!s->eof_coeffs[selir]) {
                if (ff_outlink_frame_wanted(ctx->outputs[0]))
                    ff_inlink_request_frame(ctx->inputs[1 + selir]);
                return 0;
            }
        }

        if (!s->have_coeffs[selir] && s->eof_coeffs[selir]) {
            ret = convert_coeffs(ctx, selir);
            if (ret < 0)
                return ret;
        }
    }

    available = ff_inlink_queued_samples(ctx->inputs[0]);
    wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
    ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
    if (ret > 0)
        ret = fir_frame(s, in, outlink);

    if (s->selir != s->prev_selir && s->loading[0] == 0)
        s->prev_selir = s->selir;

    if (ret < 0)
        return ret;

    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    return FFERROR_NOT_READY;
}

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    const AudioFIRContext *s = ctx->priv;
    static const enum AVSampleFormat sample_fmts[3][3] = {
        { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
        { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE },
        { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE },
    };
    int ret;

    if (!s->ir_format) {
        AVFilterChannelLayouts *mono = NULL;
        AVFilterChannelLayouts *layouts = ff_all_channel_counts();

        if ((ret = ff_channel_layouts_ref(layouts, &cfg_in[0]->channel_layouts)) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &cfg_out[0]->channel_layouts)) < 0)
            return ret;

        ret = ff_add_channel_layout(&mono, &(AVChannelLayout)AV_CHANNEL_LAYOUT_MONO);
        if (ret)
            return ret;
        for (int i = 1; i < ctx->nb_inputs; i++) {
            if ((ret = ff_channel_layouts_ref(mono, &cfg_in[i]->channel_layouts)) < 0)
                return ret;
        }
    }

    if ((ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out,
                                                sample_fmts[s->precision])) < 0)
        return ret;

    return 0;
}

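/*
 * Configure the output link from the main input and precompute the
 * per-sample crossfade ramps: xfade[0] rises linearly from 1/min_part_size
 * to 1 over one block, xfade[1] is its complement, and the two are used to
 * blend the old and new IR while an IR switch is loading.
 */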
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRContext *s = ctx->priv;
    int ret;

    s->one2many = ctx->inputs[1 + s->selir]->ch_layout.nb_channels == 1;
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base = ctx->inputs[0]->time_base;
    if ((ret = av_channel_layout_copy(&outlink->ch_layout, &ctx->inputs[0]->ch_layout)) < 0)
        return ret;
    outlink->ch_layout.nb_channels = ctx->inputs[0]->ch_layout.nb_channels;

    s->format = outlink->format;
    s->nb_channels = outlink->ch_layout.nb_channels;
    s->ch_gain = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->ch_gain));
    s->loading = av_calloc(ctx->inputs[0]->ch_layout.nb_channels, sizeof(*s->loading));
    if (!s->loading || !s->ch_gain)
        return AVERROR(ENOMEM);

    s->fadein[0] = ff_get_audio_buffer(outlink, s->min_part_size);
    s->fadein[1] = ff_get_audio_buffer(outlink, s->min_part_size);
    if (!s->fadein[0] || !s->fadein[1])
        return AVERROR(ENOMEM);

    s->xfade[0] = ff_get_audio_buffer(outlink, s->min_part_size);
    s->xfade[1] = ff_get_audio_buffer(outlink, s->min_part_size);
    if (!s->xfade[0] || !s->xfade[1])
        return AVERROR(ENOMEM);

    switch (s->format) {
    case AV_SAMPLE_FMT_FLTP:
        for (int ch = 0; ch < s->nb_channels; ch++) {
            float *dst0 = (float *)s->xfade[0]->extended_data[ch];
            float *dst1 = (float *)s->xfade[1]->extended_data[ch];

            for (int n = 0; n < s->min_part_size; n++) {
                dst0[n] = (n + 1.f) / s->min_part_size;
                dst1[n] = 1.f - dst0[n];
            }
        }
        break;
    case AV_SAMPLE_FMT_DBLP:
        for (int ch = 0; ch < s->nb_channels; ch++) {
            double *dst0 = (double *)s->xfade[0]->extended_data[ch];
            double *dst1 = (double *)s->xfade[1]->extended_data[ch];

            for (int n = 0; n < s->min_part_size; n++) {
                dst0[n] = (n + 1.0) / s->min_part_size;
                dst1[n] = 1.0 - dst0[n];
            }
        }
        break;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;

    av_freep(&s->fdsp);
    av_freep(&s->ch_gain);
    av_freep(&s->loading);

    for (int i = 0; i < s->nb_irs; i++) {
        for (int j = 0; j < s->nb_segments[i]; j++)
            uninit_segment(ctx, &s->seg[i][j]);

        av_frame_free(&s->ir[i]);
        av_frame_free(&s->norm_ir[i]);
    }

    av_frame_free(&s->fadein[0]);
    av_frame_free(&s->fadein[1]);

    av_frame_free(&s->xfade[0]);
    av_frame_free(&s->xfade[1]);
}

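/*
 * Create the input pads ("main" plus one "ir%d" pad per IR stream), set up
 * the DSP helpers and round the requested partition sizes down to powers
 * of two.
 */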
static av_cold int init(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterPad pad;
    int ret;

    s->prev_selir = FFMIN(s->nb_irs - 1, s->selir);

    pad = (AVFilterPad) {
        .name = "main",
        .type = AVMEDIA_TYPE_AUDIO,
    };

    ret = ff_append_inpad(ctx, &pad);
    if (ret < 0)
        return ret;

    for (int n = 0; n < s->nb_irs; n++) {
        pad = (AVFilterPad) {
            .name = av_asprintf("ir%d", n),
            .type = AVMEDIA_TYPE_AUDIO,
        };

        if (!pad.name)
            return AVERROR(ENOMEM);

        ret = ff_append_inpad_free_name(ctx, &pad);
        if (ret < 0)
            return ret;
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    ff_afir_init(&s->afirdsp);

    s->min_part_size = 1 << av_log2(s->minp);
    s->max_part_size = 1 << av_log2(s->maxp);

    return 0;
}

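/*
 * Apply runtime option changes; when the selected IR changes, remember the
 * previous selection and arm the per-channel loading counters so the
 * output crossfades to the new IR.
 */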
static int process_command(AVFilterContext *ctx,
                           const char *cmd,
                           const char *arg,
                           char *res,
                           int res_len,
                           int flags)
{
    AudioFIRContext *s = ctx->priv;
    int prev_selir, ret;

    prev_selir = s->selir;
    ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
    if (ret < 0)
        return ret;

    s->selir = FFMIN(s->nb_irs - 1, s->selir);
    if (s->selir != prev_selir) {
        s->prev_selir = prev_selir;

        for (int ch = 0; ch < s->nb_channels; ch++)
            s->loading[ch] = 1;
    }

    return 0;
}

#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

static const AVOption afir_options[] = {
    { "dry",    "set dry gain",      OFFSET(dry_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AFR },
    { "wet",    "set wet gain",      OFFSET(wet_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AFR },
    { "length", "set IR length",     OFFSET(length),     AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "gtype",  "set IR auto gain type", OFFSET(gtype),  AV_OPT_TYPE_INT,   {.i64=0},   -1,  4, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "none",  "without auto gain", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "peak",  "peak gain",         0, AV_OPT_TYPE_CONST, {.i64=0},  0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "dc",    "DC gain",           0, AV_OPT_TYPE_CONST, {.i64=1},  0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "gn",    "gain to noise",     0, AV_OPT_TYPE_CONST, {.i64=2},  0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "ac",    "AC gain",           0, AV_OPT_TYPE_CONST, {.i64=3},  0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    {  "rms",   "RMS gain",          0, AV_OPT_TYPE_CONST, {.i64=4},  0, 0, AF|AV_OPT_FLAG_DEPRECATED, .unit = "gtype" },
    { "irnorm", "set IR norm",       OFFSET(ir_norm),    AV_OPT_TYPE_FLOAT, {.dbl=1},   -1,  2, AF },
    { "irlink", "set IR link",       OFFSET(ir_link),    AV_OPT_TYPE_BOOL,  {.i64=1},    0,  1, AF },
    { "irgain", "set IR gain",       OFFSET(ir_gain),    AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "irfmt",  "set IR format",     OFFSET(ir_format),  AV_OPT_TYPE_INT,   {.i64=1},    0,  1, AF, .unit = "irfmt" },
    {  "mono",  "single channel",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irfmt" },
    {  "input", "same as input",     0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irfmt" },
    { "maxir",  "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF|AV_OPT_FLAG_DEPRECATED },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF|AV_OPT_FLAG_DEPRECATED },
    { "size",   "set video size",    OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF|AV_OPT_FLAG_DEPRECATED },
    { "rate",   "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF|AV_OPT_FLAG_DEPRECATED },
    { "minp",   "set min partition size", OFFSET(minp),  AV_OPT_TYPE_INT, {.i64=8192}, 1, 65536, AF },
    { "maxp",   "set max partition size", OFFSET(maxp),  AV_OPT_TYPE_INT, {.i64=8192}, 8, 65536, AF },
    { "nbirs",  "set number of input IRs", OFFSET(nb_irs), AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF },
    { "ir",     "select IR",         OFFSET(selir),      AV_OPT_TYPE_INT,   {.i64=0},    0, 31, AFR },
    { "precision", "set processing precision", OFFSET(precision), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, AF, .unit = "precision" },
    {  "auto",  "set auto processing precision",                  0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
    {  "float", "set single-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
    {  "double","set double-floating point processing precision", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
    { "irload", "set IR loading type", OFFSET(ir_load), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AF, .unit = "irload" },
    {  "init",   "load all IRs on init", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "irload" },
    {  "access", "load IR on access",    0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "irload" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(afir);

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
};

const AVFilter ff_af_afir = {
    .name            = "afir",
    .description     = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
    .priv_size       = sizeof(AudioFIRContext),
    .priv_class      = &afir_class,
    FILTER_QUERY_FUNC2(query_formats),
    FILTER_OUTPUTS(outputs),
    .init            = init,
    .activate        = activate,
    .uninit          = uninit,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_INPUTS |
                       AVFILTER_FLAG_SLICE_THREADS  |
                       AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};