FFmpeg
af_afir.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * An arbitrary audio FIR filter
24  */
25 
26 #include <float.h>
27 
28 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
31 #include "libavutil/float_dsp.h"
32 #include "libavutil/intreadwrite.h"
33 #include "libavutil/opt.h"
35 #include "libavcodec/avfft.h"
36 
37 #include "audio.h"
38 #include "avfilter.h"
39 #include "filters.h"
40 #include "formats.h"
41 #include "internal.h"
42 #include "af_afir.h"
43 
/**
 * Multiply-accumulate of complex spectra, C reference implementation.
 *
 * Computes sum[k] += t[k] * c[k] over len complex bins stored as
 * interleaved (re, im) float pairs, then folds in the extra real-only
 * bin stored at index 2*len (the Nyquist coefficient of the packed
 * real-FFT layout used by this filter).
 */
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
{
    ptrdiff_t n = 0;

    while (n < len) {
        const float re_c = c[2 * n];
        const float im_c = c[2 * n + 1];
        const float re_t = t[2 * n];
        const float im_t = t[2 * n + 1];

        sum[2 * n]     += re_t * re_c - im_t * im_c;
        sum[2 * n + 1] += re_t * im_c + im_t * re_c;
        n++;
    }

    /* trailing real-only (Nyquist) bin: purely real product */
    sum[2 * n] += t[2 * n] * c[2 * n];
}
60 
61 static void direct(const float *in, const FFTComplex *ir, int len, float *out)
62 {
63  for (int n = 0; n < len; n++)
64  for (int m = 0; m <= n; m++)
65  out[n] += ir[m].re * in[n - m];
66 }
67 
68 static void fir_fadd(AudioFIRContext *s, float *dst, const float *src, int nb_samples)
69 {
70  if ((nb_samples & 15) == 0 && nb_samples >= 16) {
71  s->fdsp->vector_fmac_scalar(dst, src, 1.f, nb_samples);
72  } else {
73  for (int n = 0; n < nb_samples; n++)
74  dst[n] += src[n];
75  }
76 }
77 
/**
 * Process one quantum (up to s->min_part_size samples) of a single
 * channel through every partition segment and accumulate the wet
 * result into @p out at @p offset.
 *
 * Each segment keeps a sliding input buffer; a segment only computes a
 * new output block once a full part_size worth of input has been
 * gathered, otherwise it replays its previously computed block.
 *
 * @param ctx    filter context (priv is AudioFIRContext)
 * @param out    destination frame, written at extended_data[ch] + offset
 * @param ch     channel index
 * @param offset sample offset of this quantum inside @p out
 * @return 0 (always succeeds)
 */
static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
{
    AudioFIRContext *s = ctx->priv;
    const float *in = (const float *)s->in->extended_data[ch] + offset;
    float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
    /* last quantum of a frame may be shorter than min_part_size */
    const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
    int n, i, j;

    for (int segment = 0; segment < s->nb_segments; segment++) {
        AudioFIRSegment *seg = &s->seg[segment];
        float *src = (float *)seg->input->extended_data[ch];
        float *dst = (float *)seg->output->extended_data[ch];
        float *sum = (float *)seg->sum->extended_data[ch];

        /* append dry-gained input at the tail of the segment's sliding buffer */
        if (s->min_part_size >= 8) {
            s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4));
            emms_c();
        } else {
            for (n = 0; n < nb_samples; n++)
                src[seg->input_offset + n] = in[n] * s->dry_gain;
        }

        seg->output_offset[ch] += s->min_part_size;
        if (seg->output_offset[ch] == seg->part_size) {
            /* a full partition of input is ready: fall through and compute */
            seg->output_offset[ch] = 0;
        } else {
            /* not enough new input yet: slide the buffer, reuse the part of
             * the previously computed output block at the current offset */
            memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

            dst += seg->output_offset[ch];
            fir_fadd(s, ptr, dst, nb_samples);
            continue;
        }

        if (seg->part_size < 8) {
            /* tiny partitions: time-domain path, summed over all partitions
             * walking the coefficient ring backwards from part_index */
            memset(dst, 0, sizeof(*dst) * seg->part_size * seg->nb_partitions);

            j = seg->part_index[ch];

            for (i = 0; i < seg->nb_partitions; i++) {
                const int coffset = j * seg->coeff_size;
                /* one2many: a mono IR is shared by all channels (index 0) */
                const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;

                direct(src, coeff, nb_samples, dst);

                if (j == 0)
                    j = seg->nb_partitions;
                j--;
            }

            seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;

            memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

            for (n = 0; n < nb_samples; n++) {
                ptr[n] += dst[n];
            }
            continue;
        }

        /* frequency-domain path: forward RDFT of the new input block */
        memset(sum, 0, sizeof(*sum) * seg->fft_length);
        block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
        memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));

        memcpy(block, src, sizeof(*src) * seg->part_size);

        av_rdft_calc(seg->rdft[ch], block);
        /* unpack RDFT layout: move the Nyquist value (packed in bin 1)
         * to its own slot at 2*part_size so fcmul_add can treat it as
         * a trailing real-only bin */
        block[2 * seg->part_size] = block[1];
        block[1] = 0;

        j = seg->part_index[ch];

        /* multiply-accumulate the stored input spectra against the
         * coefficient partitions, newest input * first partition, etc. */
        for (i = 0; i < seg->nb_partitions; i++) {
            const int coffset = j * seg->coeff_size;
            const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
            const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;

            s->afirdsp.fcmul_add(sum, block, (const float *)coeff, seg->part_size);

            if (j == 0)
                j = seg->nb_partitions;
            j--;
        }

        /* repack Nyquist bin and go back to the time domain */
        sum[1] = sum[2 * seg->part_size];
        av_rdft_calc(seg->irdft[ch], sum);

        /* overlap-add: first half plus overlap saved from last round ... */
        buf = (float *)seg->buffer->extended_data[ch];
        fir_fadd(s, buf, sum, seg->part_size);

        memcpy(dst, buf, seg->part_size * sizeof(*dst));

        /* ... second half becomes the overlap for the next round */
        buf = (float *)seg->buffer->extended_data[ch];
        memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));

        seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;

        memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

        fir_fadd(s, ptr, dst, nb_samples);
    }

    /* apply wet gain to the accumulated quantum */
    if (s->min_part_size >= 8) {
        s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
        emms_c();
    } else {
        for (n = 0; n < nb_samples; n++)
            ptr[n] *= s->wet_gain;
    }

    return 0;
}
189 
190 static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
191 {
192  AudioFIRContext *s = ctx->priv;
193 
194  for (int offset = 0; offset < out->nb_samples; offset += s->min_part_size) {
195  fir_quantum(ctx, out, ch, offset);
196  }
197 
198  return 0;
199 }
200 
201 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
202 {
203  AVFrame *out = arg;
204  const int start = (out->channels * jobnr) / nb_jobs;
205  const int end = (out->channels * (jobnr+1)) / nb_jobs;
206 
207  for (int ch = start; ch < end; ch++) {
208  fir_channel(ctx, out, ch);
209  }
210 
211  return 0;
212 }
213 
/**
 * Filter one input frame: allocate a matching output frame, run the
 * convolution, stamp timestamps and push the result downstream.
 *
 * Takes ownership of @p in (always freed before returning).
 */
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFrame *out = NULL;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    /* first frame establishes the running output timestamp */
    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;
    s->in = in;
    /* NOTE(review): no visible dispatch of the per-channel filtering
     * (fir_channels) between setting s->in and freeing it below — this
     * call appears to have been lost in extraction; confirm against the
     * upstream source before relying on this listing. */

    out->pts = s->pts;
    if (s->pts != AV_NOPTS_VALUE)
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    av_frame_free(&in);
    s->in = NULL;

    return ff_filter_frame(outlink, out);
}
240 
241 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
242 {
243  const uint8_t *font;
244  int font_height;
245  int i;
246 
247  font = avpriv_cga_font, font_height = 8;
248 
249  for (i = 0; txt[i]; i++) {
250  int char_y, mask;
251 
252  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
253  for (char_y = 0; char_y < font_height; char_y++) {
254  for (mask = 0x80; mask; mask >>= 1) {
255  if (font[txt[i] * font_height + char_y] & mask)
256  AV_WL32(p, color);
257  p += 4;
258  }
259  p += pic->linesize[0] - 8 * 4;
260  }
261  }
262 }
263 
264 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
265 {
266  int dx = FFABS(x1-x0);
267  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
268  int err = (dx>dy ? dx : -dy) / 2, e2;
269 
270  for (;;) {
271  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
272 
273  if (x0 == x1 && y0 == y1)
274  break;
275 
276  e2 = err;
277 
278  if (e2 >-dx) {
279  err -= dy;
280  x0--;
281  }
282 
283  if (e2 < dy) {
284  err += dx;
285  y0 += sy;
286  }
287  }
288 }
289 
291 {
292  AudioFIRContext *s = ctx->priv;
293  float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
294  float min_delay = FLT_MAX, max_delay = FLT_MIN;
295  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
296  char text[32];
297  int channel, i, x;
298 
299  memset(out->data[0], 0, s->h * out->linesize[0]);
300 
301  phase = av_malloc_array(s->w, sizeof(*phase));
302  mag = av_malloc_array(s->w, sizeof(*mag));
303  delay = av_malloc_array(s->w, sizeof(*delay));
304  if (!mag || !phase || !delay)
305  goto end;
306 
307  channel = av_clip(s->ir_channel, 0, s->ir[s->selir]->channels - 1);
308  for (i = 0; i < s->w; i++) {
309  const float *src = (const float *)s->ir[s->selir]->extended_data[channel];
310  double w = i * M_PI / (s->w - 1);
311  double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;
312 
313  for (x = 0; x < s->nb_taps; x++) {
314  real += cos(-x * w) * src[x];
315  imag += sin(-x * w) * src[x];
316  real_num += cos(-x * w) * src[x] * x;
317  imag_num += sin(-x * w) * src[x] * x;
318  }
319 
320  mag[i] = hypot(real, imag);
321  phase[i] = atan2(imag, real);
322  div = real * real + imag * imag;
323  delay[i] = (real_num * real + imag_num * imag) / div;
324  min = fminf(min, mag[i]);
325  max = fmaxf(max, mag[i]);
326  min_delay = fminf(min_delay, delay[i]);
327  max_delay = fmaxf(max_delay, delay[i]);
328  }
329 
330  for (i = 0; i < s->w; i++) {
331  int ymag = mag[i] / max * (s->h - 1);
332  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
333  int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
334 
335  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
336  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
337  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
338 
339  if (prev_ymag < 0)
340  prev_ymag = ymag;
341  if (prev_yphase < 0)
342  prev_yphase = yphase;
343  if (prev_ydelay < 0)
344  prev_ydelay = ydelay;
345 
346  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
347  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
348  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
349 
350  prev_ymag = ymag;
351  prev_yphase = yphase;
352  prev_ydelay = ydelay;
353  }
354 
355  if (s->w > 400 && s->h > 100) {
356  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
357  snprintf(text, sizeof(text), "%.2f", max);
358  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
359 
360  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
361  snprintf(text, sizeof(text), "%.2f", min);
362  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
363 
364  drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
365  snprintf(text, sizeof(text), "%.2f", max_delay);
366  drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);
367 
368  drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
369  snprintf(text, sizeof(text), "%.2f", min_delay);
370  drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
371  }
372 
373 end:
374  av_free(delay);
375  av_free(phase);
376  av_free(mag);
377 }
378 
380  int offset, int nb_partitions, int part_size)
381 {
382  AudioFIRContext *s = ctx->priv;
383 
384  seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
385  seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
386  if (!seg->rdft || !seg->irdft)
387  return AVERROR(ENOMEM);
388 
389  seg->fft_length = part_size * 2 + 1;
390  seg->part_size = part_size;
391  seg->block_size = FFALIGN(seg->fft_length, 32);
392  seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
393  seg->nb_partitions = nb_partitions;
394  seg->input_size = offset + s->min_part_size;
395  seg->input_offset = offset;
396 
397  seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
398  seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
399  if (!seg->part_index || !seg->output_offset)
400  return AVERROR(ENOMEM);
401 
402  for (int ch = 0; ch < ctx->inputs[0]->channels && part_size >= 8; ch++) {
403  seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
404  seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
405  if (!seg->rdft[ch] || !seg->irdft[ch])
406  return AVERROR(ENOMEM);
407  }
408 
409  seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
410  seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
411  seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
412  seg->coeff = ff_get_audio_buffer(ctx->inputs[1 + s->selir], seg->nb_partitions * seg->coeff_size * 2);
413  seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
414  seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
415  if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
416  return AVERROR(ENOMEM);
417 
418  return 0;
419 }
420 
422 {
423  AudioFIRContext *s = ctx->priv;
424 
425  if (seg->rdft) {
426  for (int ch = 0; ch < s->nb_channels; ch++) {
427  av_rdft_end(seg->rdft[ch]);
428  }
429  }
430  av_freep(&seg->rdft);
431 
432  if (seg->irdft) {
433  for (int ch = 0; ch < s->nb_channels; ch++) {
434  av_rdft_end(seg->irdft[ch]);
435  }
436  }
437  av_freep(&seg->irdft);
438 
439  av_freep(&seg->output_offset);
440  av_freep(&seg->part_index);
441 
442  av_frame_free(&seg->block);
443  av_frame_free(&seg->sum);
444  av_frame_free(&seg->buffer);
445  av_frame_free(&seg->coeff);
446  av_frame_free(&seg->input);
447  av_frame_free(&seg->output);
448  seg->input_size = 0;
449 }
450 
452 {
453  AudioFIRContext *s = ctx->priv;
454  int ret, i, ch, n, cur_nb_taps;
455  float power = 0;
456 
457  if (!s->nb_taps) {
458  int part_size, max_part_size;
459  int left, offset = 0;
460 
461  s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1 + s->selir]);
462  if (s->nb_taps <= 0)
463  return AVERROR(EINVAL);
464 
465  if (s->minp > s->maxp) {
466  s->maxp = s->minp;
467  }
468 
469  left = s->nb_taps;
470  part_size = 1 << av_log2(s->minp);
471  max_part_size = 1 << av_log2(s->maxp);
472 
473  s->min_part_size = part_size;
474 
475  for (i = 0; left > 0; i++) {
476  int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
477  int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
478 
479  s->nb_segments = i + 1;
480  ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
481  if (ret < 0)
482  return ret;
483  offset += nb_partitions * part_size;
484  left -= nb_partitions * part_size;
485  part_size *= 2;
486  part_size = FFMIN(part_size, max_part_size);
487  }
488  }
489 
490  if (!s->ir[s->selir]) {
491  ret = ff_inlink_consume_samples(ctx->inputs[1 + s->selir], s->nb_taps, s->nb_taps, &s->ir[s->selir]);
492  if (ret < 0)
493  return ret;
494  if (ret == 0)
495  return AVERROR_BUG;
496  }
497 
498  if (s->response)
499  draw_response(ctx, s->video);
500 
501  s->gain = 1;
502  cur_nb_taps = s->ir[s->selir]->nb_samples;
503 
504  switch (s->gtype) {
505  case -1:
506  /* nothing to do */
507  break;
508  case 0:
509  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
510  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
511 
512  for (i = 0; i < cur_nb_taps; i++)
513  power += FFABS(time[i]);
514  }
515  s->gain = ctx->inputs[1 + s->selir]->channels / power;
516  break;
517  case 1:
518  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
519  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
520 
521  for (i = 0; i < cur_nb_taps; i++)
522  power += time[i];
523  }
524  s->gain = ctx->inputs[1 + s->selir]->channels / power;
525  break;
526  case 2:
527  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
528  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
529 
530  for (i = 0; i < cur_nb_taps; i++)
531  power += time[i] * time[i];
532  }
533  s->gain = sqrtf(ch / power);
534  break;
535  default:
536  return AVERROR_BUG;
537  }
538 
539  s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
540  av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain);
541  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
542  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
543 
544  s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(cur_nb_taps, 4));
545  }
546 
547  av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
548  av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments);
549 
550  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
551  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
552  int toffset = 0;
553 
554  for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++)
555  time[i] = 0;
556 
557  av_log(ctx, AV_LOG_DEBUG, "channel: %d\n", ch);
558 
559  for (int segment = 0; segment < s->nb_segments; segment++) {
560  AudioFIRSegment *seg = &s->seg[segment];
561  float *block = (float *)seg->block->extended_data[ch];
563 
564  av_log(ctx, AV_LOG_DEBUG, "segment: %d\n", segment);
565 
566  for (i = 0; i < seg->nb_partitions; i++) {
567  const float scale = 1.f / seg->part_size;
568  const int coffset = i * seg->coeff_size;
569  const int remaining = s->nb_taps - toffset;
570  const int size = remaining >= seg->part_size ? seg->part_size : remaining;
571 
572  if (size < 8) {
573  for (n = 0; n < size; n++)
574  coeff[coffset + n].re = time[toffset + n];
575 
576  toffset += size;
577  continue;
578  }
579 
580  memset(block, 0, sizeof(*block) * seg->fft_length);
581  memcpy(block, time + toffset, size * sizeof(*block));
582 
583  av_rdft_calc(seg->rdft[0], block);
584 
585  coeff[coffset].re = block[0] * scale;
586  coeff[coffset].im = 0;
587  for (n = 1; n < seg->part_size; n++) {
588  coeff[coffset + n].re = block[2 * n] * scale;
589  coeff[coffset + n].im = block[2 * n + 1] * scale;
590  }
591  coeff[coffset + seg->part_size].re = block[1] * scale;
592  coeff[coffset + seg->part_size].im = 0;
593 
594  toffset += size;
595  }
596 
597  av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
598  av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
599  av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
600  av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
601  av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
602  av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
603  av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
604  }
605  }
606 
607  s->have_coeffs = 1;
608 
609  return 0;
610 }
611 
613 {
614  AVFilterContext *ctx = link->dst;
615  AudioFIRContext *s = ctx->priv;
616  int nb_taps, max_nb_taps;
617 
618  nb_taps = ff_inlink_queued_samples(link);
619  max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
620  if (nb_taps > max_nb_taps) {
621  av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
622  return AVERROR(EINVAL);
623  }
624 
625  return 0;
626 }
627 
629 {
630  AudioFIRContext *s = ctx->priv;
631  AVFilterLink *outlink = ctx->outputs[0];
632  int ret, status, available, wanted;
633  AVFrame *in = NULL;
634  int64_t pts;
635 
637  if (s->response)
639  if (!s->eof_coeffs[s->selir]) {
640  ret = check_ir(ctx->inputs[1 + s->selir]);
641  if (ret < 0)
642  return ret;
643 
644  if (ff_outlink_get_status(ctx->inputs[1 + s->selir]) == AVERROR_EOF)
645  s->eof_coeffs[s->selir] = 1;
646 
647  if (!s->eof_coeffs[s->selir]) {
648  if (ff_outlink_frame_wanted(ctx->outputs[0]))
649  ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
650  else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
651  ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
652  return 0;
653  }
654  }
655 
656  if (!s->have_coeffs && s->eof_coeffs[s->selir]) {
658  if (ret < 0)
659  return ret;
660  }
661 
662  available = ff_inlink_queued_samples(ctx->inputs[0]);
663  wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
664  ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
665  if (ret > 0)
666  ret = fir_frame(s, in, outlink);
667 
668  if (ret < 0)
669  return ret;
670 
671  if (s->response && s->have_coeffs) {
672  int64_t old_pts = s->video->pts;
673  int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);
674 
675  if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
676  AVFrame *clone;
677  s->video->pts = new_pts;
678  clone = av_frame_clone(s->video);
679  if (!clone)
680  return AVERROR(ENOMEM);
681  return ff_filter_frame(ctx->outputs[1], clone);
682  }
683  }
684 
685  if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
687  return 0;
688  }
689 
690  if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
691  if (status == AVERROR_EOF) {
692  ff_outlink_set_status(ctx->outputs[0], status, pts);
693  if (s->response)
694  ff_outlink_set_status(ctx->outputs[1], status, pts);
695  return 0;
696  }
697  }
698 
699  if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
700  !ff_outlink_get_status(ctx->inputs[0])) {
701  ff_inlink_request_frame(ctx->inputs[0]);
702  return 0;
703  }
704 
705  if (s->response &&
706  ff_outlink_frame_wanted(ctx->outputs[1]) &&
707  !ff_outlink_get_status(ctx->inputs[0])) {
708  ff_inlink_request_frame(ctx->inputs[0]);
709  return 0;
710  }
711 
712  return FFERROR_NOT_READY;
713 }
714 
716 {
717  AudioFIRContext *s = ctx->priv;
718  static const enum AVSampleFormat sample_fmts[] = {
721  };
722  static const enum AVPixelFormat pix_fmts[] = {
725  };
726  int ret;
727 
728  if (s->response) {
729  AVFilterLink *videolink = ctx->outputs[1];
731  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
732  return ret;
733  }
734 
735  if (s->ir_format) {
737  if (ret < 0)
738  return ret;
739  } else {
742 
743  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts)) < 0)
744  return ret;
745  if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
746  return ret;
747 
749  if (ret)
750  return ret;
751  for (int i = 1; i < ctx->nb_inputs; i++) {
752  if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
753  return ret;
754  }
755  }
756 
758  return ret;
759 
761 }
762 
763 static int config_output(AVFilterLink *outlink)
764 {
765  AVFilterContext *ctx = outlink->src;
766  AudioFIRContext *s = ctx->priv;
767 
768  s->one2many = ctx->inputs[1 + s->selir]->channels == 1;
769  outlink->sample_rate = ctx->inputs[0]->sample_rate;
770  outlink->time_base = ctx->inputs[0]->time_base;
771  outlink->channel_layout = ctx->inputs[0]->channel_layout;
772  outlink->channels = ctx->inputs[0]->channels;
773 
774  s->nb_channels = outlink->channels;
775  s->nb_coef_channels = ctx->inputs[1 + s->selir]->channels;
776  s->pts = AV_NOPTS_VALUE;
777 
778  return 0;
779 }
780 
782 {
783  AudioFIRContext *s = ctx->priv;
784 
785  for (int i = 0; i < s->nb_segments; i++) {
786  uninit_segment(ctx, &s->seg[i]);
787  }
788 
789  av_freep(&s->fdsp);
790 
791  for (int i = 0; i < s->nb_irs; i++) {
792  av_frame_free(&s->ir[i]);
793  }
794 
795  av_frame_free(&s->video);
796 }
797 
798 static int config_video(AVFilterLink *outlink)
799 {
800  AVFilterContext *ctx = outlink->src;
801  AudioFIRContext *s = ctx->priv;
802 
803  outlink->sample_aspect_ratio = (AVRational){1,1};
804  outlink->w = s->w;
805  outlink->h = s->h;
806  outlink->frame_rate = s->frame_rate;
807  outlink->time_base = av_inv_q(outlink->frame_rate);
808 
809  av_frame_free(&s->video);
810  s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
811  if (!s->video)
812  return AVERROR(ENOMEM);
813 
814  return 0;
815 }
816 
818 {
819  dsp->fcmul_add = fcmul_add_c;
820 
821  if (ARCH_X86)
822  ff_afir_init_x86(dsp);
823 }
824 
826 {
827  AudioFIRContext *s = ctx->priv;
828  AVFilterPad pad, vpad;
829  int ret;
830 
831  pad = (AVFilterPad) {
832  .name = "main",
833  .type = AVMEDIA_TYPE_AUDIO,
834  };
835 
836  ret = ff_append_inpad(ctx, &pad);
837  if (ret < 0)
838  return ret;
839 
840  for (int n = 0; n < s->nb_irs; n++) {
841  pad = (AVFilterPad) {
842  .name = av_asprintf("ir%d", n),
843  .type = AVMEDIA_TYPE_AUDIO,
844  };
845 
846  if (!pad.name)
847  return AVERROR(ENOMEM);
848 
850  if (ret < 0)
851  return ret;
852  }
853 
854  pad = (AVFilterPad) {
855  .name = "default",
856  .type = AVMEDIA_TYPE_AUDIO,
857  .config_props = config_output,
858  };
859 
860  ret = ff_append_outpad(ctx, &pad);
861  if (ret < 0)
862  return ret;
863 
864  if (s->response) {
865  vpad = (AVFilterPad){
866  .name = "filter_response",
867  .type = AVMEDIA_TYPE_VIDEO,
868  .config_props = config_video,
869  };
870 
871  ret = ff_append_outpad(ctx, &vpad);
872  if (ret < 0)
873  return ret;
874  }
875 
876  s->fdsp = avpriv_float_dsp_alloc(0);
877  if (!s->fdsp)
878  return AVERROR(ENOMEM);
879 
880  ff_afir_init(&s->afirdsp);
881 
882  return 0;
883 }
884 
886  const char *cmd,
887  const char *arg,
888  char *res,
889  int res_len,
890  int flags)
891 {
892  AudioFIRContext *s = ctx->priv;
893  int prev_ir = s->selir;
894  int ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
895 
896  if (ret < 0)
897  return ret;
898 
899  s->selir = FFMIN(s->nb_irs - 1, s->selir);
900 
901  if (prev_ir != s->selir) {
902  s->have_coeffs = 0;
903  }
904 
905  return 0;
906 }
907 
/* option flag shorthands: audio, audio+runtime-settable, video */
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

/* user-visible options; only "ir" (IR selection) may be changed at runtime */
static const AVOption afir_options[] = {
    { "dry",    "set dry gain",      OFFSET(dry_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "wet",    "set wet gain",      OFFSET(wet_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "length", "set IR length",     OFFSET(length),     AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "gtype",  "set IR auto gain type",OFFSET(gtype),   AV_OPT_TYPE_INT,   {.i64=0},   -1,  2, AF, "gtype" },
    {  "none",  "without auto gain", 0,                  AV_OPT_TYPE_CONST, {.i64=-1},   0,  0, AF, "gtype" },
    {  "peak",  "peak gain",         0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "gtype" },
    {  "dc",    "DC gain",           0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "gtype" },
    {  "gn",    "gain to noise",     0,                  AV_OPT_TYPE_CONST, {.i64=2},    0,  0, AF, "gtype" },
    { "irgain", "set IR gain",       OFFSET(ir_gain),    AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "irfmt",  "set IR format",     OFFSET(ir_format),  AV_OPT_TYPE_INT,   {.i64=1},    0,  1, AF, "irfmt" },
    {  "mono",  "single channel",    0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "irfmt" },
    {  "input", "same as input",     0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "irfmt" },
    { "maxir",  "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size",   "set video size",    OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate",   "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    { "minp",   "set min partition size", OFFSET(minp),  AV_OPT_TYPE_INT,   {.i64=8192}, 1, 32768, AF },
    { "maxp",   "set max partition size", OFFSET(maxp),  AV_OPT_TYPE_INT,   {.i64=8192}, 8, 32768, AF },
    { "nbirs",  "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT,   {.i64=1},    1, 32, AF },
    { "ir",     "select IR",         OFFSET(selir),      AV_OPT_TYPE_INT,   {.i64=0},    0, 31, AFR },
    { NULL }
};
937 
939 
941  .name = "afir",
942  .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
943  .priv_size = sizeof(AudioFIRContext),
944  .priv_class = &afir_class,
946  .init = init,
947  .activate = activate,
948  .uninit = uninit,
949  .process_command = process_command,
953 };
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:98
activate
static int activate(AVFilterContext *ctx)
Definition: af_afir.c:628
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:88
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:61
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
av_clip
#define av_clip
Definition: common.h:96
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:381
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AudioFIRSegment::block_size
int block_size
Definition: af_afir.h:37
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:599
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1018
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:550
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:237
VF
#define VF
Definition: af_afir.c:910
AV_CH_LAYOUT_MONO
#define AV_CH_LAYOUT_MONO
Definition: channel_layout.h:90
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:525
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:424
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AudioFIRSegment::buffer
AVFrame * buffer
Definition: af_afir.h:48
w
uint8_t w
Definition: llviddspenc.c:38
fir_quantum
static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
Definition: af_afir.c:78
AVOption
AVOption.
Definition: opt.h:247
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:168
AudioFIRSegment::input_offset
int input_offset
Definition: af_afir.h:41
AudioFIRDSPContext::fcmul_add
void(* fcmul_add)(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.h:57
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:689
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:169
AudioFIRSegment::part_size
int part_size
Definition: af_afir.h:36
AudioFIRSegment::input_size
int input_size
Definition: af_afir.h:40
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
ff_append_inpad
int ff_append_inpad(AVFilterContext *f, AVFilterPad *p)
Append a new input/output pad to the filter's list of such pads.
Definition: avfilter.c:139
AudioFIRSegment::coeff
AVFrame * coeff
Definition: af_afir.h:49
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_afir.c:781
fir_channels
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_afir.c:201
afir_options
static const AVOption afir_options[]
Definition: af_afir.c:913
IDFT_C2R
@ IDFT_C2R
Definition: avfft.h:73
AudioFIRSegment::block
AVFrame * block
Definition: af_afir.h:47
ff_afir_init_x86
void ff_afir_init_x86(AudioFIRDSPContext *s)
Definition: af_afir_init.c:30
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1388
pts
static int64_t pts
Definition: transcode_aac.c:653
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:110
uninit_segment
static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
Definition: af_afir.c:421
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
AudioFIRSegment
Definition: af_afir.h:34
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(afir)
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:426
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1534
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:555
fminf
float fminf(float, float)
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:705
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:422
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
f
#define f(width, name)
Definition: cbs_vp9.c:255
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
AudioFIRSegment::sum
AVFrame * sum
Definition: af_afir.h:46
fir_fadd
static void fir_fadd(AudioFIRContext *s, float *dst, const float *src, int nb_samples)
Definition: af_afir.c:68
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1436
NULL
#define NULL
Definition: coverity.c:32
ff_afir_init
void ff_afir_init(AudioFIRDSPContext *dsp)
Definition: af_afir.c:817
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_append_inpad_free_name
int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:144
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:234
src
#define src
Definition: vp8dsp.c:255
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_afir.c:264
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
avfft.h
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx)
Definition: af_afir.c:451
ff_set_common_all_channel_counts
int ff_set_common_all_channel_counts(AVFilterContext *ctx)
Equivalent to ff_set_common_channel_layouts(ctx, ff_all_channel_counts())
Definition: formats.c:671
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1371
AFR
#define AFR
Definition: af_afir.c:909
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
float_dsp.h
AudioFIRSegment::output
AVFrame * output
Definition: af_afir.h:51
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:116
fcmul_add_c
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.c:44
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AudioFIRSegment::irdft
RDFTContext ** irdft
Definition: af_afir.h:53
fmaxf
float fmaxf(float, float)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AF
#define AF
Definition: af_afir.c:908
check_ir
static int check_ir(AVFilterLink *link)
Definition: af_afir.c:612
AudioFIRDSPContext
Definition: af_afir.h:56
ff_filter_process_command
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options.
Definition: avfilter.c:882
af_afir.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:228
xga_font_data.h
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out)
Definition: af_afir.c:290
M_PI
#define M_PI
Definition: mathematics.h:52
ff_af_afir
const AVFilter ff_af_afir
Definition: af_afir.c:940
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:227
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_afir.c:715
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:397
AudioFIRSegment::rdft
RDFTContext ** rdft
Definition: af_afir.h:53
OFFSET
#define OFFSET(x)
Definition: af_afir.c:911
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_afir.c:798
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AudioFIRSegment::input
AVFrame * input
Definition: af_afir.h:50
AudioFIRSegment::coeff_size
int coeff_size
Definition: af_afir.h:39
available
if no frame is available
Definition: filter_design.txt:166
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:378
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
common.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:803
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AudioFIRSegment::nb_partitions
int nb_partitions
Definition: af_afir.h:35
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1396
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
AVFilter
Filter definition.
Definition: avfilter.h:165
ret
ret
Definition: filter_design.txt:187
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AudioFIRSegment::fft_length
int fft_length
Definition: af_afir.h:38
power
static float power(float r, float g, float b, float max)
Definition: preserve_color.h:45
channel_layout.h
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_afir.c:763
AudioFIRContext
Definition: af_afir.h:61
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
fir_channel
static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
Definition: af_afir.c:190
segment
Definition: hls.c:71
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1557
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:121
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
fir_frame
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_afir.c:214
AVFilterFormatsConfig::formats
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:506
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:150
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_afir.c:825
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
init_segment
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int offset, int nb_partitions, int part_size)
Definition: af_afir.c:379
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
process_command
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: af_afir.c:885
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_rdft_end
void av_rdft_end(RDFTContext *s)
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
avstring.h
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
snprintf
#define snprintf
Definition: snprintf.h:34
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AudioFIRSegment::output_offset
int * output_offset
Definition: af_afir.h:43
channel
channel
Definition: ebur128.h:39
FFTComplex
Definition: avfft.h:37
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_afir.c:241
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:211
re
float re
Definition: fft.c:78
min
float min
Definition: vorbis_enc_data.h:429
AudioFIRSegment::part_index
int * part_index
Definition: af_afir.h:44