/*
 * FFmpeg — af_afir.c (header text from documentation export)
 */
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * An arbitrary audio FIR filter
24  */
25 
#include <float.h>

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavcodec/avfft.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "af_afir.h"
42 
/**
 * C reference implementation of the complex multiply-accumulate kernel.
 *
 * Treats sum, t and c as interleaved re/im float pairs and accumulates
 * sum += t * c (complex multiplication) over len bins.  The value stored
 * just past the interleaved pairs, at index 2*len, is a purely real
 * coefficient (the Nyquist bin of the packed real-FFT layout) and is
 * accumulated as a plain real product.
 */
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
{
    ptrdiff_t i;

    for (i = 0; i < len; i++) {
        const float a_re = t[2 * i];
        const float a_im = t[2 * i + 1];
        const float b_re = c[2 * i];
        const float b_im = c[2 * i + 1];

        sum[2 * i]     += a_re * b_re - a_im * b_im;
        sum[2 * i + 1] += a_re * b_im + a_im * b_re;
    }

    /* Nyquist bin: real-only tail element. */
    sum[2 * len] += t[2 * len] * c[2 * len];
}
59 
60 static void direct(const float *in, const FFTComplex *ir, int len, float *out)
61 {
62  for (int n = 0; n < len; n++)
63  for (int m = 0; m <= n; m++)
64  out[n] += ir[m].re * in[n - m];
65 }
66 
67 static void fir_fadd(AudioFIRContext *s, float *dst, const float *src, int nb_samples)
68 {
69  if ((nb_samples & 15) == 0 && nb_samples >= 16) {
70  s->fdsp->vector_fmac_scalar(dst, src, 1.f, nb_samples);
71  } else {
72  for (int n = 0; n < nb_samples; n++)
73  dst[n] += src[n];
74  }
75 }
76 
78 {
79  AudioFIRContext *s = ctx->priv;
80  const float *in = (const float *)s->in->extended_data[ch] + offset;
81  float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
82  const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
83  int n, i, j;
84 
85  for (int segment = 0; segment < s->nb_segments; segment++) {
86  AudioFIRSegment *seg = &s->seg[segment];
87  float *src = (float *)seg->input->extended_data[ch];
88  float *dst = (float *)seg->output->extended_data[ch];
89  float *sum = (float *)seg->sum->extended_data[ch];
90 
91  if (s->min_part_size >= 8) {
92  s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4));
93  emms_c();
94  } else {
95  for (n = 0; n < nb_samples; n++)
96  src[seg->input_offset + n] = in[n] * s->dry_gain;
97  }
98 
99  seg->output_offset[ch] += s->min_part_size;
100  if (seg->output_offset[ch] == seg->part_size) {
101  seg->output_offset[ch] = 0;
102  } else {
103  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
104 
105  dst += seg->output_offset[ch];
106  fir_fadd(s, ptr, dst, nb_samples);
107  continue;
108  }
109 
110  if (seg->part_size < 8) {
111  memset(dst, 0, sizeof(*dst) * seg->part_size * seg->nb_partitions);
112 
113  j = seg->part_index[ch];
114 
115  for (i = 0; i < seg->nb_partitions; i++) {
116  const int coffset = j * seg->coeff_size;
117  const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
118 
119  direct(src, coeff, nb_samples, dst);
120 
121  if (j == 0)
122  j = seg->nb_partitions;
123  j--;
124  }
125 
126  seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
127 
128  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
129 
130  for (n = 0; n < nb_samples; n++) {
131  ptr[n] += dst[n];
132  }
133  continue;
134  }
135 
136  memset(sum, 0, sizeof(*sum) * seg->fft_length);
137  block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
138  memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));
139 
140  memcpy(block, src, sizeof(*src) * seg->part_size);
141 
142  av_rdft_calc(seg->rdft[ch], block);
143  block[2 * seg->part_size] = block[1];
144  block[1] = 0;
145 
146  j = seg->part_index[ch];
147 
148  for (i = 0; i < seg->nb_partitions; i++) {
149  const int coffset = j * seg->coeff_size;
150  const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
151  const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
152 
153  s->afirdsp.fcmul_add(sum, block, (const float *)coeff, seg->part_size);
154 
155  if (j == 0)
156  j = seg->nb_partitions;
157  j--;
158  }
159 
160  sum[1] = sum[2 * seg->part_size];
161  av_rdft_calc(seg->irdft[ch], sum);
162 
163  buf = (float *)seg->buffer->extended_data[ch];
164  fir_fadd(s, buf, sum, seg->part_size);
165 
166  memcpy(dst, buf, seg->part_size * sizeof(*dst));
167 
168  buf = (float *)seg->buffer->extended_data[ch];
169  memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));
170 
171  seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
172 
173  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
174 
175  fir_fadd(s, ptr, dst, nb_samples);
176  }
177 
178  if (s->min_part_size >= 8) {
179  s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
180  emms_c();
181  } else {
182  for (n = 0; n < nb_samples; n++)
183  ptr[n] *= s->wet_gain;
184  }
185 
186  return 0;
187 }
188 
190 {
191  AudioFIRContext *s = ctx->priv;
192 
193  for (int offset = 0; offset < out->nb_samples; offset += s->min_part_size) {
194  fir_quantum(ctx, out, ch, offset);
195  }
196 
197  return 0;
198 }
199 
200 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
201 {
202  AVFrame *out = arg;
203  const int start = (out->channels * jobnr) / nb_jobs;
204  const int end = (out->channels * (jobnr+1)) / nb_jobs;
205 
206  for (int ch = start; ch < end; ch++) {
207  fir_channel(ctx, out, ch);
208  }
209 
210  return 0;
211 }
212 
214 {
215  AVFilterContext *ctx = outlink->src;
216  AVFrame *out = NULL;
217 
218  out = ff_get_audio_buffer(outlink, in->nb_samples);
219  if (!out) {
220  av_frame_free(&in);
221  return AVERROR(ENOMEM);
222  }
223 
224  if (s->pts == AV_NOPTS_VALUE)
225  s->pts = in->pts;
226  s->in = in;
227  ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels,
229 
230  out->pts = s->pts;
231  if (s->pts != AV_NOPTS_VALUE)
232  s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
233 
234  av_frame_free(&in);
235  s->in = NULL;
236 
237  return ff_filter_frame(outlink, out);
238 }
239 
240 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
241 {
242  const uint8_t *font;
243  int font_height;
244  int i;
245 
246  font = avpriv_cga_font, font_height = 8;
247 
248  for (i = 0; txt[i]; i++) {
249  int char_y, mask;
250 
251  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
252  for (char_y = 0; char_y < font_height; char_y++) {
253  for (mask = 0x80; mask; mask >>= 1) {
254  if (font[txt[i] * font_height + char_y] & mask)
255  AV_WL32(p, color);
256  p += 4;
257  }
258  p += pic->linesize[0] - 8 * 4;
259  }
260  }
261 }
262 
263 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
264 {
265  int dx = FFABS(x1-x0);
266  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
267  int err = (dx>dy ? dx : -dy) / 2, e2;
268 
269  for (;;) {
270  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
271 
272  if (x0 == x1 && y0 == y1)
273  break;
274 
275  e2 = err;
276 
277  if (e2 >-dx) {
278  err -= dy;
279  x0--;
280  }
281 
282  if (e2 < dy) {
283  err += dx;
284  y0 += sy;
285  }
286  }
287 }
288 
290 {
291  AudioFIRContext *s = ctx->priv;
292  float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
293  float min_delay = FLT_MAX, max_delay = FLT_MIN;
294  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
295  char text[32];
296  int channel, i, x;
297 
298  memset(out->data[0], 0, s->h * out->linesize[0]);
299 
300  phase = av_malloc_array(s->w, sizeof(*phase));
301  mag = av_malloc_array(s->w, sizeof(*mag));
302  delay = av_malloc_array(s->w, sizeof(*delay));
303  if (!mag || !phase || !delay)
304  goto end;
305 
306  channel = av_clip(s->ir_channel, 0, s->ir[s->selir]->channels - 1);
307  for (i = 0; i < s->w; i++) {
308  const float *src = (const float *)s->ir[s->selir]->extended_data[channel];
309  double w = i * M_PI / (s->w - 1);
310  double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;
311 
312  for (x = 0; x < s->nb_taps; x++) {
313  real += cos(-x * w) * src[x];
314  imag += sin(-x * w) * src[x];
315  real_num += cos(-x * w) * src[x] * x;
316  imag_num += sin(-x * w) * src[x] * x;
317  }
318 
319  mag[i] = hypot(real, imag);
320  phase[i] = atan2(imag, real);
321  div = real * real + imag * imag;
322  delay[i] = (real_num * real + imag_num * imag) / div;
323  min = fminf(min, mag[i]);
324  max = fmaxf(max, mag[i]);
325  min_delay = fminf(min_delay, delay[i]);
326  max_delay = fmaxf(max_delay, delay[i]);
327  }
328 
329  for (i = 0; i < s->w; i++) {
330  int ymag = mag[i] / max * (s->h - 1);
331  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
332  int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
333 
334  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
335  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
336  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
337 
338  if (prev_ymag < 0)
339  prev_ymag = ymag;
340  if (prev_yphase < 0)
341  prev_yphase = yphase;
342  if (prev_ydelay < 0)
343  prev_ydelay = ydelay;
344 
345  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
346  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
347  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
348 
349  prev_ymag = ymag;
350  prev_yphase = yphase;
351  prev_ydelay = ydelay;
352  }
353 
354  if (s->w > 400 && s->h > 100) {
355  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
356  snprintf(text, sizeof(text), "%.2f", max);
357  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
358 
359  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
360  snprintf(text, sizeof(text), "%.2f", min);
361  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
362 
363  drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
364  snprintf(text, sizeof(text), "%.2f", max_delay);
365  drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);
366 
367  drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
368  snprintf(text, sizeof(text), "%.2f", min_delay);
369  drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
370  }
371 
372 end:
373  av_free(delay);
374  av_free(phase);
375  av_free(mag);
376 }
377 
379  int offset, int nb_partitions, int part_size)
380 {
381  AudioFIRContext *s = ctx->priv;
382 
383  seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
384  seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
385  if (!seg->rdft || !seg->irdft)
386  return AVERROR(ENOMEM);
387 
388  seg->fft_length = part_size * 2 + 1;
389  seg->part_size = part_size;
390  seg->block_size = FFALIGN(seg->fft_length, 32);
391  seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
392  seg->nb_partitions = nb_partitions;
393  seg->input_size = offset + s->min_part_size;
394  seg->input_offset = offset;
395 
396  seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
397  seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
398  if (!seg->part_index || !seg->output_offset)
399  return AVERROR(ENOMEM);
400 
401  for (int ch = 0; ch < ctx->inputs[0]->channels && part_size >= 8; ch++) {
402  seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
403  seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
404  if (!seg->rdft[ch] || !seg->irdft[ch])
405  return AVERROR(ENOMEM);
406  }
407 
408  seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
409  seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
410  seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
411  seg->coeff = ff_get_audio_buffer(ctx->inputs[1 + s->selir], seg->nb_partitions * seg->coeff_size * 2);
412  seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
413  seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
414  if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
415  return AVERROR(ENOMEM);
416 
417  return 0;
418 }
419 
421 {
422  AudioFIRContext *s = ctx->priv;
423 
424  if (seg->rdft) {
425  for (int ch = 0; ch < s->nb_channels; ch++) {
426  av_rdft_end(seg->rdft[ch]);
427  }
428  }
429  av_freep(&seg->rdft);
430 
431  if (seg->irdft) {
432  for (int ch = 0; ch < s->nb_channels; ch++) {
433  av_rdft_end(seg->irdft[ch]);
434  }
435  }
436  av_freep(&seg->irdft);
437 
438  av_freep(&seg->output_offset);
439  av_freep(&seg->part_index);
440 
441  av_frame_free(&seg->block);
442  av_frame_free(&seg->sum);
443  av_frame_free(&seg->buffer);
444  av_frame_free(&seg->coeff);
445  av_frame_free(&seg->input);
446  av_frame_free(&seg->output);
447  seg->input_size = 0;
448 }
449 
451 {
452  AudioFIRContext *s = ctx->priv;
453  int ret, i, ch, n, cur_nb_taps;
454  float power = 0;
455 
456  if (!s->nb_taps) {
457  int part_size, max_part_size;
458  int left, offset = 0;
459 
460  s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1 + s->selir]);
461  if (s->nb_taps <= 0)
462  return AVERROR(EINVAL);
463 
464  if (s->minp > s->maxp) {
465  s->maxp = s->minp;
466  }
467 
468  left = s->nb_taps;
469  part_size = 1 << av_log2(s->minp);
470  max_part_size = 1 << av_log2(s->maxp);
471 
472  s->min_part_size = part_size;
473 
474  for (i = 0; left > 0; i++) {
475  int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
476  int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
477 
478  s->nb_segments = i + 1;
479  ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
480  if (ret < 0)
481  return ret;
482  offset += nb_partitions * part_size;
483  left -= nb_partitions * part_size;
484  part_size *= 2;
485  part_size = FFMIN(part_size, max_part_size);
486  }
487  }
488 
489  if (!s->ir[s->selir]) {
490  ret = ff_inlink_consume_samples(ctx->inputs[1 + s->selir], s->nb_taps, s->nb_taps, &s->ir[s->selir]);
491  if (ret < 0)
492  return ret;
493  if (ret == 0)
494  return AVERROR_BUG;
495  }
496 
497  if (s->response)
498  draw_response(ctx, s->video);
499 
500  s->gain = 1;
501  cur_nb_taps = s->ir[s->selir]->nb_samples;
502 
503  switch (s->gtype) {
504  case -1:
505  /* nothing to do */
506  break;
507  case 0:
508  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
509  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
510 
511  for (i = 0; i < cur_nb_taps; i++)
512  power += FFABS(time[i]);
513  }
514  s->gain = ctx->inputs[1 + s->selir]->channels / power;
515  break;
516  case 1:
517  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
518  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
519 
520  for (i = 0; i < cur_nb_taps; i++)
521  power += time[i];
522  }
523  s->gain = ctx->inputs[1 + s->selir]->channels / power;
524  break;
525  case 2:
526  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
527  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
528 
529  for (i = 0; i < cur_nb_taps; i++)
530  power += time[i] * time[i];
531  }
532  s->gain = sqrtf(ch / power);
533  break;
534  default:
535  return AVERROR_BUG;
536  }
537 
538  s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
539  av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain);
540  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
541  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
542 
543  s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(cur_nb_taps, 4));
544  }
545 
546  av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
547  av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments);
548 
549  for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
550  float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
551  int toffset = 0;
552 
553  for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++)
554  time[i] = 0;
555 
556  av_log(ctx, AV_LOG_DEBUG, "channel: %d\n", ch);
557 
558  for (int segment = 0; segment < s->nb_segments; segment++) {
559  AudioFIRSegment *seg = &s->seg[segment];
560  float *block = (float *)seg->block->extended_data[ch];
562 
563  av_log(ctx, AV_LOG_DEBUG, "segment: %d\n", segment);
564 
565  for (i = 0; i < seg->nb_partitions; i++) {
566  const float scale = 1.f / seg->part_size;
567  const int coffset = i * seg->coeff_size;
568  const int remaining = s->nb_taps - toffset;
569  const int size = remaining >= seg->part_size ? seg->part_size : remaining;
570 
571  if (size < 8) {
572  for (n = 0; n < size; n++)
573  coeff[coffset + n].re = time[toffset + n];
574 
575  toffset += size;
576  continue;
577  }
578 
579  memset(block, 0, sizeof(*block) * seg->fft_length);
580  memcpy(block, time + toffset, size * sizeof(*block));
581 
582  av_rdft_calc(seg->rdft[0], block);
583 
584  coeff[coffset].re = block[0] * scale;
585  coeff[coffset].im = 0;
586  for (n = 1; n < seg->part_size; n++) {
587  coeff[coffset + n].re = block[2 * n] * scale;
588  coeff[coffset + n].im = block[2 * n + 1] * scale;
589  }
590  coeff[coffset + seg->part_size].re = block[1] * scale;
591  coeff[coffset + seg->part_size].im = 0;
592 
593  toffset += size;
594  }
595 
596  av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
597  av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
598  av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
599  av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
600  av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
601  av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
602  av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
603  }
604  }
605 
606  s->have_coeffs = 1;
607 
608  return 0;
609 }
610 
612 {
613  AVFilterContext *ctx = link->dst;
614  AudioFIRContext *s = ctx->priv;
615  int nb_taps, max_nb_taps;
616 
617  nb_taps = ff_inlink_queued_samples(link);
618  max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
619  if (nb_taps > max_nb_taps) {
620  av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
621  return AVERROR(EINVAL);
622  }
623 
624  return 0;
625 }
626 
628 {
629  AudioFIRContext *s = ctx->priv;
630  AVFilterLink *outlink = ctx->outputs[0];
631  int ret, status, available, wanted;
632  AVFrame *in = NULL;
633  int64_t pts;
634 
636  if (s->response)
638  if (!s->eof_coeffs[s->selir]) {
639  AVFrame *ir = NULL;
640 
641  ret = check_ir(ctx->inputs[1 + s->selir], ir);
642  if (ret < 0)
643  return ret;
644 
645  if (ff_outlink_get_status(ctx->inputs[1 + s->selir]) == AVERROR_EOF)
646  s->eof_coeffs[s->selir] = 1;
647 
648  if (!s->eof_coeffs[s->selir]) {
649  if (ff_outlink_frame_wanted(ctx->outputs[0]))
650  ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
651  else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
652  ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
653  return 0;
654  }
655  }
656 
657  if (!s->have_coeffs && s->eof_coeffs[s->selir]) {
658  ret = convert_coeffs(ctx);
659  if (ret < 0)
660  return ret;
661  }
662 
663  available = ff_inlink_queued_samples(ctx->inputs[0]);
664  wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
665  ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
666  if (ret > 0)
667  ret = fir_frame(s, in, outlink);
668 
669  if (ret < 0)
670  return ret;
671 
672  if (s->response && s->have_coeffs) {
673  int64_t old_pts = s->video->pts;
674  int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);
675 
676  if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
677  AVFrame *clone;
678  s->video->pts = new_pts;
679  clone = av_frame_clone(s->video);
680  if (!clone)
681  return AVERROR(ENOMEM);
682  return ff_filter_frame(ctx->outputs[1], clone);
683  }
684  }
685 
686  if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
687  ff_filter_set_ready(ctx, 10);
688  return 0;
689  }
690 
691  if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
692  if (status == AVERROR_EOF) {
693  ff_outlink_set_status(ctx->outputs[0], status, pts);
694  if (s->response)
695  ff_outlink_set_status(ctx->outputs[1], status, pts);
696  return 0;
697  }
698  }
699 
700  if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
701  !ff_outlink_get_status(ctx->inputs[0])) {
703  return 0;
704  }
705 
706  if (s->response &&
707  ff_outlink_frame_wanted(ctx->outputs[1]) &&
708  !ff_outlink_get_status(ctx->inputs[0])) {
710  return 0;
711  }
712 
713  return FFERROR_NOT_READY;
714 }
715 
717 {
718  AudioFIRContext *s = ctx->priv;
721  static const enum AVSampleFormat sample_fmts[] = {
724  };
725  static const enum AVPixelFormat pix_fmts[] = {
728  };
729  int ret;
730 
731  if (s->response) {
732  AVFilterLink *videolink = ctx->outputs[1];
733  formats = ff_make_format_list(pix_fmts);
734  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
735  return ret;
736  }
737 
738  layouts = ff_all_channel_counts();
739  if (!layouts)
740  return AVERROR(ENOMEM);
741 
742  if (s->ir_format) {
743  ret = ff_set_common_channel_layouts(ctx, layouts);
744  if (ret < 0)
745  return ret;
746  } else {
748 
749  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts)) < 0)
750  return ret;
751  if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts)) < 0)
752  return ret;
753 
755  if (ret)
756  return ret;
757  for (int i = 1; i < ctx->nb_inputs; i++) {
758  if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->outcfg.channel_layouts)) < 0)
759  return ret;
760  }
761  }
762 
763  formats = ff_make_format_list(sample_fmts);
764  if ((ret = ff_set_common_formats(ctx, formats)) < 0)
765  return ret;
766 
767  formats = ff_all_samplerates();
768  return ff_set_common_samplerates(ctx, formats);
769 }
770 
771 static int config_output(AVFilterLink *outlink)
772 {
773  AVFilterContext *ctx = outlink->src;
774  AudioFIRContext *s = ctx->priv;
775 
776  s->one2many = ctx->inputs[1 + s->selir]->channels == 1;
777  outlink->sample_rate = ctx->inputs[0]->sample_rate;
778  outlink->time_base = ctx->inputs[0]->time_base;
779  outlink->channel_layout = ctx->inputs[0]->channel_layout;
780  outlink->channels = ctx->inputs[0]->channels;
781 
782  s->nb_channels = outlink->channels;
783  s->nb_coef_channels = ctx->inputs[1 + s->selir]->channels;
784  s->pts = AV_NOPTS_VALUE;
785 
786  return 0;
787 }
788 
790 {
791  AudioFIRContext *s = ctx->priv;
792 
793  for (int i = 0; i < s->nb_segments; i++) {
794  uninit_segment(ctx, &s->seg[i]);
795  }
796 
797  av_freep(&s->fdsp);
798 
799  for (int i = 0; i < s->nb_irs; i++) {
800  av_frame_free(&s->ir[i]);
801  }
802 
803  for (unsigned i = 1; i < ctx->nb_inputs; i++)
804  av_freep(&ctx->input_pads[i].name);
805 
806  av_frame_free(&s->video);
807 }
808 
809 static int config_video(AVFilterLink *outlink)
810 {
811  AVFilterContext *ctx = outlink->src;
812  AudioFIRContext *s = ctx->priv;
813 
814  outlink->sample_aspect_ratio = (AVRational){1,1};
815  outlink->w = s->w;
816  outlink->h = s->h;
817  outlink->frame_rate = s->frame_rate;
818  outlink->time_base = av_inv_q(outlink->frame_rate);
819 
820  av_frame_free(&s->video);
821  s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
822  if (!s->video)
823  return AVERROR(ENOMEM);
824 
825  return 0;
826 }
827 
829 {
830  dsp->fcmul_add = fcmul_add_c;
831 
832  if (ARCH_X86)
833  ff_afir_init_x86(dsp);
834 }
835 
837 {
838  AudioFIRContext *s = ctx->priv;
839  AVFilterPad pad, vpad;
840  int ret;
841 
842  pad = (AVFilterPad) {
843  .name = "main",
844  .type = AVMEDIA_TYPE_AUDIO,
845  };
846 
847  ret = ff_insert_inpad(ctx, 0, &pad);
848  if (ret < 0)
849  return ret;
850 
851  for (int n = 0; n < s->nb_irs; n++) {
852  pad = (AVFilterPad) {
853  .name = av_asprintf("ir%d", n),
854  .type = AVMEDIA_TYPE_AUDIO,
855  };
856 
857  if (!pad.name)
858  return AVERROR(ENOMEM);
859 
860  ret = ff_insert_inpad(ctx, n + 1, &pad);
861  if (ret < 0) {
862  av_freep(&pad.name);
863  return ret;
864  }
865  }
866 
867  pad = (AVFilterPad) {
868  .name = "default",
869  .type = AVMEDIA_TYPE_AUDIO,
870  .config_props = config_output,
871  };
872 
873  ret = ff_insert_outpad(ctx, 0, &pad);
874  if (ret < 0)
875  return ret;
876 
877  if (s->response) {
878  vpad = (AVFilterPad){
879  .name = "filter_response",
880  .type = AVMEDIA_TYPE_VIDEO,
881  .config_props = config_video,
882  };
883 
884  ret = ff_insert_outpad(ctx, 1, &vpad);
885  if (ret < 0)
886  return ret;
887  }
888 
890  if (!s->fdsp)
891  return AVERROR(ENOMEM);
892 
893  ff_afir_init(&s->afirdsp);
894 
895  return 0;
896 }
897 
899  const char *cmd,
900  const char *arg,
901  char *res,
902  int res_len,
903  int flags)
904 {
905  AudioFIRContext *s = ctx->priv;
906  int prev_ir = s->selir;
907  int ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
908 
909  if (ret < 0)
910  return ret;
911 
912  s->selir = FFMIN(s->nb_irs - 1, s->selir);
913 
914  if (prev_ir != s->selir) {
915  s->have_coeffs = 0;
916  }
917 
918  return 0;
919 }
920 
/* Option-flag shorthands: audio, runtime-settable audio, video. */
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

static const AVOption afir_options[] = {
    /* Gain staging: dry applies to the input copy, wet to the result. */
    { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF },
    { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF },
    /* Fraction of the IR actually used; the tail is zeroed. */
    { "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
    /* Automatic IR normalization mode (see convert_coeffs()). */
    { "gtype", "set IR auto gain type",OFFSET(gtype), AV_OPT_TYPE_INT, {.i64=0}, -1, 2, AF, "gtype" },
    { "none", "without auto gain", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, "gtype" },
    { "peak", "peak gain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "gtype" },
    { "dc", "DC gain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "gtype" },
    { "gn", "gain to noise", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "gtype" },
    { "irgain", "set IR gain", OFFSET(ir_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
    /* mono IR broadcast to all channels vs. per-channel IR. */
    { "irfmt", "set IR format", OFFSET(ir_format), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AF, "irfmt" },
    { "mono", "single channel", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "irfmt" },
    { "input", "same as input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "irfmt" },
    /* Maximum allowed IR duration in seconds (checked in check_ir()). */
    { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
    /* Optional video output showing the IR frequency response. */
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    /* Partition-size bounds for the non-uniform partitioned convolution. */
    { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 1, 32768, AF },
    { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 32768, AF },
    { "nbirs", "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF },
    /* Runtime-switchable IR selector (see process_command()). */
    { "ir", "select IR", OFFSET(selir), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AFR },
    { NULL }
};
950 
952 
954  .name = "afir",
955  .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
956  .priv_size = sizeof(AudioFIRContext),
957  .priv_class = &afir_class,
959  .init = init,
960  .activate = activate,
961  .uninit = uninit,
966 };
float, planar
Definition: samplefmt.h:69
#define NULL
Definition: coverity.c:32
static av_cold int init(AVFilterContext *ctx)
Definition: af_afir.c:836
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
static int check_ir(AVFilterLink *link, AVFrame *frame)
Definition: af_afir.c:611
int nb_coef_channels
Definition: af_afir.h:86
float re
Definition: fft.c:82
int * part_index
Definition: af_afir.h:44
int nb_channels
Definition: af_afir.h:85
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
Main libavfilter public API header.
AVFrame * block
Definition: af_afir.h:47
int input_offset
Definition: af_afir.h:41
int eof_coeffs[32]
Definition: af_afir.h:82
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_afir.c:789
float fminf(float, float)
int input_size
Definition: af_afir.h:40
int av_log2(unsigned v)
Definition: intmath.c:26
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int offset, int nb_partitions, int part_size)
Definition: af_afir.c:378
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *channel_layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates...
Definition: formats.c:569
int min_part_size
Definition: af_afir.h:95
return FFERROR_NOT_READY
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
float ir_gain
Definition: af_afir.h:68
AVFrame * coeff
Definition: af_afir.h:49
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1615
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:245
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:287
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:347
static void draw_response(AVFilterContext *ctx, AVFrame *out)
Definition: af_afir.c:289
void(* vector_fmac_scalar)(float *dst, const float *src, float mul, int len)
Multiply a vector of floats by a scalar float and add to destination vector.
Definition: float_dsp.h:54
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:462
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1091
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
float dry_gain
Definition: af_afir.h:65
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:238
AVOptions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_afir.c:263
AVFrame * buffer
Definition: af_afir.h:48
void ff_afir_init_x86(AudioFIRDSPContext *s)
Definition: af_afir_init.c:30
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_afir.c:213
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:112
#define max(a, b)
Definition: cuda_runtime.h:33
ptrdiff_t size
Definition: opengl_enc.c:100
static int process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: af_afir.c:898
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function.If this function returns true
#define FFALIGN(x, a)
Definition: macros.h:48
static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
Definition: af_afir.c:420
#define av_log(a,...)
AVFrame * ir[32]
Definition: af_afir.h:93
A filter pad used for either input or output.
Definition: internal.h:54
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVFrame * output
Definition: af_afir.h:51
#define src
Definition: vp8dsp.c:254
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1444
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:346
AVFrame * sum
Definition: af_afir.h:46
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:588
#define AFR
Definition: af_afir.c:922
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
static int convert_coeffs(AVFilterContext *ctx)
Definition: af_afir.c:450
AVFILTER_DEFINE_CLASS(afir)
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
if no frame is available
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:339
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
static const uint16_t mask[17]
Definition: lzw.c:38
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
RDFTContext ** irdft
Definition: af_afir.h:53
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
int ff_filter_process_command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Generic processing of user supplied commands that are set in the same way as the filter options...
Definition: avfilter.c:885
void * priv
private data for use by the filter
Definition: avfilter.h:354
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
AVFilterFormats * formats
List of supported formats (pixel or sample).
Definition: avfilter.h:443
const char * arg
Definition: jacosubdec.c:66
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
GLsizei GLsizei * length
Definition: opengl_enc.c:114
Definition: avfft.h:73
#define FFMAX(a, b)
Definition: common.h:94
void av_rdft_calc(RDFTContext *s, FFTSample *data)
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
Definition: hls.c:68
static av_const double hypot(double x, double y)
Definition: libm.h:366
int channels
number of audio channels, only used for audio.
Definition: frame.h:614
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:800
unsigned nb_inputs
number of input pads
Definition: avfilter.h:348
#define FFMIN(a, b)
Definition: common.h:96
float fmaxf(float, float)
AVFilterChannelLayouts * channel_layouts
Lists of supported channel layouts, only for audio.
Definition: avfilter.h:453
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:467
int coeff_size
Definition: af_afir.h:39
uint8_t w
Definition: llviddspenc.c:38
RDFTContext ** rdft
Definition: af_afir.h:53
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1469
AudioFIRDSPContext afirdsp
Definition: af_afir.h:98
AVFormatContext * ctx
Definition: movenc.c:48
int ir_channel
Definition: af_afir.h:74
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
void(* fcmul_add)(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.h:57
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static void fir_fadd(AudioFIRContext *s, float *dst, const float *src, int nb_samples)
Definition: af_afir.c:67
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:553
void ff_afir_init(AudioFIRDSPContext *dsp)
Definition: af_afir.c:828
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.c:43
A list of supported channel layouts.
Definition: formats.h:86
if(ret)
int64_t pts
Definition: af_afir.h:96
static int query_formats(AVFilterContext *ctx)
Definition: af_afir.c:716
AVFloatDSPContext * fdsp
Definition: af_afir.h:99
AVFrame * input
Definition: af_afir.h:50
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_afir.c:200
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1508
FFT functions.
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Filter definition.
Definition: avfilter.h:145
static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
Definition: af_afir.c:189
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1638
int have_coeffs
Definition: af_afir.h:83
Rational number (pair of numerator and denominator).
Definition: rational.h:58
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
void(* vector_fmul_scalar)(float *dst, const float *src, float mul, int len)
Multiply a vector of floats by a scalar float.
Definition: float_dsp.h:85
float max_ir_len
Definition: af_afir.h:70
offset must point to AVRational
Definition: opt.h:238
const char * name
Filter name.
Definition: avfilter.h:149
AVFrame * in
Definition: af_afir.h:92
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
#define snprintf
Definition: snprintf.h:34
offset must point to two consecutive integers
Definition: opt.h:235
#define AF
Definition: af_afir.c:921
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:351
float length
Definition: af_afir.h:66
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:422
AVFilter ff_af_afir
Definition: af_afir.c:953
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:560
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:379
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_afir.c:240
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
int nb_partitions
Definition: af_afir.h:35
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
common internal and external API header
AudioFIRSegment seg[1024]
Definition: af_afir.h:89
channel
Use these values when setting the channel map with ebur128_set_channel().
Definition: ebur128.h:39
#define OFFSET(x)
Definition: af_afir.c:924
static int config_video(AVFilterLink *outlink)
Definition: af_afir.c:809
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:193
avfilter_execute_func * execute
Definition: internal.h:136
static const AVOption afir_options[]
Definition: af_afir.c:926
AVFrame * video
Definition: af_afir.h:94
float gain
Definition: af_afir.h:80
static int activate(AVFilterContext *ctx)
Definition: af_afir.c:627
int fft_length
Definition: af_afir.h:38
#define av_free(p)
int len
float wet_gain
Definition: af_afir.h:64
int * output_offset
Definition: af_afir.h:43
int block_size
Definition: af_afir.h:37
A list of supported formats for one end of a filter link.
Definition: formats.h:65
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
AVRational frame_rate
Definition: af_afir.h:73
An instance of a filter.
Definition: avfilter.h:339
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:836
FILE * out
Definition: movenc.c:54
#define av_freep(p)
#define M_PI
Definition: mathematics.h:52
#define av_malloc_array(a, b)
formats
Definition: signature.h:48
static int config_output(AVFilterLink *outlink)
Definition: af_afir.c:771
internal API functions
#define VF
Definition: af_afir.c:923
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition...
Definition: formats.c:437
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
#define AV_CH_LAYOUT_MONO
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:248
float min
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
int nb_segments
Definition: af_afir.h:90
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
for(j=16;j >0;--j)
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:576
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
CGA/EGA/VGA ROM font data.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
static int ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new input pad for the filter.
Definition: internal.h:240
static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
Definition: af_afir.c:77