FFmpeg
af_afir.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * An arbitrary audio FIR filter
24  */
25 
#include <float.h>

#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"

#include "libavcodec/avfft.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "af_afir.h"
41 
/**
 * Scalar reference implementation of the complex multiply-accumulate kernel:
 * sum[k] += t[k] * c[k] for len interleaved complex values, followed by one
 * real-only product for the packed Nyquist coefficient stored at index
 * 2 * len. All three buffers must hold at least 2 * len + 1 floats.
 */
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
{
    ptrdiff_t k;

    for (k = 0; k < len; k++) {
        const float re_t = t[2 * k];
        const float im_t = t[2 * k + 1];
        const float re_c = c[2 * k];
        const float im_c = c[2 * k + 1];

        sum[2 * k]     += re_t * re_c - im_t * im_c;
        sum[2 * k + 1] += re_t * im_c + im_t * re_c;
    }

    /* Nyquist bin is purely real and stored right after the complex pairs. */
    sum[2 * len] += t[2 * len] * c[2 * len];
}
58 
60 {
61  AudioFIRContext *s = ctx->priv;
62  const float *in = (const float *)s->in[0]->extended_data[ch] + offset;
63  float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
64  const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
65  int n, i, j;
66 
67  for (int segment = 0; segment < s->nb_segments; segment++) {
68  AudioFIRSegment *seg = &s->seg[segment];
69  float *src = (float *)seg->input->extended_data[ch];
70  float *dst = (float *)seg->output->extended_data[ch];
71  float *sum = (float *)seg->sum->extended_data[ch];
72 
73  s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4));
74  emms_c();
75 
76  seg->output_offset[ch] += s->min_part_size;
77  if (seg->output_offset[ch] == seg->part_size) {
78  seg->output_offset[ch] = 0;
79  } else {
80  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
81 
82  dst += seg->output_offset[ch];
83  for (n = 0; n < nb_samples; n++) {
84  ptr[n] += dst[n];
85  }
86  continue;
87  }
88 
89  memset(sum, 0, sizeof(*sum) * seg->fft_length);
90  block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
91  memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));
92 
93  memcpy(block, src, sizeof(*src) * seg->part_size);
94 
95  av_rdft_calc(seg->rdft[ch], block);
96  block[2 * seg->part_size] = block[1];
97  block[1] = 0;
98 
99  j = seg->part_index[ch];
100 
101  for (i = 0; i < seg->nb_partitions; i++) {
102  const int coffset = j * seg->coeff_size;
103  const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
104  const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
105 
106  s->afirdsp.fcmul_add(sum, block, (const float *)coeff, seg->part_size);
107 
108  if (j == 0)
109  j = seg->nb_partitions;
110  j--;
111  }
112 
113  sum[1] = sum[2 * seg->part_size];
114  av_rdft_calc(seg->irdft[ch], sum);
115 
116  buf = (float *)seg->buffer->extended_data[ch];
117  for (n = 0; n < seg->part_size; n++) {
118  buf[n] += sum[n];
119  }
120 
121  memcpy(dst, buf, seg->part_size * sizeof(*dst));
122 
123  buf = (float *)seg->buffer->extended_data[ch];
124  memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));
125 
126  seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
127 
128  memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
129 
130  for (n = 0; n < nb_samples; n++) {
131  ptr[n] += dst[n];
132  }
133  }
134 
135  s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
136  emms_c();
137 
138  return 0;
139 }
140 
142 {
143  AudioFIRContext *s = ctx->priv;
144 
145  for (int offset = 0; offset < out->nb_samples; offset += s->min_part_size) {
147  }
148 
149  return 0;
150 }
151 
152 static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
153 {
154  AVFrame *out = arg;
155  const int start = (out->channels * jobnr) / nb_jobs;
156  const int end = (out->channels * (jobnr+1)) / nb_jobs;
157 
158  for (int ch = start; ch < end; ch++) {
159  fir_channel(ctx, out, ch);
160  }
161 
162  return 0;
163 }
164 
166 {
167  AVFilterContext *ctx = outlink->src;
168  AVFrame *out = NULL;
169 
170  out = ff_get_audio_buffer(outlink, in->nb_samples);
171  if (!out) {
172  av_frame_free(&in);
173  return AVERROR(ENOMEM);
174  }
175 
176  if (s->pts == AV_NOPTS_VALUE)
177  s->pts = in->pts;
178  s->in[0] = in;
179  ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels,
181 
182  out->pts = s->pts;
183  if (s->pts != AV_NOPTS_VALUE)
184  s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
185 
186  av_frame_free(&in);
187  s->in[0] = NULL;
188 
189  return ff_filter_frame(outlink, out);
190 }
191 
192 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
193 {
194  const uint8_t *font;
195  int font_height;
196  int i;
197 
198  font = avpriv_cga_font, font_height = 8;
199 
200  for (i = 0; txt[i]; i++) {
201  int char_y, mask;
202 
203  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
204  for (char_y = 0; char_y < font_height; char_y++) {
205  for (mask = 0x80; mask; mask >>= 1) {
206  if (font[txt[i] * font_height + char_y] & mask)
207  AV_WL32(p, color);
208  p += 4;
209  }
210  p += pic->linesize[0] - 8 * 4;
211  }
212  }
213 }
214 
215 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
216 {
217  int dx = FFABS(x1-x0);
218  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
219  int err = (dx>dy ? dx : -dy) / 2, e2;
220 
221  for (;;) {
222  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
223 
224  if (x0 == x1 && y0 == y1)
225  break;
226 
227  e2 = err;
228 
229  if (e2 >-dx) {
230  err -= dy;
231  x0--;
232  }
233 
234  if (e2 < dy) {
235  err += dx;
236  y0 += sy;
237  }
238  }
239 }
240 
242 {
243  AudioFIRContext *s = ctx->priv;
244  float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
245  float min_delay = FLT_MAX, max_delay = FLT_MIN;
246  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
247  char text[32];
248  int channel, i, x;
249 
250  memset(out->data[0], 0, s->h * out->linesize[0]);
251 
252  phase = av_malloc_array(s->w, sizeof(*phase));
253  mag = av_malloc_array(s->w, sizeof(*mag));
254  delay = av_malloc_array(s->w, sizeof(*delay));
255  if (!mag || !phase || !delay)
256  goto end;
257 
258  channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1);
259  for (i = 0; i < s->w; i++) {
260  const float *src = (const float *)s->in[1]->extended_data[channel];
261  double w = i * M_PI / (s->w - 1);
262  double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;
263 
264  for (x = 0; x < s->nb_taps; x++) {
265  real += cos(-x * w) * src[x];
266  imag += sin(-x * w) * src[x];
267  real_num += cos(-x * w) * src[x] * x;
268  imag_num += sin(-x * w) * src[x] * x;
269  }
270 
271  mag[i] = hypot(real, imag);
272  phase[i] = atan2(imag, real);
273  div = real * real + imag * imag;
274  delay[i] = (real_num * real + imag_num * imag) / div;
275  min = fminf(min, mag[i]);
276  max = fmaxf(max, mag[i]);
277  min_delay = fminf(min_delay, delay[i]);
278  max_delay = fmaxf(max_delay, delay[i]);
279  }
280 
281  for (i = 0; i < s->w; i++) {
282  int ymag = mag[i] / max * (s->h - 1);
283  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
284  int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);
285 
286  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
287  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
288  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
289 
290  if (prev_ymag < 0)
291  prev_ymag = ymag;
292  if (prev_yphase < 0)
293  prev_yphase = yphase;
294  if (prev_ydelay < 0)
295  prev_ydelay = ydelay;
296 
297  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
298  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
299  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
300 
301  prev_ymag = ymag;
302  prev_yphase = yphase;
303  prev_ydelay = ydelay;
304  }
305 
306  if (s->w > 400 && s->h > 100) {
307  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
308  snprintf(text, sizeof(text), "%.2f", max);
309  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
310 
311  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
312  snprintf(text, sizeof(text), "%.2f", min);
313  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
314 
315  drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
316  snprintf(text, sizeof(text), "%.2f", max_delay);
317  drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);
318 
319  drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
320  snprintf(text, sizeof(text), "%.2f", min_delay);
321  drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
322  }
323 
324 end:
325  av_free(delay);
326  av_free(phase);
327  av_free(mag);
328 }
329 
331  int offset, int nb_partitions, int part_size)
332 {
333  AudioFIRContext *s = ctx->priv;
334 
335  seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
336  seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
337  if (!seg->rdft || !seg->irdft)
338  return AVERROR(ENOMEM);
339 
340  seg->fft_length = part_size * 2 + 1;
341  seg->part_size = part_size;
342  seg->block_size = FFALIGN(seg->fft_length, 32);
343  seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
344  seg->nb_partitions = nb_partitions;
345  seg->input_size = offset + s->min_part_size;
346  seg->input_offset = offset;
347 
348  seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
349  seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
350  if (!seg->part_index || !seg->output_offset)
351  return AVERROR(ENOMEM);
352 
353  for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) {
354  seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
355  seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
356  if (!seg->rdft[ch] || !seg->irdft[ch])
357  return AVERROR(ENOMEM);
358  }
359 
360  seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
361  seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
362  seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
363  seg->coeff = ff_get_audio_buffer(ctx->inputs[1], seg->nb_partitions * seg->coeff_size * 2);
364  seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
365  seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
366  if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
367  return AVERROR(ENOMEM);
368 
369  return 0;
370 }
371 
373 {
374  AudioFIRContext *s = ctx->priv;
375  int left, offset = 0, part_size, max_part_size;
376  int ret, i, ch, n;
377  float power = 0;
378 
379  s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1]);
380  if (s->nb_taps <= 0)
381  return AVERROR(EINVAL);
382 
383  if (s->minp > s->maxp) {
384  s->maxp = s->minp;
385  }
386 
387  left = s->nb_taps;
388  part_size = 1 << av_log2(s->minp);
389  max_part_size = 1 << av_log2(s->maxp);
390 
391  s->min_part_size = part_size;
392 
393  for (i = 0; left > 0; i++) {
394  int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
395  int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
396 
397  s->nb_segments = i + 1;
398  ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
399  if (ret < 0)
400  return ret;
401  offset += nb_partitions * part_size;
402  left -= nb_partitions * part_size;
403  part_size *= 2;
404  part_size = FFMIN(part_size, max_part_size);
405  }
406 
407  ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_taps, s->nb_taps, &s->in[1]);
408  if (ret < 0)
409  return ret;
410  if (ret == 0)
411  return AVERROR_BUG;
412 
413  if (s->response)
414  draw_response(ctx, s->video);
415 
416  s->gain = 1;
417 
418  switch (s->gtype) {
419  case -1:
420  /* nothing to do */
421  break;
422  case 0:
423  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
424  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
425 
426  for (i = 0; i < s->nb_taps; i++)
427  power += FFABS(time[i]);
428  }
429  s->gain = ctx->inputs[1]->channels / power;
430  break;
431  case 1:
432  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
433  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
434 
435  for (i = 0; i < s->nb_taps; i++)
436  power += time[i];
437  }
438  s->gain = ctx->inputs[1]->channels / power;
439  break;
440  case 2:
441  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
442  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
443 
444  for (i = 0; i < s->nb_taps; i++)
445  power += time[i] * time[i];
446  }
447  s->gain = sqrtf(ch / power);
448  break;
449  default:
450  return AVERROR_BUG;
451  }
452 
453  s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
454  av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain);
455  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
456  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
457 
458  s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(s->nb_taps, 4));
459  }
460 
461  av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", s->nb_taps);
462  av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments);
463 
464  for (ch = 0; ch < ctx->inputs[1]->channels; ch++) {
465  float *time = (float *)s->in[1]->extended_data[!s->one2many * ch];
466  int toffset = 0;
467 
468  for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++)
469  time[i] = 0;
470 
471  av_log(ctx, AV_LOG_DEBUG, "channel: %d\n", ch);
472 
473  for (int segment = 0; segment < s->nb_segments; segment++) {
474  AudioFIRSegment *seg = &s->seg[segment];
475  float *block = (float *)seg->block->extended_data[ch];
477 
478  av_log(ctx, AV_LOG_DEBUG, "segment: %d\n", segment);
479 
480  for (i = 0; i < seg->nb_partitions; i++) {
481  const float scale = 1.f / seg->part_size;
482  const int coffset = i * seg->coeff_size;
483  const int remaining = s->nb_taps - toffset;
484  const int size = remaining >= seg->part_size ? seg->part_size : remaining;
485 
486  memset(block, 0, sizeof(*block) * seg->fft_length);
487  memcpy(block, time + toffset, size * sizeof(*block));
488 
489  av_rdft_calc(seg->rdft[0], block);
490 
491  coeff[coffset].re = block[0] * scale;
492  coeff[coffset].im = 0;
493  for (n = 1; n < seg->part_size; n++) {
494  coeff[coffset + n].re = block[2 * n] * scale;
495  coeff[coffset + n].im = block[2 * n + 1] * scale;
496  }
497  coeff[coffset + seg->part_size].re = block[1] * scale;
498  coeff[coffset + seg->part_size].im = 0;
499 
500  toffset += size;
501  }
502 
503  av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
504  av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
505  av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
506  av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
507  av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
508  av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
509  av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
510  }
511  }
512 
513  av_frame_free(&s->in[1]);
514  s->have_coeffs = 1;
515 
516  return 0;
517 }
518 
520 {
521  AVFilterContext *ctx = link->dst;
522  AudioFIRContext *s = ctx->priv;
523  int nb_taps, max_nb_taps;
524 
525  nb_taps = ff_inlink_queued_samples(link);
526  max_nb_taps = s->max_ir_len * ctx->outputs[0]->sample_rate;
527  if (nb_taps > max_nb_taps) {
528  av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", nb_taps, max_nb_taps);
529  return AVERROR(EINVAL);
530  }
531 
532  return 0;
533 }
534 
536 {
537  AudioFIRContext *s = ctx->priv;
538  AVFilterLink *outlink = ctx->outputs[0];
539  int ret, status, available, wanted;
540  AVFrame *in = NULL;
541  int64_t pts;
542 
544  if (s->response)
546  if (!s->eof_coeffs) {
547  AVFrame *ir = NULL;
548 
549  ret = check_ir(ctx->inputs[1], ir);
550  if (ret < 0)
551  return ret;
552 
553  if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF)
554  s->eof_coeffs = 1;
555 
556  if (!s->eof_coeffs) {
557  if (ff_outlink_frame_wanted(ctx->outputs[0]))
558  ff_inlink_request_frame(ctx->inputs[1]);
559  else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
560  ff_inlink_request_frame(ctx->inputs[1]);
561  return 0;
562  }
563  }
564 
565  if (!s->have_coeffs && s->eof_coeffs) {
567  if (ret < 0)
568  return ret;
569  }
570 
571  available = ff_inlink_queued_samples(ctx->inputs[0]);
572  wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
573  ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
574  if (ret > 0)
575  ret = fir_frame(s, in, outlink);
576 
577  if (ret < 0)
578  return ret;
579 
580  if (s->response && s->have_coeffs) {
581  int64_t old_pts = s->video->pts;
582  int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);
583 
584  if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
585  s->video->pts = new_pts;
586  return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
587  }
588  }
589 
590  if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
592  return 0;
593  }
594 
595  if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
596  if (status == AVERROR_EOF) {
597  ff_outlink_set_status(ctx->outputs[0], status, pts);
598  if (s->response)
599  ff_outlink_set_status(ctx->outputs[1], status, pts);
600  return 0;
601  }
602  }
603 
604  if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
605  !ff_outlink_get_status(ctx->inputs[0])) {
606  ff_inlink_request_frame(ctx->inputs[0]);
607  return 0;
608  }
609 
610  if (s->response &&
611  ff_outlink_frame_wanted(ctx->outputs[1]) &&
612  !ff_outlink_get_status(ctx->inputs[0])) {
613  ff_inlink_request_frame(ctx->inputs[0]);
614  return 0;
615  }
616 
617  return FFERROR_NOT_READY;
618 }
619 
621 {
622  AudioFIRContext *s = ctx->priv;
625  static const enum AVSampleFormat sample_fmts[] = {
628  };
629  static const enum AVPixelFormat pix_fmts[] = {
632  };
633  int ret;
634 
635  if (s->response) {
636  AVFilterLink *videolink = ctx->outputs[1];
638  if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
639  return ret;
640  }
641 
643  if (!layouts)
644  return AVERROR(ENOMEM);
645 
646  if (s->ir_format) {
648  if (ret < 0)
649  return ret;
650  } else {
652 
654  if (ret)
655  return ret;
656 
657  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts)) < 0)
658  return ret;
659  if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
660  return ret;
661  if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[1]->out_channel_layouts)) < 0)
662  return ret;
663  }
664 
666  if ((ret = ff_set_common_formats(ctx, formats)) < 0)
667  return ret;
668 
671 }
672 
673 static int config_output(AVFilterLink *outlink)
674 {
675  AVFilterContext *ctx = outlink->src;
676  AudioFIRContext *s = ctx->priv;
677 
678  s->one2many = ctx->inputs[1]->channels == 1;
679  outlink->sample_rate = ctx->inputs[0]->sample_rate;
680  outlink->time_base = ctx->inputs[0]->time_base;
681  outlink->channel_layout = ctx->inputs[0]->channel_layout;
682  outlink->channels = ctx->inputs[0]->channels;
683 
684  s->nb_channels = outlink->channels;
685  s->nb_coef_channels = ctx->inputs[1]->channels;
686  s->pts = AV_NOPTS_VALUE;
687 
688  return 0;
689 }
690 
692 {
693  AudioFIRContext *s = ctx->priv;
694 
695  if (seg->rdft) {
696  for (int ch = 0; ch < s->nb_channels; ch++) {
697  av_rdft_end(seg->rdft[ch]);
698  }
699  }
700  av_freep(&seg->rdft);
701 
702  if (seg->irdft) {
703  for (int ch = 0; ch < s->nb_channels; ch++) {
704  av_rdft_end(seg->irdft[ch]);
705  }
706  }
707  av_freep(&seg->irdft);
708 
709  av_freep(&seg->output_offset);
710  av_freep(&seg->part_index);
711 
712  av_frame_free(&seg->block);
713  av_frame_free(&seg->sum);
714  av_frame_free(&seg->buffer);
715  av_frame_free(&seg->coeff);
716  av_frame_free(&seg->input);
717  av_frame_free(&seg->output);
718  seg->input_size = 0;
719 }
720 
722 {
723  AudioFIRContext *s = ctx->priv;
724 
725  for (int i = 0; i < s->nb_segments; i++) {
726  uninit_segment(ctx, &s->seg[i]);
727  }
728 
729  av_freep(&s->fdsp);
730  av_frame_free(&s->in[1]);
731 
732  for (int i = 0; i < ctx->nb_outputs; i++)
733  av_freep(&ctx->output_pads[i].name);
734  av_frame_free(&s->video);
735 }
736 
737 static int config_video(AVFilterLink *outlink)
738 {
739  AVFilterContext *ctx = outlink->src;
740  AudioFIRContext *s = ctx->priv;
741 
742  outlink->sample_aspect_ratio = (AVRational){1,1};
743  outlink->w = s->w;
744  outlink->h = s->h;
745  outlink->frame_rate = s->frame_rate;
746  outlink->time_base = av_inv_q(outlink->frame_rate);
747 
748  av_frame_free(&s->video);
749  s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
750  if (!s->video)
751  return AVERROR(ENOMEM);
752 
753  return 0;
754 }
755 
757 {
758  dsp->fcmul_add = fcmul_add_c;
759 
760  if (ARCH_X86)
761  ff_afir_init_x86(dsp);
762 }
763 
765 {
766  AudioFIRContext *s = ctx->priv;
767  AVFilterPad pad, vpad;
768  int ret;
769 
770  pad = (AVFilterPad){
771  .name = av_strdup("default"),
772  .type = AVMEDIA_TYPE_AUDIO,
773  .config_props = config_output,
774  };
775 
776  if (!pad.name)
777  return AVERROR(ENOMEM);
778 
779  if (s->response) {
780  vpad = (AVFilterPad){
781  .name = av_strdup("filter_response"),
782  .type = AVMEDIA_TYPE_VIDEO,
783  .config_props = config_video,
784  };
785  if (!vpad.name)
786  return AVERROR(ENOMEM);
787  }
788 
789  ret = ff_insert_outpad(ctx, 0, &pad);
790  if (ret < 0) {
791  av_freep(&pad.name);
792  return ret;
793  }
794 
795  if (s->response) {
796  ret = ff_insert_outpad(ctx, 1, &vpad);
797  if (ret < 0) {
798  av_freep(&vpad.name);
799  return ret;
800  }
801  }
802 
803  s->fdsp = avpriv_float_dsp_alloc(0);
804  if (!s->fdsp)
805  return AVERROR(ENOMEM);
806 
807  ff_afir_init(&s->afirdsp);
808 
809  return 0;
810 }
811 
/* Input pads: pad 0 ("main") carries the audio to be filtered, pad 1 ("ir")
 * carries the impulse response coefficients. */
static const AVFilterPad afir_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_AUDIO,
    },{
        .name = "ir",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
822 
/* Option flag shorthands and the offsetof helper for the option table below. */
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)
826 
/* User-visible options; gtype selects the auto-gain mode applied to the IR in
 * convert_coeffs(), minp/maxp bound the (power-of-two) partition sizes. */
static const AVOption afir_options[] = {
    { "dry",    "set dry gain",      OFFSET(dry_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "wet",    "set wet gain",      OFFSET(wet_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "length", "set IR length",     OFFSET(length),     AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "gtype",  "set IR auto gain type",OFFSET(gtype),   AV_OPT_TYPE_INT,   {.i64=0},   -1,  2, AF, "gtype" },
    {  "none",  "without auto gain", 0,                  AV_OPT_TYPE_CONST, {.i64=-1},   0,  0, AF, "gtype" },
    {  "peak",  "peak gain",         0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "gtype" },
    {  "dc",    "DC gain",           0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "gtype" },
    {  "gn",    "gain to noise",     0,                  AV_OPT_TYPE_CONST, {.i64=2},    0,  0, AF, "gtype" },
    { "irgain", "set IR gain",       OFFSET(ir_gain),    AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "irfmt",  "set IR format",     OFFSET(ir_format),  AV_OPT_TYPE_INT,   {.i64=1},    0,  1, AF, "irfmt" },
    {  "mono",  "single channel",    0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "irfmt" },
    {  "input", "same as input",     0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "irfmt" },
    { "maxir",  "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size",   "set video size",    OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate",   "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    { "minp",   "set min partition size", OFFSET(minp),  AV_OPT_TYPE_INT,   {.i64=8192}, 8, 32768, AF },
    { "maxp",   "set max partition size", OFFSET(maxp),  AV_OPT_TYPE_INT,   {.i64=8192}, 8, 32768, AF },
    { NULL }
};
849 
851 
853  .name = "afir",
854  .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in 2nd stream."),
855  .priv_size = sizeof(AudioFIRContext),
856  .priv_class = &afir_class,
858  .init = init,
859  .activate = activate,
860  .uninit = uninit,
861  .inputs = afir_inputs,
864 };
check_ir
static int check_ir(AVFilterLink *link, AVFrame *frame)
Definition: af_afir.c:519
formats
formats
Definition: signature.h:48
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
activate
static int activate(AVFilterContext *ctx)
Definition: af_afir.c:535
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
AudioFIRSegment::block_size
int block_size
Definition: af_afir.h:37
out
FILE * out
Definition: movenc.c:54
color
Definition: vf_paletteuse.c:588
n
int n
Definition: avisynth_c.h:760
ff_set_common_channel_layouts
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates.
Definition: formats.c:549
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:686
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:435
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:236
VF
#define VF
Definition: af_afir.c:824
AV_CH_LAYOUT_MONO
#define AV_CH_LAYOUT_MONO
Definition: channel_layout.h:85
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:410
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
AudioFIRSegment::buffer
AVFrame * buffer
Definition: af_afir.h:48
w
uint8_t w
Definition: llviddspenc.c:38
fir_quantum
static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
Definition: af_afir.c:59
AVOption
AVOption.
Definition: opt.h:246
AudioFIRSegment::input_offset
int input_offset
Definition: af_afir.h:41
AudioFIRDSPContext::fcmul_add
void(* fcmul_add)(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.h:57
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
AudioFIRSegment::part_size
int part_size
Definition: af_afir.h:36
AudioFIRSegment::input_size
int input_size
Definition: af_afir.h:40
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1795
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
AudioFIRSegment::coeff
AVFrame * coeff
Definition: af_afir.h:49
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_afir.c:721
start
void INT64 start
Definition: avisynth_c.h:767
fir_channels
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_afir.c:152
afir_options
static const AVOption afir_options[]
Definition: af_afir.c:827
IDFT_C2R
@ IDFT_C2R
Definition: avfft.h:73
AudioFIRSegment::block
AVFrame * block
Definition: af_afir.h:47
ff_afir_init_x86
void ff_afir_init_x86(AudioFIRDSPContext *s)
Definition: af_afir_init.c:30
pts
static int64_t pts
Definition: transcode_aac.c:647
uninit_segment
static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
Definition: af_afir.c:691
src
#define src
Definition: vp8dsp.c:254
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AudioFIRSegment
Definition: af_afir.h:34
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(afir)
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:343
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1607
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:440
fminf
float fminf(float, float)
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
AudioFIRSegment::sum
AVFrame * sum
Definition: af_afir.h:46
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1500
NULL
#define NULL
Definition: coverity.c:32
ff_afir_init
void ff_afir_init(AudioFIRDSPContext *dsp)
Definition: af_afir.c:756
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:233
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_afir.c:215
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
avfft.h
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx)
Definition: af_afir.c:372
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1436
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
float_dsp.h
AudioFIRSegment::output
AVFrame * output
Definition: af_afir.h:51
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:111
ff_af_afir
AVFilter ff_af_afir
Definition: af_afir.c:852
fcmul_add_c
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
Definition: af_afir.c:42
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AudioFIRSegment::irdft
RDFTContext ** irdft
Definition: af_afir.h:53
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
fmaxf
float fmaxf(float, float)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
size
int size
Definition: twinvq_data.h:11134
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AF
#define AF
Definition: af_afir.c:823
AudioFIRDSPContext
Definition: af_afir.h:56
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
af_afir.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:238
xga_font_data.h
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out)
Definition: af_afir.c:241
M_PI
#define M_PI
Definition: mathematics.h:52
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_afir.c:620
AudioFIRSegment::rdft
RDFTContext ** rdft
Definition: af_afir.h:53
OFFSET
#define OFFSET(x)
Definition: af_afir.c:825
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_afir.c:737
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AudioFIRSegment::input
AVFrame * input
Definition: af_afir.h:50
AudioFIRSegment::coeff_size
int coeff_size
Definition: af_afir.h:39
available
if no frame is available
Definition: filter_design.txt:166
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
common.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AudioFIRSegment::nb_partitions
int nb_partitions
Definition: af_afir.h:35
uint8_t
uint8_t
Definition: audio_convert.c:194
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
len
int len
Definition: vorbis_enc_data.h:452
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1461
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
AudioFIRSegment::fft_length
int fft_length
Definition: af_afir.h:38
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_afir.c:673
AudioFIRContext
Definition: af_afir.h:61
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_insert_outpad
static int ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
Definition: internal.h:285
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
fir_channel
static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
Definition: af_afir.c:141
segment
Definition: hls.c:68
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1630
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
audio.h
fir_frame
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_afir.c:165
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_afir.c:764
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
init_segment
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int offset, int nb_partitions, int part_size)
Definition: af_afir.c:330
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
av_rdft_end
void av_rdft_end(RDFTContext *s)
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
afir_inputs
static const AVFilterPad afir_inputs[]
Definition: af_afir.c:812
length
const char int length
Definition: avisynth_c.h:860
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AudioFIRSegment::output_offset
int * output_offset
Definition: af_afir.h:43
channel
channel
Definition: ebur128.h:39
FFTComplex
Definition: avfft.h:37
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_afir.c:192
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:193
min
float min
Definition: vorbis_enc_data.h:456
AudioFIRSegment::part_index
int * part_index
Definition: af_afir.h:44