FFmpeg
af_headphone.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2017 Paul B Mahol
3  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "libavutil/tx.h"

#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"
34 
35 #define TIME_DOMAIN 0
36 #define FREQUENCY_DOMAIN 1
37 
38 #define HRIR_STEREO 0
39 #define HRIR_MULTI 1
40 
41 typedef struct HeadphoneContext {
42  const AVClass *class;
43 
44  char *map;
45  int type;
46 
48 
50  int eof_hrirs;
51 
52  int ir_len;
53  int air_len;
54 
56 
57  int nb_irs;
58 
59  float gain;
61 
62  float *ringbuffer[2];
63  int write[2];
64 
66  int n_fft;
67  int size;
68  int hrir_fmt;
69 
70  float *data_ir[2];
71  float *temp_src[2];
75 
76  AVTXContext *fft[2], *ifft[2];
79 
80  float (*scalarproduct_float)(const float *v1, const float *v2, int len);
81  struct hrir_inputs {
82  int ir_len;
83  int eof;
84  } hrir_in[64];
85  uint64_t mapping[64];
87 
88 static int parse_channel_name(const char *arg, uint64_t *rchannel)
89 {
90  uint64_t layout = av_get_channel_layout(arg);
91 
93  return AVERROR(EINVAL);
94  *rchannel = layout;
95  return 0;
96 }
97 
99 {
100  HeadphoneContext *s = ctx->priv;
101  char *arg, *tokenizer, *p;
102  uint64_t used_channels = 0;
103 
104  p = s->map;
105  while ((arg = av_strtok(p, "|", &tokenizer))) {
106  uint64_t out_channel;
107 
108  p = NULL;
109  if (parse_channel_name(arg, &out_channel)) {
110  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", arg);
111  continue;
112  }
113  if (used_channels & out_channel) {
114  av_log(ctx, AV_LOG_WARNING, "Ignoring duplicate channel '%s'.\n", arg);
115  continue;
116  }
117  used_channels |= out_channel;
118  s->mapping[s->nb_irs] = out_channel;
119  s->nb_irs++;
120  }
121 
122  if (s->hrir_fmt == HRIR_MULTI)
123  s->nb_hrir_inputs = 1;
124  else
125  s->nb_hrir_inputs = s->nb_irs;
126 }
127 
128 typedef struct ThreadData {
129  AVFrame *in, *out;
130  int *write;
131  float **ir;
133  float **ringbuffer;
134  float **temp_src;
138 } ThreadData;
139 
140 static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
141 {
142  HeadphoneContext *s = ctx->priv;
143  ThreadData *td = arg;
144  AVFrame *in = td->in, *out = td->out;
145  int offset = jobnr;
146  int *write = &td->write[jobnr];
147  const float *const ir = td->ir[jobnr];
148  int *n_clippings = &td->n_clippings[jobnr];
149  float *ringbuffer = td->ringbuffer[jobnr];
150  float *temp_src = td->temp_src[jobnr];
151  const int ir_len = s->ir_len;
152  const int air_len = s->air_len;
153  const float *src = (const float *)in->data[0];
154  float *dst = (float *)out->data[0];
155  const int in_channels = in->channels;
156  const int buffer_length = s->buffer_length;
157  const uint32_t modulo = (uint32_t)buffer_length - 1;
158  float *buffer[64];
159  int wr = *write;
160  int read;
161  int i, l;
162 
163  dst += offset;
164  for (l = 0; l < in_channels; l++) {
165  buffer[l] = ringbuffer + l * buffer_length;
166  }
167 
168  for (i = 0; i < in->nb_samples; i++) {
169  const float *cur_ir = ir;
170 
171  *dst = 0;
172  for (l = 0; l < in_channels; l++) {
173  *(buffer[l] + wr) = src[l];
174  }
175 
176  for (l = 0; l < in_channels; cur_ir += air_len, l++) {
177  const float *const bptr = buffer[l];
178 
179  if (l == s->lfe_channel) {
180  *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
181  continue;
182  }
183 
184  read = (wr - (ir_len - 1)) & modulo;
185 
186  if (read + ir_len < buffer_length) {
187  memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
188  } else {
189  int len = FFMIN(air_len - (read % ir_len), buffer_length - read);
190 
191  memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
192  memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
193  }
194 
195  dst[0] += s->scalarproduct_float(cur_ir, temp_src, FFALIGN(ir_len, 32));
196  }
197 
198  if (fabsf(dst[0]) > 1)
199  n_clippings[0]++;
200 
201  dst += 2;
202  src += in_channels;
203  wr = (wr + 1) & modulo;
204  }
205 
206  *write = wr;
207 
208  return 0;
209 }
210 
211 static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
212 {
213  HeadphoneContext *s = ctx->priv;
214  ThreadData *td = arg;
215  AVFrame *in = td->in, *out = td->out;
216  int offset = jobnr;
217  int *write = &td->write[jobnr];
218  AVComplexFloat *hrtf = s->data_hrtf[jobnr];
219  int *n_clippings = &td->n_clippings[jobnr];
220  float *ringbuffer = td->ringbuffer[jobnr];
221  const int ir_len = s->ir_len;
222  const float *src = (const float *)in->data[0];
223  float *dst = (float *)out->data[0];
224  const int in_channels = in->channels;
225  const int buffer_length = s->buffer_length;
226  const uint32_t modulo = (uint32_t)buffer_length - 1;
227  AVComplexFloat *fft_out = s->out_fft[jobnr];
228  AVComplexFloat *fft_in = s->in_fft[jobnr];
229  AVComplexFloat *fft_acc = s->temp_afft[jobnr];
230  AVTXContext *ifft = s->ifft[jobnr];
231  AVTXContext *fft = s->fft[jobnr];
232  av_tx_fn tx_fn = s->tx_fn[jobnr];
233  av_tx_fn itx_fn = s->itx_fn[jobnr];
234  const int n_fft = s->n_fft;
235  const float fft_scale = 1.0f / s->n_fft;
236  AVComplexFloat *hrtf_offset;
237  int wr = *write;
238  int n_read;
239  int i, j;
240 
241  dst += offset;
242 
243  n_read = FFMIN(ir_len, in->nb_samples);
244  for (j = 0; j < n_read; j++) {
245  dst[2 * j] = ringbuffer[wr];
246  ringbuffer[wr] = 0.0;
247  wr = (wr + 1) & modulo;
248  }
249 
250  for (j = n_read; j < in->nb_samples; j++) {
251  dst[2 * j] = 0;
252  }
253 
254  memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);
255 
256  for (i = 0; i < in_channels; i++) {
257  if (i == s->lfe_channel) {
258  for (j = 0; j < in->nb_samples; j++) {
259  dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
260  }
261  continue;
262  }
263 
264  offset = i * n_fft;
265  hrtf_offset = hrtf + offset;
266 
267  memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);
268 
269  for (j = 0; j < in->nb_samples; j++) {
270  fft_in[j].re = src[j * in_channels + i];
271  }
272 
273  tx_fn(fft, fft_out, fft_in, sizeof(float));
274 
275  for (j = 0; j < n_fft; j++) {
276  const AVComplexFloat *hcomplex = hrtf_offset + j;
277  const float re = fft_out[j].re;
278  const float im = fft_out[j].im;
279 
280  fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
281  fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
282  }
283  }
284 
285  itx_fn(ifft, fft_out, fft_acc, sizeof(float));
286 
287  for (j = 0; j < in->nb_samples; j++) {
288  dst[2 * j] += fft_out[j].re * fft_scale;
289  if (fabsf(dst[2 * j]) > 1)
290  n_clippings[0]++;
291  }
292 
293  for (j = 0; j < ir_len - 1; j++) {
294  int write_pos = (wr + j) & modulo;
295 
296  *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
297  }
298 
299  *write = wr;
300 
301  return 0;
302 }
303 
304 static int check_ir(AVFilterLink *inlink, int input_number)
305 {
306  AVFilterContext *ctx = inlink->dst;
307  HeadphoneContext *s = ctx->priv;
308  int ir_len, max_ir_len;
309 
311  max_ir_len = 65536;
312  if (ir_len > max_ir_len) {
313  av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
314  return AVERROR(EINVAL);
315  }
316  s->hrir_in[input_number].ir_len = ir_len;
317  s->ir_len = FFMAX(ir_len, s->ir_len);
318 
319  return 0;
320 }
321 
323 {
324  AVFilterContext *ctx = outlink->src;
325  int n_clippings[2] = { 0 };
326  ThreadData td;
327  AVFrame *out;
328 
329  out = ff_get_audio_buffer(outlink, in->nb_samples);
330  if (!out) {
331  av_frame_free(&in);
332  return AVERROR(ENOMEM);
333  }
334  out->pts = in->pts;
335 
336  td.in = in; td.out = out; td.write = s->write;
337  td.ir = s->data_ir; td.n_clippings = n_clippings;
338  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
339  td.out_fft = s->out_fft;
340  td.in_fft = s->in_fft;
341  td.temp_afft = s->temp_afft;
342 
343  if (s->type == TIME_DOMAIN) {
345  } else {
347  }
348  emms_c();
349 
350  if (n_clippings[0] + n_clippings[1] > 0) {
351  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
352  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
353  }
354 
355  av_frame_free(&in);
356  return ff_filter_frame(outlink, out);
357 }
358 
360 {
361  struct HeadphoneContext *s = ctx->priv;
362  const int ir_len = s->ir_len;
363  int nb_input_channels = ctx->inputs[0]->channels;
364  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
365  AVFrame *frame;
366  int ret = 0;
367  int n_fft;
368  int i, j, k;
369 
370  s->air_len = 1 << (32 - ff_clz(ir_len));
371  if (s->type == TIME_DOMAIN) {
372  s->air_len = FFALIGN(s->air_len, 32);
373  }
374  s->buffer_length = 1 << (32 - ff_clz(s->air_len));
375  s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
376 
377  if (s->type == FREQUENCY_DOMAIN) {
378  float scale;
379 
380  ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
381  if (ret < 0)
382  goto fail;
383  ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
384  if (ret < 0)
385  goto fail;
386  ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
387  if (ret < 0)
388  goto fail;
389  ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
390  if (ret < 0)
391  goto fail;
392 
393  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
394  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
395  ret = AVERROR(ENOMEM);
396  goto fail;
397  }
398  }
399 
400  if (s->type == TIME_DOMAIN) {
401  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
402  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
403  } else {
404  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
405  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
406  s->out_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
407  s->out_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
408  s->in_fft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
409  s->in_fft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
410  s->temp_afft[0] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
411  s->temp_afft[1] = av_calloc(s->n_fft, sizeof(AVComplexFloat));
412  if (!s->in_fft[0] || !s->in_fft[1] ||
413  !s->out_fft[0] || !s->out_fft[1] ||
414  !s->temp_afft[0] || !s->temp_afft[1]) {
415  ret = AVERROR(ENOMEM);
416  goto fail;
417  }
418  }
419 
420  if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
421  ret = AVERROR(ENOMEM);
422  goto fail;
423  }
424 
425  if (s->type == TIME_DOMAIN) {
426  s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
427  s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
428 
429  s->data_ir[0] = av_calloc(nb_input_channels * s->air_len, sizeof(*s->data_ir[0]));
430  s->data_ir[1] = av_calloc(nb_input_channels * s->air_len, sizeof(*s->data_ir[1]));
431  if (!s->data_ir[0] || !s->data_ir[1] || !s->temp_src[0] || !s->temp_src[1]) {
432  ret = AVERROR(ENOMEM);
433  goto fail;
434  }
435  } else {
436  s->data_hrtf[0] = av_calloc(n_fft, sizeof(*s->data_hrtf[0]) * nb_input_channels);
437  s->data_hrtf[1] = av_calloc(n_fft, sizeof(*s->data_hrtf[1]) * nb_input_channels);
438  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
439  ret = AVERROR(ENOMEM);
440  goto fail;
441  }
442  }
443 
444  for (i = 0; i < s->nb_hrir_inputs; av_frame_free(&frame), i++) {
445  int len = s->hrir_in[i].ir_len;
446  float *ptr;
447 
448  ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &frame);
449  if (ret < 0)
450  goto fail;
451  ptr = (float *)frame->extended_data[0];
452 
453  if (s->hrir_fmt == HRIR_STEREO) {
454  int idx = av_get_channel_layout_channel_index(inlink->channel_layout,
455  s->mapping[i]);
456  if (idx < 0)
457  continue;
458  if (s->type == TIME_DOMAIN) {
459  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
460  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
461 
462  for (j = 0; j < len; j++) {
463  data_ir_l[j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
464  data_ir_r[j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
465  }
466  } else {
467  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
468  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
469  AVComplexFloat *fft_in_l = s->in_fft[0];
470  AVComplexFloat *fft_in_r = s->in_fft[1];
471 
472  for (j = 0; j < len; j++) {
473  fft_in_l[j].re = ptr[j * 2 ] * gain_lin;
474  fft_in_r[j].re = ptr[j * 2 + 1] * gain_lin;
475  }
476 
477  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(float));
478  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(float));
479  }
480  } else {
481  int I, N = ctx->inputs[1]->channels;
482 
483  for (k = 0; k < N / 2; k++) {
484  int idx = av_get_channel_layout_channel_index(inlink->channel_layout,
485  s->mapping[k]);
486  if (idx < 0)
487  continue;
488 
489  I = k * 2;
490  if (s->type == TIME_DOMAIN) {
491  float *data_ir_l = s->data_ir[0] + idx * s->air_len;
492  float *data_ir_r = s->data_ir[1] + idx * s->air_len;
493 
494  for (j = 0; j < len; j++) {
495  data_ir_l[j] = ptr[len * N - j * N - N + I ] * gain_lin;
496  data_ir_r[j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
497  }
498  } else {
499  AVComplexFloat *fft_out_l = s->data_hrtf[0] + idx * n_fft;
500  AVComplexFloat *fft_out_r = s->data_hrtf[1] + idx * n_fft;
501  AVComplexFloat *fft_in_l = s->in_fft[0];
502  AVComplexFloat *fft_in_r = s->in_fft[1];
503 
504  for (j = 0; j < len; j++) {
505  fft_in_l[j].re = ptr[j * N + I ] * gain_lin;
506  fft_in_r[j].re = ptr[j * N + I + 1] * gain_lin;
507  }
508 
509  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(float));
510  s->tx_fn[0](s->fft[0], fft_out_r, fft_in_r, sizeof(float));
511  }
512  }
513  }
514  }
515 
516  s->have_hrirs = 1;
517 
518 fail:
519  return ret;
520 }
521 
523 {
524  HeadphoneContext *s = ctx->priv;
525  AVFilterLink *inlink = ctx->inputs[0];
526  AVFilterLink *outlink = ctx->outputs[0];
527  AVFrame *in = NULL;
528  int i, ret;
529 
531  if (!s->eof_hrirs) {
532  int eof = 1;
533  for (i = 0; i < s->nb_hrir_inputs; i++) {
534  AVFilterLink *input = ctx->inputs[i + 1];
535 
536  if (s->hrir_in[i].eof)
537  continue;
538 
539  if ((ret = check_ir(input, i)) < 0)
540  return ret;
541 
544  av_log(ctx, AV_LOG_ERROR, "No samples provided for "
545  "HRIR stream %d.\n", i);
546  return AVERROR_INVALIDDATA;
547  }
548  s->hrir_in[i].eof = 1;
549  } else {
550  if (ff_outlink_frame_wanted(ctx->outputs[0]))
552  eof = 0;
553  }
554  }
555  if (!eof)
556  return 0;
557  s->eof_hrirs = 1;
558 
560  if (ret < 0)
561  return ret;
562  } else if (!s->have_hrirs)
563  return AVERROR_EOF;
564 
565  if ((ret = ff_inlink_consume_samples(ctx->inputs[0], s->size, s->size, &in)) > 0) {
566  ret = headphone_frame(s, in, outlink);
567  if (ret < 0)
568  return ret;
569  }
570 
571  if (ret < 0)
572  return ret;
573 
574  FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
575  if (ff_outlink_frame_wanted(ctx->outputs[0]))
576  ff_inlink_request_frame(ctx->inputs[0]);
577 
578  return 0;
579 }
580 
582 {
583  struct HeadphoneContext *s = ctx->priv;
586  AVFilterChannelLayouts *stereo_layout = NULL;
587  AVFilterChannelLayouts *hrir_layouts = NULL;
588  int ret, i;
589 
591  if (ret)
592  return ret;
594  if (ret)
595  return ret;
596 
598  if (!layouts)
599  return AVERROR(ENOMEM);
600 
601  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
602  if (ret)
603  return ret;
604 
605  ret = ff_add_channel_layout(&stereo_layout, AV_CH_LAYOUT_STEREO);
606  if (ret)
607  return ret;
608  ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->incfg.channel_layouts);
609  if (ret)
610  return ret;
611 
612  if (s->hrir_fmt == HRIR_MULTI) {
613  hrir_layouts = ff_all_channel_counts();
614  if (!hrir_layouts)
615  return AVERROR(ENOMEM);
616  ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->outcfg.channel_layouts);
617  if (ret)
618  return ret;
619  } else {
620  for (i = 1; i <= s->nb_hrir_inputs; i++) {
621  ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->outcfg.channel_layouts);
622  if (ret)
623  return ret;
624  }
625  }
626 
628 }
629 
631 {
632  AVFilterContext *ctx = inlink->dst;
633  HeadphoneContext *s = ctx->priv;
634 
635  if (s->nb_irs < inlink->channels) {
636  av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->channels);
637  return AVERROR(EINVAL);
638  }
639 
640  s->lfe_channel = av_get_channel_layout_channel_index(inlink->channel_layout,
642  return 0;
643 }
644 
646 {
647  HeadphoneContext *s = ctx->priv;
648  int i, ret;
649 
650  AVFilterPad pad = {
651  .name = "in0",
652  .type = AVMEDIA_TYPE_AUDIO,
653  .config_props = config_input,
654  };
655  if ((ret = ff_append_inpad(ctx, &pad)) < 0)
656  return ret;
657 
658  if (!s->map) {
659  av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
660  return AVERROR(EINVAL);
661  }
662 
663  parse_map(ctx);
664 
665  for (i = 0; i < s->nb_hrir_inputs; i++) {
666  char *name = av_asprintf("hrir%d", i);
667  AVFilterPad pad = {
668  .name = name,
669  .type = AVMEDIA_TYPE_AUDIO,
670  };
671  if (!name)
672  return AVERROR(ENOMEM);
673  if ((ret = ff_append_inpad_free_name(ctx, &pad)) < 0)
674  return ret;
675  }
676 
677  if (s->type == TIME_DOMAIN) {
679  if (!fdsp)
680  return AVERROR(ENOMEM);
681  s->scalarproduct_float = fdsp->scalarproduct_float;
682  av_free(fdsp);
683  }
684 
685  return 0;
686 }
687 
688 static int config_output(AVFilterLink *outlink)
689 {
690  AVFilterContext *ctx = outlink->src;
691  HeadphoneContext *s = ctx->priv;
692  AVFilterLink *inlink = ctx->inputs[0];
693 
694  if (s->hrir_fmt == HRIR_MULTI) {
695  AVFilterLink *hrir_link = ctx->inputs[1];
696 
697  if (hrir_link->channels < inlink->channels * 2) {
698  av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->channels * 2);
699  return AVERROR(EINVAL);
700  }
701  }
702 
703  s->gain_lfe = expf((s->gain - 3 * inlink->channels + s->lfe_gain) / 20 * M_LN10);
704 
705  return 0;
706 }
707 
709 {
710  HeadphoneContext *s = ctx->priv;
711 
712  av_tx_uninit(&s->ifft[0]);
713  av_tx_uninit(&s->ifft[1]);
714  av_tx_uninit(&s->fft[0]);
715  av_tx_uninit(&s->fft[1]);
716  av_freep(&s->data_ir[0]);
717  av_freep(&s->data_ir[1]);
718  av_freep(&s->ringbuffer[0]);
719  av_freep(&s->ringbuffer[1]);
720  av_freep(&s->temp_src[0]);
721  av_freep(&s->temp_src[1]);
722  av_freep(&s->out_fft[0]);
723  av_freep(&s->out_fft[1]);
724  av_freep(&s->in_fft[0]);
725  av_freep(&s->in_fft[1]);
726  av_freep(&s->temp_afft[0]);
727  av_freep(&s->temp_afft[1]);
728  av_freep(&s->data_hrtf[0]);
729  av_freep(&s->data_hrtf[1]);
730 }
731 
732 #define OFFSET(x) offsetof(HeadphoneContext, x)
733 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
734 
735 static const AVOption headphone_options[] = {
736  { "map", "set channels convolution mappings", OFFSET(map), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
737  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
738  { "lfe", "set lfe gain in dB", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
739  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, "type" },
740  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, "type" },
741  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, "type" },
742  { "size", "set frame size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
743  { "hrir", "set hrir format", OFFSET(hrir_fmt), AV_OPT_TYPE_INT, {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, "hrir" },
744  { "stereo", "hrir files have exactly 2 channels", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, "hrir" },
745  { "multich", "single multichannel hrir file", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_MULTI}, 0, 0, .flags = FLAGS, "hrir" },
746  { NULL }
747 };
748 
749 AVFILTER_DEFINE_CLASS(headphone);
750 
751 static const AVFilterPad outputs[] = {
752  {
753  .name = "default",
754  .type = AVMEDIA_TYPE_AUDIO,
755  .config_props = config_output,
756  },
757 };
758 
760  .name = "headphone",
761  .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
762  .priv_size = sizeof(HeadphoneContext),
763  .priv_class = &headphone_class,
764  .init = init,
765  .uninit = uninit,
767  .activate = activate,
768  .inputs = NULL,
771 };
formats
formats
Definition: signature.h:48
HeadphoneContext::hrir_inputs
Definition: af_headphone.c:81
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
Definition: af_headphone.c:359
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:88
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_headphone.c:36
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
td
#define td
Definition: regdef.h:70
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
HeadphoneContext::gain_lfe
float gain_lfe
Definition: af_headphone.c:60
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
HeadphoneContext::data_ir
float * data_ir[2]
Definition: af_headphone.c:70
ThreadData::out_fft
AVComplexFloat ** out_fft
Definition: af_headphone.c:135
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1019
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:548
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
parse_channel_name
static int parse_channel_name(const char *arg, uint64_t *rchannel)
Definition: af_headphone.c:88
HeadphoneContext::size
int size
Definition: af_headphone.c:67
HeadphoneContext::temp_afft
AVComplexFloat * temp_afft[2]
Definition: af_headphone.c:74
AVTXContext
Definition: tx_priv.h:110
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
ff_clz
#define ff_clz
Definition: intmath.h:142
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:523
im
float im
Definition: fft.c:78
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:396
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_headphone.c:35
AVOption
AVOption.
Definition: opt.h:247
HeadphoneContext::ringbuffer
float * ringbuffer[2]
Definition: af_headphone.c:62
expf
#define expf(x)
Definition: libm.h:283
ff_set_common_all_samplerates
int ff_set_common_all_samplerates(AVFilterContext *ctx)
Equivalent to ff_set_common_samplerates(ctx, ff_all_samplerates())
Definition: formats.c:687
HeadphoneContext::eof_hrirs
int eof_hrirs
Definition: af_headphone.c:50
AVComplexFloat
Definition: tx.h:27
HeadphoneContext::fft
AVTXContext * fft[2]
Definition: af_headphone.c:76
av_get_channel_layout
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
Definition: channel_layout.c:145
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:153
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:492
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:228
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:134
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
HeadphoneContext::data_hrtf
AVComplexFloat * data_hrtf[2]
Definition: af_headphone.c:78
HeadphoneContext::hrir_inputs::ir_len
int ir_len
Definition: af_headphone.c:82
AVComplexFloat::im
float im
Definition: tx.h:28
HeadphoneContext::air_len
int air_len
Definition: af_headphone.c:53
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
ff_append_inpad
int ff_append_inpad(AVFilterContext *f, AVFilterPad *p)
Append a new input/output pad to the filter's list of such pads.
Definition: avfilter.c:140
HeadphoneContext::ir_len
int ir_len
Definition: af_headphone.c:52
fail
#define fail()
Definition: checkasm.h:127
HeadphoneContext::scalarproduct_float
float(* scalarproduct_float)(const float *v1, const float *v2, int len)
Definition: af_headphone.c:80
activate
static int activate(AVFilterContext *ctx)
Definition: af_headphone.c:522
HeadphoneContext::hrir_inputs::eof
int eof
Definition: af_headphone.c:83
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:133
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1376
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:110
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:91
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_headphone.c:630
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:50
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
OFFSET
#define OFFSET(x)
Definition: af_headphone.c:732
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:697
HRIR_MULTI
#define HRIR_MULTI
Definition: af_headphone.c:39
HRIR_STEREO
#define HRIR_STEREO
Definition: af_headphone.c:38
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:102
AV_CH_LOW_FREQUENCY
#define AV_CH_LOW_FREQUENCY
Definition: channel_layout.h:52
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:425
AVFloatDSPContext::scalarproduct_float
float(* scalarproduct_float)(const float *v1, const float *v2, int len)
Calculate the scalar product of two vectors of floats.
Definition: float_dsp.h:175
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1535
s
#define s(width, name)
Definition: cbs_vp9.c:257
HeadphoneContext::buffer_length
int buffer_length
Definition: af_headphone.c:65
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:592
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:186
filters.h
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type AVComplexFloat.
Definition: tx.h:45
ctx
AVFormatContext * ctx
Definition: movenc.c:48
HeadphoneContext::lfe_gain
float lfe_gain
Definition: af_headphone.c:60
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_headphone.c:708
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
headphone_convolute
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:140
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1437
NULL
#define NULL
Definition: coverity.c:32
FLAGS
#define FLAGS
Definition: af_headphone.c:733
parse_map
static void parse_map(AVFilterContext *ctx)
Definition: af_headphone.c:98
ff_append_inpad_free_name
int ff_append_inpad_free_name(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:145
outputs
static const AVFilterPad outputs[]
Definition: af_headphone.c:751
src
#define src
Definition: vp8dsp.c:255
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:419
ThreadData::in_fft
AVComplexFloat ** in_fft
Definition: af_headphone.c:136
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_headphone.c:581
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:226
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(headphone)
float_dsp.h
HeadphoneContext::mapping
uint64_t mapping[64]
Definition: af_headphone.c:85
headphone_frame
static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_headphone.c:322
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
HeadphoneContext::write
int write[2]
Definition: af_headphone.c:63
size
int size
Definition: twinvq_data.h:10344
AVComplexFloat::re
float re
Definition: tx.h:28
HeadphoneContext::gain
float gain
Definition: af_headphone.c:59
AVFloatDSPContext
Definition: float_dsp.h:24
HeadphoneContext
Definition: af_headphone.c:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
N
#define N
Definition: af_mcompand.c:53
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:514
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets ctx to NULL, does nothing when ctx == NULL.
Definition: tx.c:213
headphone_options
static const AVOption headphone_options[]
Definition: af_headphone.c:735
HeadphoneContext::have_hrirs
int have_hrirs
Definition: af_headphone.c:49
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:227
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:369
i
int i
Definition: input.c:406
HeadphoneContext::map
char * map
Definition: af_headphone.c:44
HeadphoneContext::tx_fn
av_tx_fn tx_fn[2]
Definition: af_headphone.c:77
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_headphone.c:645
av_get_channel_layout_channel_index
int av_get_channel_layout_channel_index(uint64_t channel_layout, uint64_t channel)
Get the index of a channel in channel_layout.
Definition: channel_layout.c:239
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
HeadphoneContext::hrir_fmt
int hrir_fmt
Definition: af_headphone.c:68
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:132
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:56
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1397
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
HeadphoneContext::in_fft
AVComplexFloat * in_fft[2]
Definition: af_headphone.c:73
AVFilter
Filter definition.
Definition: avfilter.h:149
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ThreadData::write
int * write
Definition: af_headphone.c:130
ThreadData::temp_afft
AVComplexFloat ** temp_afft
Definition: af_headphone.c:137
channel_layout.h
HeadphoneContext::nb_hrir_inputs
int nb_hrir_inputs
Definition: af_headphone.c:55
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
HeadphoneContext::ifft
AVTXContext * ifft[2]
Definition: af_headphone.c:76
HeadphoneContext::itx_fn
av_tx_fn itx_fn[2]
Definition: af_headphone.c:77
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
avfilter.h
headphone_fast_convolute
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:211
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1558
AVFilterContext
An instance of a filter.
Definition: avfilter.h:346
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:121
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:43
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:171
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:153
ff_af_headphone
const AVFilter ff_af_headphone
Definition: af_headphone.c:759
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
ThreadData::ir
float ** ir
Definition: af_headphone.c:131
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
HeadphoneContext::temp_src
float * temp_src[2]
Definition: af_headphone.c:71
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
HeadphoneContext::type
int type
Definition: af_headphone.c:45
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:143
HeadphoneContext::lfe_channel
int lfe_channel
Definition: af_headphone.c:47
HeadphoneContext::hrir_in
struct HeadphoneContext::hrir_inputs hrir_in[64]
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:233
HeadphoneContext::nb_irs
int nb_irs
Definition: af_headphone.c:57
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_headphone.c:688
HeadphoneContext::n_fft
int n_fft
Definition: af_headphone.c:66
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
HeadphoneContext::out_fft
AVComplexFloat * out_fft[2]
Definition: af_headphone.c:72
tx.h
re
float re
Definition: fft.c:78
check_ir
static int check_ir(AVFilterLink *inlink, int input_number)
Definition: af_headphone.c:304
intmath.h