af_headphone.c
/*
 * Copyright (C) 2017 Paul B Mahol
 * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>

#include "libavutil/audio_fifo.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"

#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define TIME_DOMAIN 0
#define FREQUENCY_DOMAIN 1

#define HRIR_STEREO 0
#define HRIR_MULTI 1

typedef struct HeadphoneContext {
    const AVClass *class;

    char *map;
    int type;

    int lfe_channel;

    int have_hrirs;
    int eof_hrirs;
    int64_t pts;

    int ir_len;

    int mapping[64];

    int nb_inputs;

    int nb_irs;

    float gain;
    float lfe_gain, gain_lfe;

    float *ringbuffer[2];
    int write[2];

    int buffer_length;
    int n_fft;
    int size;
    int hrir_fmt;

    int *delay[2];
    float *data_ir[2];
    float *temp_src[2];
    FFTComplex *temp_fft[2];

    FFTContext *fft[2], *ifft[2];
    FFTComplex *data_hrtf[2];

    AVFloatDSPContext *fdsp;
    struct headphone_inputs {
        AVFrame *frame;
        AVAudioFifo *fifo;
        int ir_len;
        int delay_l;
        int delay_r;
        int eof;
    } *in;
} HeadphoneContext;

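/*
 * Map one channel name token (e.g. "FL", "LFE") from the map string to its
 * bit index in the channel layout; the LFE position is remembered so it can
 * later be mixed in with a separate gain instead of being convolved.
 */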
static int parse_channel_name(HeadphoneContext *s, int x, char **arg, int *rchannel, char *buf)
{
    int len, i, channel_id = 0;
    int64_t layout, layout0;

    if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
        layout0 = layout = av_get_channel_layout(buf);
        if (layout == AV_CH_LOW_FREQUENCY)
            s->lfe_channel = x;
        for (i = 32; i > 0; i >>= 1) {
            if (layout >= 1LL << i) {
                channel_id += i;
                layout >>= i;
            }
        }
        if (channel_id >= 64 || layout0 != 1LL << channel_id)
            return AVERROR(EINVAL);
        *rchannel = channel_id;
        *arg += len;
        return 0;
    }
    return AVERROR(EINVAL);
}

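/*
 * Split the "map" option on '|' and record, for every HRIR that will be
 * supplied, which input channel it belongs to.  This also fixes the number
 * of extra input pads: one per mapped channel with stereo HRIRs, or a single
 * multichannel HRIR stream in multich mode.
 */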
static void parse_map(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    char *arg, *tokenizer, *p, *args = av_strdup(s->map);
    int i;

    if (!args)
        return;
    p = args;

    s->lfe_channel = -1;
    s->nb_inputs = 1;

    for (i = 0; i < 64; i++) {
        s->mapping[i] = -1;
    }

    while ((arg = av_strtok(p, "|", &tokenizer))) {
        int out_ch_id;
        char buf[8];

        p = NULL;
        if (parse_channel_name(s, s->nb_irs, &arg, &out_ch_id, buf)) {
            av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
            continue;
        }
        s->mapping[s->nb_irs] = out_ch_id;
        s->nb_irs++;
    }

    if (s->hrir_fmt == HRIR_MULTI)
        s->nb_inputs = 2;
    else
        s->nb_inputs = s->nb_irs + 1;

    av_free(args);
}

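/*
 * Per-job state handed to the worker threads: job 0 produces the left output
 * channel, job 1 the right one, each with its own IR set, delay table, ring
 * buffer and scratch buffers.
 */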
typedef struct ThreadData {
    AVFrame *in, *out;
    int *write;
    int **delay;
    float **ir;
    int *n_clippings;
    float **ringbuffer;
    float **temp_src;
    FFTComplex **temp_fft;
} ThreadData;

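/*
 * Time-domain path: for each output sample, convolve every input channel
 * with its (time-reversed) HRIR for this ear through a per-channel ring
 * buffer, sum the results, and count samples that clip.  The LFE channel is
 * not convolved; it is added directly with gain_lfe.
 */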
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    const int *const delay = td->delay[jobnr];
    const float *const ir = td->ir[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_len = s->ir_len;
    const float *src = (const float *)in->data[0];
    float *dst = (float *)out->data[0];
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    float *buffer[16];
    int wr = *write;
    int read;
    int i, l;

    dst += offset;
    for (l = 0; l < in_channels; l++) {
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir;

        *dst = 0;
        for (l = 0; l < in_channels; l++) {
            *(buffer[l] + wr) = src[l];
        }

        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += FFALIGN(ir_len, 16);
                continue;
            }

            read = (wr - *(delay + l) - (ir_len - 1) + buffer_length) & modulo;

            if (read + ir_len < buffer_length) {
                memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
            } else {
                int len = FFMIN(ir_len - (read % ir_len), buffer_length - read);

                memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
                memcpy(temp_src + len, bptr, (ir_len - len) * sizeof(*temp_src));
            }

            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, ir_len);
            temp_ir += FFALIGN(ir_len, 16);
        }

        if (fabs(*dst) > 1)
            *n_clippings += 1;

        dst += 2;
        src += in_channels;
        wr = (wr + 1) & modulo;
    }

    *write = wr;

    return 0;
}

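/*
 * Frequency-domain path: zero-pad each input channel into an FFT buffer,
 * multiply it with the precomputed HRTF spectrum for this ear, transform
 * back, and overlap-add the inverse-FFT tail into the ring buffer so it
 * carries over into the next block.
 */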
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    FFTComplex *hrtf = s->data_hrtf[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    const int ir_len = s->ir_len;
    const float *src = (const float *)in->data[0];
    float *dst = (float *)out->data[0];
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    FFTComplex *fft_in = s->temp_fft[jobnr];
    FFTContext *ifft = s->ifft[jobnr];
    FFTContext *fft = s->fft[jobnr];
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft;
    FFTComplex *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    dst += offset;

    n_read = FFMIN(s->ir_len, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        dst[2 * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0;
        wr = (wr + 1) & modulo;
    }

    for (j = n_read; j < in->nb_samples; j++) {
        dst[2 * j] = 0;
    }

    for (i = 0; i < in_channels; i++) {
        if (i == s->lfe_channel) {
            for (j = 0; j < in->nb_samples; j++) {
                dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
            }
            continue;
        }

        offset = i * n_fft;
        hrtf_offset = hrtf + offset;

        memset(fft_in, 0, sizeof(FFTComplex) * n_fft);

        for (j = 0; j < in->nb_samples; j++) {
            fft_in[j].re = src[j * in_channels + i];
        }

        av_fft_permute(fft, fft_in);
        av_fft_calc(fft, fft_in);
        for (j = 0; j < n_fft; j++) {
            const FFTComplex *hcomplex = hrtf_offset + j;
            const float re = fft_in[j].re;
            const float im = fft_in[j].im;

            fft_in[j].re = re * hcomplex->re - im * hcomplex->im;
            fft_in[j].im = re * hcomplex->im + im * hcomplex->re;
        }

        av_fft_permute(ifft, fft_in);
        av_fft_calc(ifft, fft_in);

        for (j = 0; j < in->nb_samples; j++) {
            dst[2 * j] += fft_in[j].re * fft_scale;
        }

        for (j = 0; j < ir_len - 1; j++) {
            int write_pos = (wr + j) & modulo;

            *(ringbuffer + write_pos) += fft_in[in->nb_samples + j].re * fft_scale;
        }
    }

    for (i = 0; i < out->nb_samples; i++) {
        if (fabs(*dst) > 1) {
            n_clippings[0]++;
        }

        dst += 2;
    }

    *write = wr;

    return 0;
}

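/*
 * Input callback for the HRIR pads: buffer the incoming IR samples in the
 * per-input FIFO and track the longest IR seen so far (capped at 64k
 * samples).
 */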
static int read_ir(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    HeadphoneContext *s = ctx->priv;
    int ir_len, max_ir_len, input_number, ret;

    for (input_number = 0; input_number < s->nb_inputs; input_number++)
        if (inlink == ctx->inputs[input_number])
            break;

    ret = av_audio_fifo_write(s->in[input_number].fifo, (void **)frame->extended_data,
                              frame->nb_samples);
    av_frame_free(&frame);

    if (ret < 0)
        return ret;

    ir_len = av_audio_fifo_size(s->in[input_number].fifo);
    max_ir_len = 65536;
    if (ir_len > max_ir_len) {
        av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
        return AVERROR(EINVAL);
    }
    s->in[input_number].ir_len = ir_len;
    s->ir_len = FFMAX(ir_len, s->ir_len);

    return 0;
}

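/*
 * Pull one block of s->size samples from the main input FIFO, run the
 * convolution (threaded, one job per ear) and emit an interleaved stereo
 * frame, warning if any output sample clipped.
 */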
static int headphone_frame(HeadphoneContext *s, AVFilterLink *outlink, int max_nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    AVFrame *in = s->in[0].frame;
    int n_clippings[2] = { 0 };
    ThreadData td;
    AVFrame *out;

    av_audio_fifo_read(s->in[0].fifo, (void **)in->extended_data, s->size);

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = s->pts;
    if (s->pts != AV_NOPTS_VALUE)
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    td.in = in; td.out = out; td.write = s->write;
    td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
    td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
    td.temp_fft = s->temp_fft;

    if (s->type == TIME_DOMAIN) {
        ctx->internal->execute(ctx, headphone_convolute, &td, NULL, 2);
    } else {
        ctx->internal->execute(ctx, headphone_fast_convolute, &td, NULL, 2);
    }
    emms_c();

    if (n_clippings[0] + n_clippings[1] > 0) {
        av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
               n_clippings[0] + n_clippings[1], out->nb_samples * 2);
    }

    out->nb_samples = max_nb_samples;
    return ff_filter_frame(outlink, out);
}

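/*
 * Once all HRIR inputs have reached EOF, build the convolution coefficients:
 * read every IR from its FIFO, reverse and scale it for the time-domain
 * path, or zero-pad and FFT it into per-channel HRTF spectra for the
 * frequency-domain path.  Buffer lengths are rounded up to powers of two so
 * the ring buffers can be indexed with a simple bitmask.
 */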
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
{
    struct HeadphoneContext *s = ctx->priv;
    const int ir_len = s->ir_len;
    int nb_irs = s->nb_irs;
    int nb_input_channels = ctx->inputs[0]->channels;
    float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
    FFTComplex *data_hrtf_l = NULL;
    FFTComplex *data_hrtf_r = NULL;
    FFTComplex *fft_in_l = NULL;
    FFTComplex *fft_in_r = NULL;
    float *data_ir_l = NULL;
    float *data_ir_r = NULL;
    int offset = 0, ret = 0;
    int n_fft;
    int i, j, k;

    s->buffer_length = 1 << (32 - ff_clz(s->ir_len));
    s->n_fft = n_fft = 1 << (32 - ff_clz(s->ir_len + s->size));

    if (s->type == FREQUENCY_DOMAIN) {
        fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
        fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
        if (!fft_in_l || !fft_in_r) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        av_fft_end(s->fft[0]);
        av_fft_end(s->fft[1]);
        s->fft[0] = av_fft_init(log2(s->n_fft), 0);
        s->fft[1] = av_fft_init(log2(s->n_fft), 0);
        av_fft_end(s->ifft[0]);
        av_fft_end(s->ifft[1]);
        s->ifft[0] = av_fft_init(log2(s->n_fft), 1);
        s->ifft[1] = av_fft_init(log2(s->n_fft), 1);

        if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    s->data_ir[0] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * s->nb_irs);
    s->data_ir[1] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * s->nb_irs);
    s->delay[0] = av_calloc(s->nb_irs, sizeof(float));
    s->delay[1] = av_calloc(s->nb_irs, sizeof(float));

    if (s->type == TIME_DOMAIN) {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
    } else {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
        s->temp_fft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
        s->temp_fft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
        if (!s->temp_fft[0] || !s->temp_fft[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!s->data_ir[0] || !s->data_ir[1] ||
        !s->ringbuffer[0] || !s->ringbuffer[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->in[0].frame = ff_get_audio_buffer(ctx->inputs[0], s->size);
    if (!s->in[0].frame) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    for (i = 0; i < s->nb_inputs - 1; i++) {
        s->in[i + 1].frame = ff_get_audio_buffer(ctx->inputs[i + 1], s->ir_len);
        if (!s->in[i + 1].frame) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (s->type == TIME_DOMAIN) {
        s->temp_src[0] = av_calloc(FFALIGN(ir_len, 16), sizeof(float));
        s->temp_src[1] = av_calloc(FFALIGN(ir_len, 16), sizeof(float));

        data_ir_l = av_calloc(nb_irs * FFALIGN(ir_len, 16), sizeof(*data_ir_l));
        data_ir_r = av_calloc(nb_irs * FFALIGN(ir_len, 16), sizeof(*data_ir_r));
        if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    } else {
        data_hrtf_l = av_calloc(n_fft, sizeof(*data_hrtf_l) * nb_irs);
        data_hrtf_r = av_calloc(n_fft, sizeof(*data_hrtf_r) * nb_irs);
        if (!data_hrtf_r || !data_hrtf_l) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (i = 0; i < s->nb_inputs - 1; i++) {
        int len = s->in[i + 1].ir_len;
        int delay_l = s->in[i + 1].delay_l;
        int delay_r = s->in[i + 1].delay_r;
        float *ptr;

        av_audio_fifo_read(s->in[i + 1].fifo, (void **)s->in[i + 1].frame->extended_data, len);
        ptr = (float *)s->in[i + 1].frame->extended_data[0];

        if (s->hrir_fmt == HRIR_STEREO) {
            int idx = -1;

            for (j = 0; j < inlink->channels; j++) {
                if (s->mapping[i] < 0) {
                    continue;
                }

                if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[i])) {
                    idx = i;
                    break;
                }
            }

            if (idx == -1)
                continue;
            if (s->type == TIME_DOMAIN) {
                offset = idx * FFALIGN(len, 16);
                for (j = 0; j < len; j++) {
                    data_ir_l[offset + j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
                    data_ir_r[offset + j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
                }
            } else {
                memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
                memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));

                offset = idx * n_fft;
                for (j = 0; j < len; j++) {
                    fft_in_l[delay_l + j].re = ptr[j * 2    ] * gain_lin;
                    fft_in_r[delay_r + j].re = ptr[j * 2 + 1] * gain_lin;
                }

                av_fft_permute(s->fft[0], fft_in_l);
                av_fft_calc(s->fft[0], fft_in_l);
                memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
                av_fft_permute(s->fft[0], fft_in_r);
                av_fft_calc(s->fft[0], fft_in_r);
                memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
            }
        } else {
            int I, N = ctx->inputs[1]->channels;

            for (k = 0; k < N / 2; k++) {
                int idx = -1;

                for (j = 0; j < inlink->channels; j++) {
                    if (s->mapping[k] < 0) {
                        continue;
                    }

                    if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[k])) {
                        idx = k;
                        break;
                    }
                }
                if (idx == -1)
                    continue;

                I = idx * 2;
                if (s->type == TIME_DOMAIN) {
                    offset = idx * FFALIGN(len, 16);
                    for (j = 0; j < len; j++) {
                        data_ir_l[offset + j] = ptr[len * N - j * N - N + I    ] * gain_lin;
                        data_ir_r[offset + j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
                    }
                } else {
                    memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
                    memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));

                    offset = idx * n_fft;
                    for (j = 0; j < len; j++) {
                        fft_in_l[delay_l + j].re = ptr[j * N + I    ] * gain_lin;
                        fft_in_r[delay_r + j].re = ptr[j * N + I + 1] * gain_lin;
                    }

                    av_fft_permute(s->fft[0], fft_in_l);
                    av_fft_calc(s->fft[0], fft_in_l);
                    memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
                    av_fft_permute(s->fft[0], fft_in_r);
                    av_fft_calc(s->fft[0], fft_in_r);
                    memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
                }
            }
        }
    }

    if (s->type == TIME_DOMAIN) {
        memcpy(s->data_ir[0], data_ir_l, sizeof(float) * nb_irs * FFALIGN(ir_len, 16));
        memcpy(s->data_ir[1], data_ir_r, sizeof(float) * nb_irs * FFALIGN(ir_len, 16));
    } else {
        s->data_hrtf[0] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
        s->data_hrtf[1] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
        if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->data_hrtf[0], data_hrtf_l,
               sizeof(FFTComplex) * nb_irs * n_fft);
        memcpy(s->data_hrtf[1], data_hrtf_r,
               sizeof(FFTComplex) * nb_irs * n_fft);
    }

    s->have_hrirs = 1;

fail:

    av_freep(&data_ir_l);
    av_freep(&data_ir_r);

    av_freep(&data_hrtf_l);
    av_freep(&data_hrtf_r);

    av_freep(&fft_in_l);
    av_freep(&fft_in_r);

    return ret;
}

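/*
 * Main-input callback: queue the audio in the FIFO, build the coefficients
 * once the HRIR inputs are done, then emit as many full blocks as the FIFO
 * currently holds.
 */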
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    HeadphoneContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret = 0;

    ret = av_audio_fifo_write(s->in[0].fifo, (void **)in->extended_data,
                              in->nb_samples);
    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;

    av_frame_free(&in);

    if (ret < 0)
        return ret;

    if (!s->have_hrirs && s->eof_hrirs) {
        ret = convert_coeffs(ctx, inlink);
        if (ret < 0)
            return ret;
    }

    if (s->have_hrirs) {
        while (av_audio_fifo_size(s->in[0].fifo) >= s->size) {
            ret = headphone_frame(s, outlink, s->size);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

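/*
 * Negotiate float samples everywhere, any layout on the main input, stereo
 * on the output, and either stereo pairs or one multichannel stream on the
 * HRIR inputs, depending on the "hrir" option.
 */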
static int query_formats(AVFilterContext *ctx)
{
    struct HeadphoneContext *s = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterChannelLayouts *stereo_layout = NULL;
    AVFilterChannelLayouts *hrir_layouts = NULL;
    int ret, i;

    ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
    if (ret)
        return ret;
    ret = ff_set_common_formats(ctx, formats);
    if (ret)
        return ret;

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);

    ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
    if (ret)
        return ret;

    ret = ff_add_channel_layout(&stereo_layout, AV_CH_LAYOUT_STEREO);
    if (ret)
        return ret;

    if (s->hrir_fmt == HRIR_MULTI) {
        hrir_layouts = ff_all_channel_counts();
        if (!hrir_layouts)
            ret = AVERROR(ENOMEM);
        ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->out_channel_layouts);
        if (ret)
            return ret;
    } else {
        for (i = 1; i < s->nb_inputs; i++) {
            ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->out_channel_layouts);
            if (ret)
                return ret;
        }
    }

    ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->in_channel_layouts);
    if (ret)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

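/* Sanity check: there must be at least one HRIR per input channel. */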
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    HeadphoneContext *s = ctx->priv;

    if (s->nb_irs < inlink->channels) {
        av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->channels);
        return AVERROR(EINVAL);
    }

    return 0;
}

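/*
 * Create the main input pad plus one "hrirN" pad per IR stream requested by
 * the map option (a single one in multich mode), and allocate the float DSP
 * context.
 */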
static av_cold int init(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    int i, ret;

    AVFilterPad pad = {
        .name         = "in0",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    };
    if ((ret = ff_insert_inpad(ctx, 0, &pad)) < 0)
        return ret;

    if (!s->map) {
        av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
        return AVERROR(EINVAL);
    }

    parse_map(ctx);

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);

    for (i = 1; i < s->nb_inputs; i++) {
        char *name = av_asprintf("hrir%d", i - 1);
        AVFilterPad pad = {
            .name         = name,
            .type         = AVMEDIA_TYPE_AUDIO,
            .filter_frame = read_ir,
        };
        if (!name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);
    s->pts = AV_NOPTS_VALUE;

    return 0;
}

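/*
 * Allocate the per-input FIFOs and precompute the LFE gain; in multich mode
 * also verify the HRIR stream carries a pair of channels for every input
 * channel.
 */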
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HeadphoneContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int i;

    if (s->hrir_fmt == HRIR_MULTI) {
        AVFilterLink *hrir_link = ctx->inputs[1];

        if (hrir_link->channels < inlink->channels * 2) {
            av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->channels * 2);
            return AVERROR(EINVAL);
        }
    }

    for (i = 0; i < s->nb_inputs; i++) {
        s->in[i].fifo = av_audio_fifo_alloc(ctx->inputs[i]->format, ctx->inputs[i]->channels, 1024);
        if (!s->in[i].fifo)
            return AVERROR(ENOMEM);
    }
    s->gain_lfe = expf((s->gain - 3 * inlink->channels - 6 + s->lfe_gain) / 20 * M_LN10);

    return 0;
}

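/*
 * Drive the graph: first pull every HRIR input to EOF, then request audio
 * from the main input; on EOF, pad the remaining queued samples with silence
 * up to one block and flush it.
 */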
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HeadphoneContext *s = ctx->priv;
    int i, ret;

    for (i = 1; !s->eof_hrirs && i < s->nb_inputs; i++) {
        if (!s->in[i].eof) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret == AVERROR_EOF) {
                s->in[i].eof = 1;
                ret = 0;
            }
            return ret;
        } else {
            if (i == s->nb_inputs - 1)
                s->eof_hrirs = 1;
        }
    }

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && av_audio_fifo_size(s->in[0].fifo) > 0 && s->have_hrirs) {
        int nb_samples = av_audio_fifo_size(s->in[0].fifo);
        AVFrame *in = ff_get_audio_buffer(ctx->inputs[0], s->size - nb_samples);

        if (!in)
            return AVERROR(ENOMEM);

        av_samples_set_silence(in->extended_data, 0,
                               in->nb_samples,
                               in->channels,
                               in->format);

        ret = av_audio_fifo_write(s->in[0].fifo, (void **)in->extended_data,
                                  in->nb_samples);
        av_frame_free(&in);
        if (ret < 0)
            return ret;
        ret = headphone_frame(s, outlink, nb_samples);

        av_audio_fifo_drain(s->in[0].fifo, s->size);
    }

    return ret;
}

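/* Free FFT contexts, coefficient tables, ring buffers, FIFOs and pad names. */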
static av_cold void uninit(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    int i;

    av_fft_end(s->ifft[0]);
    av_fft_end(s->ifft[1]);
    av_fft_end(s->fft[0]);
    av_fft_end(s->fft[1]);
    av_freep(&s->delay[0]);
    av_freep(&s->delay[1]);
    av_freep(&s->data_ir[0]);
    av_freep(&s->data_ir[1]);
    av_freep(&s->ringbuffer[0]);
    av_freep(&s->ringbuffer[1]);
    av_freep(&s->temp_src[0]);
    av_freep(&s->temp_src[1]);
    av_freep(&s->temp_fft[0]);
    av_freep(&s->temp_fft[1]);
    av_freep(&s->data_hrtf[0]);
    av_freep(&s->data_hrtf[1]);
    av_freep(&s->fdsp);

    for (i = 0; i < s->nb_inputs; i++) {
        av_frame_free(&s->in[i].frame);
        av_audio_fifo_free(s->in[i].fifo);
        if (ctx->input_pads && i)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&s->in);
}

#define OFFSET(x) offsetof(HeadphoneContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption headphone_options[] = {
    { "map",     "set channels convolution mappings",  OFFSET(map),      AV_OPT_TYPE_STRING, {.str=NULL},                        .flags = FLAGS },
    { "gain",    "set gain in dB",                     OFFSET(gain),     AV_OPT_TYPE_FLOAT,  {.dbl=0},          -20,    40,      .flags = FLAGS },
    { "lfe",     "set lfe gain in dB",                 OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT,  {.dbl=0},          -20,    40,      .flags = FLAGS },
    { "type",    "set processing",                     OFFSET(type),     AV_OPT_TYPE_INT,    {.i64=1},            0,     1,      .flags = FLAGS, "type" },
    { "time",    "time domain",                        0,                AV_OPT_TYPE_CONST,  {.i64=0},            0,     0,      .flags = FLAGS, "type" },
    { "freq",    "frequency domain",                   0,                AV_OPT_TYPE_CONST,  {.i64=1},            0,     0,      .flags = FLAGS, "type" },
    { "size",    "set frame size",                     OFFSET(size),     AV_OPT_TYPE_INT,    {.i64=1024},      1024, 96000,      .flags = FLAGS },
    { "hrir",    "set hrir format",                    OFFSET(hrir_fmt), AV_OPT_TYPE_INT,    {.i64=HRIR_STEREO},  0,     1,      .flags = FLAGS, "hrir" },
    { "stereo",  "hrir files have exactly 2 channels", 0,                AV_OPT_TYPE_CONST,  {.i64=HRIR_STEREO},  0,     0,      .flags = FLAGS, "hrir" },
    { "multich", "single multichannel hrir file",      0,                AV_OPT_TYPE_CONST,  {.i64=HRIR_MULTI},   0,     0,      .flags = FLAGS, "hrir" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(headphone);

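/*
 * Illustrative usage (command line and file names are placeholders, not part
 * of this source): spatialize a 5.1 stream with one multichannel HRIR file,
 * where the map entries fix the order in which IR pairs are taken:
 *
 *   ffmpeg -i 5_1.wav -i hrirs.wav -filter_complex \
 *     "[0:a][1:a]headphone=map=FL|FR|FC|LFE|BL|BR:hrir=multich" out.wav
 *
 * With the default hrir=stereo, each mapped channel instead gets its own
 * two-channel HRIR input stream, connected in the same order as the map.
 */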
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_headphone = {
    .name          = "headphone",
    .description   = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
    .priv_size     = sizeof(HeadphoneContext),
    .priv_class    = &headphone_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_DYNAMIC_INPUTS,
};