FFmpeg
af_headphone.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2017 Paul B Mahol
3  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"

#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"
34 
35 #define TIME_DOMAIN 0
36 #define FREQUENCY_DOMAIN 1
37 
38 #define HRIR_STEREO 0
39 #define HRIR_MULTI 1
40 
41 typedef struct HeadphoneContext {
42  const AVClass *class;
43 
44  char *map;
45  int type;
46 
48 
50  int eof_hrirs;
51 
52  int ir_len;
53  int air_len;
54 
55  int mapping[64];
56 
57  int nb_inputs;
58 
59  int nb_irs;
60 
61  float gain;
63 
64  float *ringbuffer[2];
65  int write[2];
66 
68  int n_fft;
69  int size;
70  int hrir_fmt;
71 
72  int *delay[2];
73  float *data_ir[2];
74  float *temp_src[2];
77 
78  FFTContext *fft[2], *ifft[2];
80 
84  int ir_len;
85  int delay_l;
86  int delay_r;
87  int eof;
88  } *in;
90 
91 static int parse_channel_name(HeadphoneContext *s, int x, char **arg, int *rchannel, char *buf)
92 {
93  int len, i, channel_id = 0;
94  int64_t layout, layout0;
95 
96  if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
97  layout0 = layout = av_get_channel_layout(buf);
99  s->lfe_channel = x;
100  for (i = 32; i > 0; i >>= 1) {
101  if (layout >= 1LL << i) {
102  channel_id += i;
103  layout >>= i;
104  }
105  }
106  if (channel_id >= 64 || layout0 != 1LL << channel_id)
107  return AVERROR(EINVAL);
108  *rchannel = channel_id;
109  *arg += len;
110  return 0;
111  }
112  return AVERROR(EINVAL);
113 }
114 
116 {
117  HeadphoneContext *s = ctx->priv;
118  char *arg, *tokenizer, *p, *args = av_strdup(s->map);
119  int i;
120 
121  if (!args)
122  return;
123  p = args;
124 
125  s->lfe_channel = -1;
126  s->nb_inputs = 1;
127 
128  for (i = 0; i < 64; i++) {
129  s->mapping[i] = -1;
130  }
131 
132  while ((arg = av_strtok(p, "|", &tokenizer))) {
133  int out_ch_id;
134  char buf[8];
135 
136  p = NULL;
137  if (parse_channel_name(s, s->nb_irs, &arg, &out_ch_id, buf)) {
138  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
139  continue;
140  }
141  s->mapping[s->nb_irs] = out_ch_id;
142  s->nb_irs++;
143  }
144 
145  if (s->hrir_fmt == HRIR_MULTI)
146  s->nb_inputs = 2;
147  else
148  s->nb_inputs = s->nb_irs + 1;
149 
150  av_free(args);
151 }
152 
153 typedef struct ThreadData {
154  AVFrame *in, *out;
155  int *write;
156  int **delay;
157  float **ir;
159  float **ringbuffer;
160  float **temp_src;
163 } ThreadData;
164 
165 static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
166 {
167  HeadphoneContext *s = ctx->priv;
168  ThreadData *td = arg;
169  AVFrame *in = td->in, *out = td->out;
170  int offset = jobnr;
171  int *write = &td->write[jobnr];
172  const int *const delay = td->delay[jobnr];
173  const float *const ir = td->ir[jobnr];
174  int *n_clippings = &td->n_clippings[jobnr];
175  float *ringbuffer = td->ringbuffer[jobnr];
176  float *temp_src = td->temp_src[jobnr];
177  const int ir_len = s->ir_len;
178  const int air_len = s->air_len;
179  const float *src = (const float *)in->data[0];
180  float *dst = (float *)out->data[0];
181  const int in_channels = in->channels;
182  const int buffer_length = s->buffer_length;
183  const uint32_t modulo = (uint32_t)buffer_length - 1;
184  float *buffer[16];
185  int wr = *write;
186  int read;
187  int i, l;
188 
189  dst += offset;
190  for (l = 0; l < in_channels; l++) {
191  buffer[l] = ringbuffer + l * buffer_length;
192  }
193 
194  for (i = 0; i < in->nb_samples; i++) {
195  const float *temp_ir = ir;
196 
197  *dst = 0;
198  for (l = 0; l < in_channels; l++) {
199  *(buffer[l] + wr) = src[l];
200  }
201 
202  for (l = 0; l < in_channels; l++) {
203  const float *const bptr = buffer[l];
204 
205  if (l == s->lfe_channel) {
206  *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
207  temp_ir += air_len;
208  continue;
209  }
210 
211  read = (wr - *(delay + l) - (ir_len - 1) + buffer_length) & modulo;
212 
213  if (read + ir_len < buffer_length) {
214  memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
215  } else {
216  int len = FFMIN(air_len - (read % ir_len), buffer_length - read);
217 
218  memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
219  memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
220  }
221 
222  dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_len, 32));
223  temp_ir += air_len;
224  }
225 
226  if (fabsf(dst[0]) > 1)
227  n_clippings[0]++;
228 
229  dst += 2;
230  src += in_channels;
231  wr = (wr + 1) & modulo;
232  }
233 
234  *write = wr;
235 
236  return 0;
237 }
238 
239 static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
240 {
241  HeadphoneContext *s = ctx->priv;
242  ThreadData *td = arg;
243  AVFrame *in = td->in, *out = td->out;
244  int offset = jobnr;
245  int *write = &td->write[jobnr];
246  FFTComplex *hrtf = s->data_hrtf[jobnr];
247  int *n_clippings = &td->n_clippings[jobnr];
248  float *ringbuffer = td->ringbuffer[jobnr];
249  const int ir_len = s->ir_len;
250  const float *src = (const float *)in->data[0];
251  float *dst = (float *)out->data[0];
252  const int in_channels = in->channels;
253  const int buffer_length = s->buffer_length;
254  const uint32_t modulo = (uint32_t)buffer_length - 1;
255  FFTComplex *fft_in = s->temp_fft[jobnr];
256  FFTComplex *fft_acc = s->temp_afft[jobnr];
257  FFTContext *ifft = s->ifft[jobnr];
258  FFTContext *fft = s->fft[jobnr];
259  const int n_fft = s->n_fft;
260  const float fft_scale = 1.0f / s->n_fft;
261  FFTComplex *hrtf_offset;
262  int wr = *write;
263  int n_read;
264  int i, j;
265 
266  dst += offset;
267 
268  n_read = FFMIN(ir_len, in->nb_samples);
269  for (j = 0; j < n_read; j++) {
270  dst[2 * j] = ringbuffer[wr];
271  ringbuffer[wr] = 0.0;
272  wr = (wr + 1) & modulo;
273  }
274 
275  for (j = n_read; j < in->nb_samples; j++) {
276  dst[2 * j] = 0;
277  }
278 
279  memset(fft_acc, 0, sizeof(FFTComplex) * n_fft);
280 
281  for (i = 0; i < in_channels; i++) {
282  if (i == s->lfe_channel) {
283  for (j = 0; j < in->nb_samples; j++) {
284  dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
285  }
286  continue;
287  }
288 
289  offset = i * n_fft;
290  hrtf_offset = hrtf + offset;
291 
292  memset(fft_in, 0, sizeof(FFTComplex) * n_fft);
293 
294  for (j = 0; j < in->nb_samples; j++) {
295  fft_in[j].re = src[j * in_channels + i];
296  }
297 
298  av_fft_permute(fft, fft_in);
299  av_fft_calc(fft, fft_in);
300  for (j = 0; j < n_fft; j++) {
301  const FFTComplex *hcomplex = hrtf_offset + j;
302  const float re = fft_in[j].re;
303  const float im = fft_in[j].im;
304 
305  fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
306  fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
307  }
308  }
309 
310  av_fft_permute(ifft, fft_acc);
311  av_fft_calc(ifft, fft_acc);
312 
313  for (j = 0; j < in->nb_samples; j++) {
314  dst[2 * j] += fft_acc[j].re * fft_scale;
315  }
316 
317  for (j = 0; j < ir_len - 1; j++) {
318  int write_pos = (wr + j) & modulo;
319 
320  *(ringbuffer + write_pos) += fft_acc[in->nb_samples + j].re * fft_scale;
321  }
322 
323  for (i = 0; i < out->nb_samples; i++) {
324  if (fabsf(dst[0]) > 1) {
325  n_clippings[0]++;
326  }
327 
328  dst += 2;
329  }
330 
331  *write = wr;
332 
333  return 0;
334 }
335 
336 static int check_ir(AVFilterLink *inlink, int input_number)
337 {
338  AVFilterContext *ctx = inlink->dst;
339  HeadphoneContext *s = ctx->priv;
340  int ir_len, max_ir_len;
341 
343  max_ir_len = 65536;
344  if (ir_len > max_ir_len) {
345  av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", ir_len, max_ir_len);
346  return AVERROR(EINVAL);
347  }
348  s->in[input_number].ir_len = ir_len;
349  s->ir_len = FFMAX(ir_len, s->ir_len);
350 
351  return 0;
352 }
353 
355 {
356  AVFilterContext *ctx = outlink->src;
357  int n_clippings[2] = { 0 };
358  ThreadData td;
359  AVFrame *out;
360 
361  out = ff_get_audio_buffer(outlink, in->nb_samples);
362  if (!out) {
363  av_frame_free(&in);
364  return AVERROR(ENOMEM);
365  }
366  out->pts = in->pts;
367 
368  td.in = in; td.out = out; td.write = s->write;
369  td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
370  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
371  td.temp_fft = s->temp_fft;
372  td.temp_afft = s->temp_afft;
373 
374  if (s->type == TIME_DOMAIN) {
375  ctx->internal->execute(ctx, headphone_convolute, &td, NULL, 2);
376  } else {
377  ctx->internal->execute(ctx, headphone_fast_convolute, &td, NULL, 2);
378  }
379  emms_c();
380 
381  if (n_clippings[0] + n_clippings[1] > 0) {
382  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
383  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
384  }
385 
386  av_frame_free(&in);
387  return ff_filter_frame(outlink, out);
388 }
389 
391 {
392  struct HeadphoneContext *s = ctx->priv;
393  const int ir_len = s->ir_len;
394  int nb_irs = s->nb_irs;
395  int nb_input_channels = ctx->inputs[0]->channels;
396  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
397  FFTComplex *data_hrtf_l = NULL;
398  FFTComplex *data_hrtf_r = NULL;
399  FFTComplex *fft_in_l = NULL;
400  FFTComplex *fft_in_r = NULL;
401  float *data_ir_l = NULL;
402  float *data_ir_r = NULL;
403  int offset = 0, ret = 0;
404  int n_fft;
405  int i, j, k;
406 
407  s->air_len = 1 << (32 - ff_clz(ir_len));
408  s->buffer_length = 1 << (32 - ff_clz(s->air_len));
409  s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));
410 
411  if (s->type == FREQUENCY_DOMAIN) {
412  fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
413  fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
414  if (!fft_in_l || !fft_in_r) {
415  ret = AVERROR(ENOMEM);
416  goto fail;
417  }
418 
419  av_fft_end(s->fft[0]);
420  av_fft_end(s->fft[1]);
421  s->fft[0] = av_fft_init(av_log2(s->n_fft), 0);
422  s->fft[1] = av_fft_init(av_log2(s->n_fft), 0);
423  av_fft_end(s->ifft[0]);
424  av_fft_end(s->ifft[1]);
425  s->ifft[0] = av_fft_init(av_log2(s->n_fft), 1);
426  s->ifft[1] = av_fft_init(av_log2(s->n_fft), 1);
427 
428  if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
429  av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
430  ret = AVERROR(ENOMEM);
431  goto fail;
432  }
433  }
434 
435  s->data_ir[0] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
436  s->data_ir[1] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
437  s->delay[0] = av_calloc(s->nb_irs, sizeof(float));
438  s->delay[1] = av_calloc(s->nb_irs, sizeof(float));
439 
440  if (s->type == TIME_DOMAIN) {
441  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
442  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
443  } else {
444  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
445  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
446  s->temp_fft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
447  s->temp_fft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
448  s->temp_afft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
449  s->temp_afft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
450  if (!s->temp_fft[0] || !s->temp_fft[1] ||
451  !s->temp_afft[0] || !s->temp_afft[1]) {
452  ret = AVERROR(ENOMEM);
453  goto fail;
454  }
455  }
456 
457  if (!s->data_ir[0] || !s->data_ir[1] ||
458  !s->ringbuffer[0] || !s->ringbuffer[1]) {
459  ret = AVERROR(ENOMEM);
460  goto fail;
461  }
462 
463  if (s->type == TIME_DOMAIN) {
464  s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
465  s->temp_src[1] = av_calloc(s->air_len, sizeof(float));
466 
467  data_ir_l = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_l));
468  data_ir_r = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_r));
469  if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
470  ret = AVERROR(ENOMEM);
471  goto fail;
472  }
473  } else {
474  data_hrtf_l = av_calloc(n_fft, sizeof(*data_hrtf_l) * nb_irs);
475  data_hrtf_r = av_calloc(n_fft, sizeof(*data_hrtf_r) * nb_irs);
476  if (!data_hrtf_r || !data_hrtf_l) {
477  ret = AVERROR(ENOMEM);
478  goto fail;
479  }
480  }
481 
482  for (i = 0; i < s->nb_inputs - 1; i++) {
483  int len = s->in[i + 1].ir_len;
484  int delay_l = s->in[i + 1].delay_l;
485  int delay_r = s->in[i + 1].delay_r;
486  float *ptr;
487 
488  ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &s->in[i + 1].frame);
489  if (ret < 0)
490  goto fail;
491  ptr = (float *)s->in[i + 1].frame->extended_data[0];
492 
493  if (s->hrir_fmt == HRIR_STEREO) {
494  int idx = -1;
495 
496  for (j = 0; j < inlink->channels; j++) {
497  if (s->mapping[i] < 0) {
498  continue;
499  }
500 
501  if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[i])) {
502  idx = i;
503  break;
504  }
505  }
506 
507  if (idx == -1)
508  continue;
509  if (s->type == TIME_DOMAIN) {
510  offset = idx * s->air_len;
511  for (j = 0; j < len; j++) {
512  data_ir_l[offset + j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
513  data_ir_r[offset + j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
514  }
515  } else {
516  memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
517  memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
518 
519  offset = idx * n_fft;
520  for (j = 0; j < len; j++) {
521  fft_in_l[delay_l + j].re = ptr[j * 2 ] * gain_lin;
522  fft_in_r[delay_r + j].re = ptr[j * 2 + 1] * gain_lin;
523  }
524 
525  av_fft_permute(s->fft[0], fft_in_l);
526  av_fft_calc(s->fft[0], fft_in_l);
527  memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
528  av_fft_permute(s->fft[0], fft_in_r);
529  av_fft_calc(s->fft[0], fft_in_r);
530  memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
531  }
532  } else {
533  int I, N = ctx->inputs[1]->channels;
534 
535  for (k = 0; k < N / 2; k++) {
536  int idx = -1;
537 
538  for (j = 0; j < inlink->channels; j++) {
539  if (s->mapping[k] < 0) {
540  continue;
541  }
542 
543  if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[k])) {
544  idx = k;
545  break;
546  }
547  }
548  if (idx == -1)
549  continue;
550 
551  I = idx * 2;
552  if (s->type == TIME_DOMAIN) {
553  offset = idx * s->air_len;
554  for (j = 0; j < len; j++) {
555  data_ir_l[offset + j] = ptr[len * N - j * N - N + I ] * gain_lin;
556  data_ir_r[offset + j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
557  }
558  } else {
559  memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
560  memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
561 
562  offset = idx * n_fft;
563  for (j = 0; j < len; j++) {
564  fft_in_l[delay_l + j].re = ptr[j * N + I ] * gain_lin;
565  fft_in_r[delay_r + j].re = ptr[j * N + I + 1] * gain_lin;
566  }
567 
568  av_fft_permute(s->fft[0], fft_in_l);
569  av_fft_calc(s->fft[0], fft_in_l);
570  memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
571  av_fft_permute(s->fft[0], fft_in_r);
572  av_fft_calc(s->fft[0], fft_in_r);
573  memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
574  }
575  }
576  }
577 
578  av_frame_free(&s->in[i + 1].frame);
579  }
580 
581  if (s->type == TIME_DOMAIN) {
582  memcpy(s->data_ir[0], data_ir_l, sizeof(float) * nb_irs * s->air_len);
583  memcpy(s->data_ir[1], data_ir_r, sizeof(float) * nb_irs * s->air_len);
584  } else {
585  s->data_hrtf[0] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
586  s->data_hrtf[1] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
587  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
588  ret = AVERROR(ENOMEM);
589  goto fail;
590  }
591 
592  memcpy(s->data_hrtf[0], data_hrtf_l,
593  sizeof(FFTComplex) * nb_irs * n_fft);
594  memcpy(s->data_hrtf[1], data_hrtf_r,
595  sizeof(FFTComplex) * nb_irs * n_fft);
596  }
597 
598  s->have_hrirs = 1;
599 
600 fail:
601 
602  for (i = 0; i < s->nb_inputs - 1; i++)
603  av_frame_free(&s->in[i + 1].frame);
604 
605  av_freep(&data_ir_l);
606  av_freep(&data_ir_r);
607 
608  av_freep(&data_hrtf_l);
609  av_freep(&data_hrtf_r);
610 
611  av_freep(&fft_in_l);
612  av_freep(&fft_in_r);
613 
614  return ret;
615 }
616 
618 {
619  HeadphoneContext *s = ctx->priv;
620  AVFilterLink *inlink = ctx->inputs[0];
621  AVFilterLink *outlink = ctx->outputs[0];
622  AVFrame *in = NULL;
623  int i, ret;
624 
626  if (!s->eof_hrirs) {
627  for (i = 1; i < s->nb_inputs; i++) {
628  if (s->in[i].eof)
629  continue;
630 
631  if ((ret = check_ir(ctx->inputs[i], i)) < 0)
632  return ret;
633 
634  if (!s->in[i].eof) {
635  if (ff_outlink_get_status(ctx->inputs[i]) == AVERROR_EOF)
636  s->in[i].eof = 1;
637  }
638  }
639 
640  for (i = 1; i < s->nb_inputs; i++) {
641  if (!s->in[i].eof)
642  break;
643  }
644 
645  if (i != s->nb_inputs) {
646  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
647  for (i = 1; i < s->nb_inputs; i++) {
648  if (!s->in[i].eof)
649  ff_inlink_request_frame(ctx->inputs[i]);
650  }
651  }
652 
653  return 0;
654  } else {
655  s->eof_hrirs = 1;
656  }
657  }
658 
659  if (!s->have_hrirs && s->eof_hrirs) {
661  if (ret < 0)
662  return ret;
663  }
664 
665  if ((ret = ff_inlink_consume_samples(ctx->inputs[0], s->size, s->size, &in)) > 0) {
666  ret = headphone_frame(s, in, outlink);
667  if (ret < 0)
668  return ret;
669  }
670 
671  if (ret < 0)
672  return ret;
673 
674  FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
675  if (ff_outlink_frame_wanted(ctx->outputs[0]))
676  ff_inlink_request_frame(ctx->inputs[0]);
677 
678  return 0;
679 }
680 
682 {
683  struct HeadphoneContext *s = ctx->priv;
686  AVFilterChannelLayouts *stereo_layout = NULL;
687  AVFilterChannelLayouts *hrir_layouts = NULL;
688  int ret, i;
689 
691  if (ret)
692  return ret;
694  if (ret)
695  return ret;
696 
698  if (!layouts)
699  return AVERROR(ENOMEM);
700 
701  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
702  if (ret)
703  return ret;
704 
705  ret = ff_add_channel_layout(&stereo_layout, AV_CH_LAYOUT_STEREO);
706  if (ret)
707  return ret;
708 
709  if (s->hrir_fmt == HRIR_MULTI) {
710  hrir_layouts = ff_all_channel_counts();
711  if (!hrir_layouts)
712  ret = AVERROR(ENOMEM);
713  ret = ff_channel_layouts_ref(hrir_layouts, &ctx->inputs[1]->out_channel_layouts);
714  if (ret)
715  return ret;
716  } else {
717  for (i = 1; i < s->nb_inputs; i++) {
718  ret = ff_channel_layouts_ref(stereo_layout, &ctx->inputs[i]->out_channel_layouts);
719  if (ret)
720  return ret;
721  }
722  }
723 
724  ret = ff_channel_layouts_ref(stereo_layout, &ctx->outputs[0]->in_channel_layouts);
725  if (ret)
726  return ret;
727 
729  if (!formats)
730  return AVERROR(ENOMEM);
732 }
733 
735 {
736  AVFilterContext *ctx = inlink->dst;
737  HeadphoneContext *s = ctx->priv;
738 
739  if (s->nb_irs < inlink->channels) {
740  av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->channels);
741  return AVERROR(EINVAL);
742  }
743 
744  return 0;
745 }
746 
748 {
749  HeadphoneContext *s = ctx->priv;
750  int i, ret;
751 
752  AVFilterPad pad = {
753  .name = "in0",
754  .type = AVMEDIA_TYPE_AUDIO,
755  .config_props = config_input,
756  };
757  if ((ret = ff_insert_inpad(ctx, 0, &pad)) < 0)
758  return ret;
759 
760  if (!s->map) {
761  av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
762  return AVERROR(EINVAL);
763  }
764 
765  parse_map(ctx);
766 
767  s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
768  if (!s->in)
769  return AVERROR(ENOMEM);
770 
771  for (i = 1; i < s->nb_inputs; i++) {
772  char *name = av_asprintf("hrir%d", i - 1);
773  AVFilterPad pad = {
774  .name = name,
775  .type = AVMEDIA_TYPE_AUDIO,
776  };
777  if (!name)
778  return AVERROR(ENOMEM);
779  if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
780  av_freep(&pad.name);
781  return ret;
782  }
783  }
784 
785  s->fdsp = avpriv_float_dsp_alloc(0);
786  if (!s->fdsp)
787  return AVERROR(ENOMEM);
788 
789  return 0;
790 }
791 
792 static int config_output(AVFilterLink *outlink)
793 {
794  AVFilterContext *ctx = outlink->src;
795  HeadphoneContext *s = ctx->priv;
796  AVFilterLink *inlink = ctx->inputs[0];
797 
798  if (s->hrir_fmt == HRIR_MULTI) {
799  AVFilterLink *hrir_link = ctx->inputs[1];
800 
801  if (hrir_link->channels < inlink->channels * 2) {
802  av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", inlink->channels * 2);
803  return AVERROR(EINVAL);
804  }
805  }
806 
807  s->gain_lfe = expf((s->gain - 3 * inlink->channels + s->lfe_gain) / 20 * M_LN10);
808 
809  return 0;
810 }
811 
813 {
814  HeadphoneContext *s = ctx->priv;
815  int i;
816 
817  av_fft_end(s->ifft[0]);
818  av_fft_end(s->ifft[1]);
819  av_fft_end(s->fft[0]);
820  av_fft_end(s->fft[1]);
821  av_freep(&s->delay[0]);
822  av_freep(&s->delay[1]);
823  av_freep(&s->data_ir[0]);
824  av_freep(&s->data_ir[1]);
825  av_freep(&s->ringbuffer[0]);
826  av_freep(&s->ringbuffer[1]);
827  av_freep(&s->temp_src[0]);
828  av_freep(&s->temp_src[1]);
829  av_freep(&s->temp_fft[0]);
830  av_freep(&s->temp_fft[1]);
831  av_freep(&s->temp_afft[0]);
832  av_freep(&s->temp_afft[1]);
833  av_freep(&s->data_hrtf[0]);
834  av_freep(&s->data_hrtf[1]);
835  av_freep(&s->fdsp);
836 
837  for (i = 0; i < s->nb_inputs; i++) {
838  if (ctx->input_pads && i)
839  av_freep(&ctx->input_pads[i].name);
840  }
841  av_freep(&s->in);
842 }
843 
844 #define OFFSET(x) offsetof(HeadphoneContext, x)
845 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
846 
847 static const AVOption headphone_options[] = {
848  { "map", "set channels convolution mappings", OFFSET(map), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
849  { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
850  { "lfe", "set lfe gain in dB", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
851  { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, "type" },
852  { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, "type" },
853  { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, "type" },
854  { "size", "set frame size", OFFSET(size), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
855  { "hrir", "set hrir format", OFFSET(hrir_fmt), AV_OPT_TYPE_INT, {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, "hrir" },
856  { "stereo", "hrir files have exactly 2 channels", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, "hrir" },
857  { "multich", "single multichannel hrir file", 0, AV_OPT_TYPE_CONST, {.i64=HRIR_MULTI}, 0, 0, .flags = FLAGS, "hrir" },
858  { NULL }
859 };
860 
861 AVFILTER_DEFINE_CLASS(headphone);
862 
863 static const AVFilterPad outputs[] = {
864  {
865  .name = "default",
866  .type = AVMEDIA_TYPE_AUDIO,
867  .config_props = config_output,
868  },
869  { NULL }
870 };
871 
873  .name = "headphone",
874  .description = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
875  .priv_size = sizeof(HeadphoneContext),
876  .priv_class = &headphone_class,
877  .init = init,
878  .uninit = uninit,
880  .activate = activate,
881  .inputs = NULL,
882  .outputs = outputs,
884 };
formats
formats
Definition: signature.h:48
convert_coeffs
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
Definition: af_headphone.c:390
av_fft_end
av_cold void av_fft_end(FFTContext *s)
Definition: avfft.c:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_headphone.c:36
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
td
#define td
Definition: regdef.h:70
HeadphoneContext::gain_lfe
float gain_lfe
Definition: af_headphone.c:62
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
HeadphoneContext::data_ir
float * data_ir[2]
Definition: af_headphone.c:73
HeadphoneContext::headphone_inputs::eof
int eof
Definition: af_headphone.c:87
out
FILE * out
Definition: movenc.c:54
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:435
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
HeadphoneContext::size
int size
Definition: af_headphone.c:69
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
ff_clz
#define ff_clz
Definition: intmath.h:142
ThreadData::delay
int ** delay
Definition: af_headphone.c:156
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:410
AudioConvert::channels
int channels
Definition: audio_convert.c:54
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
name
const char * name
Definition: avisynth_c.h:867
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_headphone.c:35
AVOption
AVOption.
Definition: opt.h:246
HeadphoneContext::ringbuffer
float * ringbuffer[2]
Definition: af_headphone.c:64
expf
#define expf(x)
Definition: libm.h:283
av_fft_permute
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
Definition: avfft.c:38
HeadphoneContext::eof_hrirs
int eof_hrirs
Definition: af_headphone.c:50
HeadphoneContext::headphone_inputs::delay_r
int delay_r
Definition: af_headphone.c:86
av_get_channel_layout
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
Definition: channel_layout.c:139
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
HeadphoneContext::ifft
FFTContext * ifft[2]
Definition: af_headphone.c:78
parse_channel_name
static int parse_channel_name(HeadphoneContext *s, int x, char **arg, int *rchannel, char *buf)
Definition: af_headphone.c:91
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:488
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1795
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:160
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
ff_insert_inpad
static int ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new input pad for the filter.
Definition: internal.h:277
HeadphoneContext::air_len
int air_len
Definition: af_headphone.c:53
FF_FILTER_FORWARD_STATUS_BACK_ALL
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
HeadphoneContext::ir_len
int ir_len
Definition: af_headphone.c:52
fail
#define fail()
Definition: checkasm.h:120
activate
static int activate(AVFilterContext *ctx)
Definition: af_headphone.c:617
HeadphoneContext::headphone_inputs::delay_l
int delay_l
Definition: af_headphone.c:85
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:159
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:105
src
#define src
Definition: vp8dsp.c:254
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:86
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_headphone.c:734
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
OFFSET
#define OFFSET(x)
Definition: af_headphone.c:844
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
HRIR_MULTI
#define HRIR_MULTI
Definition: af_headphone.c:39
HRIR_STEREO
#define HRIR_STEREO
Definition: af_headphone.c:38
AV_CH_LOW_FREQUENCY
#define AV_CH_LOW_FREQUENCY
Definition: channel_layout.h:52
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:343
ff_inlink_request_frame
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1607
s
#define s(width, name)
Definition: cbs_vp9.c:257
HeadphoneContext::data_hrtf
FFTComplex * data_hrtf[2]
Definition: af_headphone.c:79
HeadphoneContext::buffer_length
int buffer_length
Definition: af_headphone.c:67
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:184
filters.h
ctx
AVFormatContext * ctx
Definition: movenc.c:48
HeadphoneContext::lfe_gain
float lfe_gain
Definition: af_headphone.c:62
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_headphone.c:812
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
headphone_convolute
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:165
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1500
NULL
#define NULL
Definition: coverity.c:32
FLAGS
#define FLAGS
Definition: af_headphone.c:845
ThreadData::temp_fft
FFTComplex ** temp_fft
Definition: af_headphone.c:161
parse_map
static void parse_map(AVFilterContext *ctx)
Definition: af_headphone.c:115
outputs
static const AVFilterPad outputs[]
Definition: af_headphone.c:863
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:337
avfft.h
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_headphone.c:681
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(headphone)
float_dsp.h
headphone_frame
static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
Definition: af_headphone.c:354
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
HeadphoneContext::in
struct HeadphoneContext::headphone_inputs * in
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
HeadphoneContext::write
int write[2]
Definition: af_headphone.c:65
size
int size
Definition: twinvq_data.h:11134
HeadphoneContext::headphone_inputs::frame
AVFrame * frame
Definition: af_headphone.c:83
ff_af_headphone
AVFilter ff_af_headphone
Definition: af_headphone.c:872
HeadphoneContext::gain
float gain
Definition: af_headphone.c:61
FFTComplex::im
FFTSample im
Definition: avfft.h:38
AVFloatDSPContext
Definition: float_dsp.h:24
FFTComplex::re
FFTSample re
Definition: avfft.h:38
HeadphoneContext
Definition: af_headphone.c:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
HeadphoneContext::delay
int * delay[2]
Definition: af_headphone.c:72
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
N
#define N
Definition: af_mcompand.c:54
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:401
headphone_options
static const AVOption headphone_options[]
Definition: af_headphone.c:847
HeadphoneContext::nb_inputs
int nb_inputs
Definition: af_headphone.c:57
HeadphoneContext::have_hrirs
int have_hrirs
Definition: af_headphone.c:49
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
HeadphoneContext::headphone_inputs::ir_len
int ir_len
Definition: af_headphone.c:84
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
HeadphoneContext::temp_fft
FFTComplex * temp_fft[2]
Definition: af_headphone.c:75
FFTContext
Definition: fft.h:88
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
HeadphoneContext::map
char * map
Definition: af_headphone.c:44
av_channel_layout_extract_channel
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
Get the channel with the given index in channel_layout.
Definition: channel_layout.c:265
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_headphone.c:747
ThreadData
Used for passing data between threads.
Definition: af_adeclick.c:487
args
const char AVS_Value args
Definition: avisynth_c.h:873
HeadphoneContext::hrir_fmt
int hrir_fmt
Definition: af_headphone.c:70
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:158
len
int len
Definition: vorbis_enc_data.h:452
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1461
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
HeadphoneContext::mapping
int mapping[64]
Definition: af_headphone.c:55
HeadphoneContext::headphone_inputs
Definition: af_headphone.c:82
ThreadData::write
int * write
Definition: af_headphone.c:155
av_fft_init
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
Definition: avfft.c:28
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
channel_layout.h
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
headphone_fast_convolute
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_headphone.c:239
ff_outlink_get_status
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
Definition: avfilter.c:1630
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:43
ThreadData::in
AVFrame * in
Definition: af_afftdn.c:1082
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:85
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
HeadphoneContext::temp_afft
FFTComplex * temp_afft[2]
Definition: af_headphone.c:76
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
HeadphoneContext::fdsp
AVFloatDSPContext * fdsp
Definition: af_headphone.c:81
ThreadData::ir
float ** ir
Definition: af_headphone.c:157
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
HeadphoneContext::temp_src
float * temp_src[2]
Definition: af_headphone.c:74
ff_outlink_frame_wanted
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:227
HeadphoneContext::type
int type
Definition: af_headphone.c:45
HeadphoneContext::lfe_channel
int lfe_channel
Definition: af_headphone.c:47
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
av_fft_calc
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().
Definition: avfft.c:43
HeadphoneContext::nb_irs
int nb_irs
Definition: af_headphone.c:59
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
HeadphoneContext::fft
FFTContext * fft[2]
Definition: af_headphone.c:78
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_headphone.c:792
HeadphoneContext::n_fft
int n_fft
Definition: af_headphone.c:68
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
FFTComplex
Definition: avfft.h:37
re
float re
Definition: fft.c:82
check_ir
static int check_ir(AVFilterLink *inlink, int input_number)
Definition: af_headphone.c:336
ThreadData::temp_afft
FFTComplex ** temp_afft
Definition: af_headphone.c:162
intmath.h