/*
 * FFmpeg — libavfilter/af_sofalizer.c
 * (content recovered from a Doxygen documentation scrape of this file)
 */
1 /*****************************************************************************
2  * sofalizer.c : SOFAlizer filter for virtual binaural acoustics
3  *****************************************************************************
4  * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda,
5  * Acoustics Research Institute (ARI), Vienna, Austria
6  *
7  * Authors: Andreas Fuchs <andi.fuchs.mail@gmail.com>
8  * Wolfgang Hrauda <wolfgang.hrauda@gmx.at>
9  *
10  * SOFAlizer project coordinator at ARI, main developer of SOFA:
11  * Piotr Majdak <piotr@majdak.at>
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU Lesser General Public License as published by
15  * the Free Software Foundation; either version 2.1 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public License
24  * along with this program; if not, write to the Free Software Foundation,
25  * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
26  *****************************************************************************/
27 
28 #include <math.h>
29 #include <mysofa.h>
30 
31 #include "libavutil/tx.h"
32 #include "libavutil/avstring.h"
34 #include "libavutil/float_dsp.h"
35 #include "libavutil/intmath.h"
36 #include "libavutil/opt.h"
37 #include "avfilter.h"
38 #include "filters.h"
39 #include "internal.h"
40 #include "audio.h"
41 
42 #define TIME_DOMAIN 0
43 #define FREQUENCY_DOMAIN 1
44 
typedef struct MySofa { /* contains data of one SOFA file */
    struct MYSOFA_HRTF *hrtf;                 /* parsed SOFA HRTF set (libmysofa handle) */
    struct MYSOFA_LOOKUP *lookup;             /* spatial lookup structure for nearest-measurement queries */
    struct MYSOFA_NEIGHBORHOOD *neighborhood; /* neighbor lists, only built when interpolation is enabled */
    int ir_samples;   /* length of one impulse response (IR) */
    int n_samples;    /* ir_samples rounded up to next power of 2 */
    float *lir, *rir; /* left/right-ear IRs (time-domain) */
    float *fir;       /* scratch buffer for one interpolated measurement (N * R floats) */
    int max_delay;    /* largest broadband delay seen over all loaded IRs, in samples */
} MySofa;
55 
56 typedef struct VirtualSpeaker {
57  uint8_t set;
58  float azim;
59  float elev;
61 
62 typedef struct SOFAlizerContext {
63  const AVClass *class;
64 
65  char *filename; /* name of SOFA file */
66  MySofa sofa; /* contains data of the SOFA file */
67 
68  int sample_rate; /* sample rate from SOFA file */
69  float *speaker_azim; /* azimuth of the virtual loudspeakers */
70  float *speaker_elev; /* elevation of the virtual loudspeakers */
71  char *speakers_pos; /* custom positions of the virtual loudspeakers */
72  float lfe_gain; /* initial gain for the LFE channel */
73  float gain_lfe; /* gain applied to LFE channel */
74  int lfe_channel; /* LFE channel position in channel layout */
75 
76  int n_conv; /* number of channels to convolute */
77 
78  /* buffer variables (for convolution) */
79  float *ringbuffer[2]; /* buffers input samples, length of one buffer: */
80  /* no. input ch. (incl. LFE) x buffer_length */
81  int write[2]; /* current write position to ringbuffer */
82  int buffer_length; /* is: longest IR plus max. delay in all SOFA files */
83  /* then choose next power of 2 */
84  int n_fft; /* number of samples in one FFT block */
86 
87  /* netCDF variables */
88  int *delay[2]; /* broadband delay for each channel/IR to be convolved */
89 
90  float *data_ir[2]; /* IRs for all channels to be convolved */
91  /* (this excludes the LFE) */
92  float *temp_src[2];
93  AVComplexFloat *in_fft[2]; /* Array to hold input FFT values */
94  AVComplexFloat *out_fft[2]; /* Array to hold output FFT values */
95  AVComplexFloat *temp_afft[2]; /* Array to accumulate FFT values prior to IFFT */
96 
97  /* control variables */
98  float gain; /* filter gain (in dB) */
99  float rotation; /* rotation of virtual loudspeakers (in degrees) */
100  float elevation; /* elevation of virtual loudspeakers (in deg.) */
101  float radius; /* distance virtual loudspeakers to listener (in metres) */
102  int type; /* processing type */
103  int framesize; /* size of buffer */
104  int normalize; /* should all IRs be normalized upon import ? */
105  int interpolate; /* should wanted IRs be interpolated from neighbors ? */
106  int minphase; /* should all IRs be minphased upon import ? */
107  float anglestep; /* neighbor search angle step, in agles */
108  float radstep; /* neighbor search radius step, in meters */
109 
111 
112  AVTXContext *fft[2], *ifft[2];
115 
118 
119 static int close_sofa(struct MySofa *sofa)
120 {
121  if (sofa->neighborhood)
122  mysofa_neighborhood_free(sofa->neighborhood);
123  sofa->neighborhood = NULL;
124  if (sofa->lookup)
125  mysofa_lookup_free(sofa->lookup);
126  sofa->lookup = NULL;
127  if (sofa->hrtf)
128  mysofa_free(sofa->hrtf);
129  sofa->hrtf = NULL;
130  av_freep(&sofa->fir);
131 
132  return 0;
133 }
134 
135 static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
136 {
137  struct SOFAlizerContext *s = ctx->priv;
138  struct MYSOFA_HRTF *mysofa;
139  char *license;
140  int ret;
141 
142  mysofa = mysofa_load(filename, &ret);
143  s->sofa.hrtf = mysofa;
144  if (ret || !mysofa) {
145  av_log(ctx, AV_LOG_ERROR, "Can't find SOFA-file '%s'\n", filename);
146  return AVERROR(EINVAL);
147  }
148 
149  ret = mysofa_check(mysofa);
150  if (ret != MYSOFA_OK) {
151  av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select valid SOFA file.\n");
152  return ret;
153  }
154 
155  if (s->normalize)
156  mysofa_loudness(s->sofa.hrtf);
157 
158  if (s->minphase)
159  mysofa_minphase(s->sofa.hrtf, 0.01f);
160 
161  mysofa_tocartesian(s->sofa.hrtf);
162 
163  s->sofa.lookup = mysofa_lookup_init(s->sofa.hrtf);
164  if (s->sofa.lookup == NULL)
165  return AVERROR(EINVAL);
166 
167  if (s->interpolate)
168  s->sofa.neighborhood = mysofa_neighborhood_init_withstepdefine(s->sofa.hrtf,
169  s->sofa.lookup,
170  s->anglestep,
171  s->radstep);
172 
173  s->sofa.fir = av_calloc(s->sofa.hrtf->N * s->sofa.hrtf->R, sizeof(*s->sofa.fir));
174  if (!s->sofa.fir)
175  return AVERROR(ENOMEM);
176 
177  if (mysofa->DataSamplingRate.elements != 1)
178  return AVERROR(EINVAL);
179  av_log(ctx, AV_LOG_DEBUG, "Original IR length: %d.\n", mysofa->N);
180  *samplingrate = mysofa->DataSamplingRate.values[0];
181  license = mysofa_getAttribute(mysofa->attributes, (char *)"License");
182  if (license)
183  av_log(ctx, AV_LOG_INFO, "SOFA license: %s\n", license);
184 
185  return 0;
186 }
187 
188 static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
189 {
190  int len;
191  enum AVChannel channel_id = 0;
192  char buf[8] = {0};
193 
194  /* try to parse a channel name, e.g. "FL" */
195  if (av_sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
196  channel_id = av_channel_from_string(buf);
197  if (channel_id < 0 || channel_id >= 64) {
198  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
199  return AVERROR(EINVAL);
200  }
201 
202  *rchannel = channel_id;
203  *arg += len;
204  return 0;
205  } else if (av_sscanf(*arg, "%d%n", &channel_id, &len) == 1) {
206  if (channel_id < 0 || channel_id >= 64) {
207  av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%d\' as channel number.\n", channel_id);
208  return AVERROR(EINVAL);
209  }
210  *rchannel = channel_id;
211  *arg += len;
212  return 0;
213  }
214  return AVERROR(EINVAL);
215 }
216 
218 {
219  SOFAlizerContext *s = ctx->priv;
220  char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);
221 
222  if (!args)
223  return;
224  p = args;
225 
226  while ((arg = av_strtok(p, "|", &tokenizer))) {
227  float azim, elev;
228  int out_ch_id;
229 
230  p = NULL;
231  if (parse_channel_name(ctx, &arg, &out_ch_id)) {
232  continue;
233  }
234  if (av_sscanf(arg, "%f %f", &azim, &elev) == 2) {
235  s->vspkrpos[out_ch_id].set = 1;
236  s->vspkrpos[out_ch_id].azim = azim;
237  s->vspkrpos[out_ch_id].elev = elev;
238  } else if (av_sscanf(arg, "%f", &azim) == 1) {
239  s->vspkrpos[out_ch_id].set = 1;
240  s->vspkrpos[out_ch_id].azim = azim;
241  s->vspkrpos[out_ch_id].elev = 0;
242  }
243  }
244 
245  av_free(args);
246 }
247 
249  float *speaker_azim, float *speaker_elev)
250 {
251  struct SOFAlizerContext *s = ctx->priv;
252  AVChannelLayout *channel_layout = &ctx->inputs[0]->ch_layout;
253  float azim[64] = { 0 };
254  float elev[64] = { 0 };
255  int ch, n_conv = ctx->inputs[0]->ch_layout.nb_channels; /* get no. input channels */
256 
257  if (n_conv < 0 || n_conv > 64)
258  return AVERROR(EINVAL);
259 
260  s->lfe_channel = -1;
261 
262  if (s->speakers_pos)
264 
265  /* set speaker positions according to input channel configuration: */
266  for (ch = 0; ch < n_conv; ch++) {
267  int chan = av_channel_layout_channel_from_index(channel_layout, ch);
268 
269  switch (chan) {
270  case AV_CHAN_FRONT_LEFT: azim[ch] = 30; break;
271  case AV_CHAN_FRONT_RIGHT: azim[ch] = 330; break;
272  case AV_CHAN_FRONT_CENTER: azim[ch] = 0; break;
274  case AV_CHAN_LOW_FREQUENCY_2: s->lfe_channel = ch; break;
275  case AV_CHAN_BACK_LEFT: azim[ch] = 150; break;
276  case AV_CHAN_BACK_RIGHT: azim[ch] = 210; break;
277  case AV_CHAN_BACK_CENTER: azim[ch] = 180; break;
278  case AV_CHAN_SIDE_LEFT: azim[ch] = 90; break;
279  case AV_CHAN_SIDE_RIGHT: azim[ch] = 270; break;
280  case AV_CHAN_FRONT_LEFT_OF_CENTER: azim[ch] = 15; break;
281  case AV_CHAN_FRONT_RIGHT_OF_CENTER: azim[ch] = 345; break;
282  case AV_CHAN_TOP_CENTER: azim[ch] = 0;
283  elev[ch] = 90; break;
284  case AV_CHAN_TOP_FRONT_LEFT: azim[ch] = 30;
285  elev[ch] = 45; break;
286  case AV_CHAN_TOP_FRONT_CENTER: azim[ch] = 0;
287  elev[ch] = 45; break;
288  case AV_CHAN_TOP_FRONT_RIGHT: azim[ch] = 330;
289  elev[ch] = 45; break;
290  case AV_CHAN_TOP_BACK_LEFT: azim[ch] = 150;
291  elev[ch] = 45; break;
292  case AV_CHAN_TOP_BACK_RIGHT: azim[ch] = 210;
293  elev[ch] = 45; break;
294  case AV_CHAN_TOP_BACK_CENTER: azim[ch] = 180;
295  elev[ch] = 45; break;
296  case AV_CHAN_WIDE_LEFT: azim[ch] = 90; break;
297  case AV_CHAN_WIDE_RIGHT: azim[ch] = 270; break;
298  case AV_CHAN_SURROUND_DIRECT_LEFT: azim[ch] = 90; break;
299  case AV_CHAN_SURROUND_DIRECT_RIGHT: azim[ch] = 270; break;
300  case AV_CHAN_STEREO_LEFT: azim[ch] = 90; break;
301  case AV_CHAN_STEREO_RIGHT: azim[ch] = 270; break;
302  default:
303  return AVERROR(EINVAL);
304  }
305 
306  if (s->vspkrpos[ch].set) {
307  azim[ch] = s->vspkrpos[ch].azim;
308  elev[ch] = s->vspkrpos[ch].elev;
309  }
310  }
311 
312  memcpy(speaker_azim, azim, n_conv * sizeof(float));
313  memcpy(speaker_elev, elev, n_conv * sizeof(float));
314 
315  return 0;
316 
317 }
318 
319 typedef struct ThreadData {
320  AVFrame *in, *out;
321  int *write;
322  int **delay;
323  float **ir;
324  int *n_clippings;
325  float **ringbuffer;
326  float **temp_src;
330 } ThreadData;
331 
/* Time-domain convolution worker, run once per output ear
 * (jobnr 0 = left, 1 = right; dst/IR/delay/ringbuffer sets are indexed by jobnr).
 *
 * For every output sample: push the current sample of each input channel into
 * that channel's ring buffer, then accumulate the dot product of each
 * channel's IR (stored time-reversed by load_data()) with the buffered signal
 * history, read back starting at the per-channel delay. The LFE channel skips
 * convolution and is mixed straight into the output with s->gain_lfe.
 *
 * Returns 0; samples exceeding |1.0| are counted in td->n_clippings[jobnr]. */
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    const int *const delay = td->delay[jobnr];
    const float *const ir = td->ir[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int n_samples = s->sofa.n_samples;   /* IR length padded to a power of 2 */
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar; /* output stride: 2 for interleaved, 1 for planar */
    const float *src = (const float *)in->extended_data[0]; /* get pointer to audio input buffer */
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    float *buffer[64]; /* holds ringbuffer for each input channel */
    int wr = *write;
    int read;
    int i, l;

    if (!planar)
        dst += offset; /* interleaved: ear 0 writes even samples, ear 1 odd */

    for (l = 0; l < in_channels; l++) {
        /* get starting address of ringbuffer for each input channel */
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir; /* using same set of IRs for each sample */

        dst[0] = 0;
        if (planar) {
            for (l = 0; l < in_channels; l++) {
                const float *srcp = (const float *)in->extended_data[l];

                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = srcp[i];
            }
        } else {
            for (l = 0; l < in_channels; l++) {
                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = src[l];
            }
        }

        /* loop goes through all channels to be convolved */
        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                /* LFE is an input channel but requires no convolution */
                /* apply gain to LFE signal and add to output buffer */
                dst[0] += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += n_samples;
                continue;
            }

            /* current read position in ringbuffer: input sample write position
             * - delay for l-th ch. + diff. betw. IR length and buffer length
             * (mod buffer length) */
            read = (wr - delay[l] - (ir_samples - 1) + buffer_length) & modulo;

            if (read + ir_samples < buffer_length) {
                /* contiguous case: copy the needed history in one go */
                memmove(temp_src, bptr + read, ir_samples * sizeof(*temp_src));
            } else {
                /* wrap-around case: copy the tail, then the head of the buffer.
                 * NOTE(review): this mixes n_samples and ir_samples in the
                 * length computation — presumably safe because
                 * n_samples >= ir_samples, but worth confirming. */
                int len = FFMIN(n_samples - (read % ir_samples), buffer_length - read);

                memmove(temp_src, bptr + read, len * sizeof(*temp_src));
                memmove(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
            }

            /* multiply signal and IR, and add up the results */
            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_samples, 32));
            temp_ir += n_samples;
        }

        /* clippings counter */
        if (fabsf(dst[0]) > 1)
            n_clippings[0]++;

        /* move output buffer pointer by +2 to get to next sample of processed channel: */
        dst += mult;
        src += in_channels;
        wr = (wr + 1) & modulo; /* update ringbuffer write position */
    }

    *write = wr; /* remember write position in ringbuffer for next call */

    return 0;
}
431 
/* Frequency-domain (overlap-add) convolution worker, run once per output ear
 * (jobnr 0 = left, 1 = right).
 *
 * First copies the overlap saved by the previous call out of the ring buffer
 * into the output. Then, for each non-LFE input channel: zero-pads the
 * channel's samples into an FFT block, transforms it, multiplies it with the
 * channel's precomputed HRTF spectrum and accumulates the product across all
 * channels. One inverse transform produces the time-domain result; the first
 * nb_samples go to the output and the trailing (ir_samples - 1) overflow
 * samples are stored back into the ring buffer for the next call. The LFE
 * channel bypasses the FFT path and is mixed in directly with s->gain_lfe.
 *
 * Returns 0; samples exceeding |1.0| are counted in td->n_clippings[jobnr]. */
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    AVComplexFloat *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar; /* output stride: 2 for interleaved, 1 for planar */
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    AVComplexFloat *fft_in = s->in_fft[jobnr]; /* temporary array for FFT input data */
    AVComplexFloat *fft_out = s->out_fft[jobnr]; /* temporary array for FFT output data */
    AVComplexFloat *fft_acc = s->temp_afft[jobnr]; /* accumulates products over all channels */
    AVTXContext *ifft = s->ifft[jobnr];
    av_tx_fn itx_fn = s->itx_fn[jobnr];
    AVTXContext *fft = s->fft[jobnr];
    av_tx_fn tx_fn = s->tx_fn[jobnr];
    const int n_conv = s->n_conv;
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft; /* av_tx FFT is unnormalized; rescale after IFFT */
    AVComplexFloat *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    if (!planar)
        dst += offset; /* interleaved: ear 0 writes even samples, ear 1 odd */

    /* find minimum between number of samples and output buffer length:
     * (important, if one IR is longer than the output buffer) */
    n_read = FFMIN(ir_samples, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        /* initialize output buf with saved signal from overflow buf */
        dst[mult * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0f; /* re-set read samples to zero */
        /* update ringbuffer read/write position */
        wr = (wr + 1) & modulo;
    }

    /* initialize rest of output buffer with 0 */
    for (j = n_read; j < in->nb_samples; j++) {
        dst[mult * j] = 0;
    }

    /* fill FFT accumulation with 0 */
    memset(fft_acc, 0, sizeof(AVComplexFloat) * n_fft);

    for (i = 0; i < n_conv; i++) {
        const float *src = (const float *)in->extended_data[i * planar]; /* get pointer to audio input buffer */

        if (i == s->lfe_channel) { /* LFE */
            if (in->format == AV_SAMPLE_FMT_FLT) {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
                }
            } else {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[j] += src[j] * s->gain_lfe;
                }
            }
            continue;
        }

        /* outer loop: go through all input channels to be convolved */
        offset = i * n_fft; /* no. samples already processed */
        hrtf_offset = hrtf + offset;

        /* fill FFT input with 0 (we want to zero-pad) */
        memset(fft_in, 0, sizeof(AVComplexFloat) * n_fft);

        if (in->format == AV_SAMPLE_FMT_FLT) {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT */
                /* write all samples of current input channel to FFT input array */
                fft_in[j].re = src[j * in_channels + i];
            }
        } else {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT */
                /* write all samples of current input channel to FFT input array */
                fft_in[j].re = src[j];
            }
        }

        /* transform input signal of current channel to frequency domain */
        tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));

        for (j = 0; j < n_fft; j++) {
            const AVComplexFloat *hcomplex = hrtf_offset + j;
            const float re = fft_out[j].re;
            const float im = fft_out[j].im;

            /* complex multiplication of input signal and HRTFs */
            /* output channel (real): */
            fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
            /* output channel (imag): */
            fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
        }
    }

    /* transform output signal of current channel back to time domain */
    itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));

    for (j = 0; j < in->nb_samples; j++) {
        /* write output signal of current channel to output buffer */
        dst[mult * j] += fft_out[j].re * fft_scale;
    }

    for (j = 0; j < ir_samples - 1; j++) { /* overflow length is IR length - 1 */
        /* write the rest of output signal to overflow buffer */
        int write_pos = (wr + j) & modulo;

        *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
    }

    /* go through all samples of current output buffer: count clippings */
    for (i = 0; i < out->nb_samples; i++) {
        /* clippings counter */
        if (fabsf(dst[i * mult]) > 1) { /* if current output sample > 1 */
            n_clippings[0]++;
        }
    }

    /* remember read/write position in ringbuffer for next call */
    *write = wr;

    return 0;
}
571 
573 {
574  AVFilterContext *ctx = inlink->dst;
575  SOFAlizerContext *s = ctx->priv;
576  AVFilterLink *outlink = ctx->outputs[0];
577  int n_clippings[2] = { 0 };
578  ThreadData td;
579  AVFrame *out;
580 
581  out = ff_get_audio_buffer(outlink, in->nb_samples);
582  if (!out) {
583  av_frame_free(&in);
584  return AVERROR(ENOMEM);
585  }
587 
588  td.in = in; td.out = out; td.write = s->write;
589  td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
590  td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
591  td.in_fft = s->in_fft;
592  td.out_fft = s->out_fft;
593  td.temp_afft = s->temp_afft;
594 
595  if (s->type == TIME_DOMAIN) {
597  } else if (s->type == FREQUENCY_DOMAIN) {
599  }
600  emms_c();
601 
602  /* display error message if clipping occurred */
603  if (n_clippings[0] + n_clippings[1] > 0) {
604  av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
605  n_clippings[0] + n_clippings[1], out->nb_samples * 2);
606  }
607 
608  av_frame_free(&in);
609  return ff_filter_frame(outlink, out);
610 }
611 
613 {
614  AVFilterLink *inlink = ctx->inputs[0];
615  AVFilterLink *outlink = ctx->outputs[0];
616  SOFAlizerContext *s = ctx->priv;
617  AVFrame *in;
618  int ret;
619 
621 
622  if (s->nb_samples)
623  ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
624  else
626  if (ret < 0)
627  return ret;
628  if (ret > 0)
629  return filter_frame(inlink, in);
630 
633 
634  return FFERROR_NOT_READY;
635 }
636 
638 {
639  struct SOFAlizerContext *s = ctx->priv;
641  int ret, sample_rates[] = { 48000, -1 };
642  static const enum AVSampleFormat sample_fmts[] = {
645  };
646 
648  if (ret)
649  return ret;
650 
652  if (!layouts)
653  return AVERROR(ENOMEM);
654 
655  ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->outcfg.channel_layouts);
656  if (ret)
657  return ret;
658 
659  layouts = NULL;
661  if (ret)
662  return ret;
663 
664  ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->incfg.channel_layouts);
665  if (ret)
666  return ret;
667 
668  sample_rates[0] = s->sample_rate;
670 }
671 
/* Fetch the left/right IR pair (and broadband delays) for the measurement
 * closest to the Cartesian position (x, y, z), interpolating between
 * neighboring measurements when the `interpolate` option is enabled.
 *
 * left/right must each hold at least N floats; delay_left/delay_right
 * receive the delays as stored in the file (converted to samples by the
 * caller via the sample rate — so presumably they are in seconds; confirm
 * against the libmysofa convention).
 *
 * Returns 0 on success, AVERROR(EINVAL) if the lookup fails. */
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z,
                           float *left, float *right,
                           float *delay_left, float *delay_right)
{
    struct SOFAlizerContext *s = ctx->priv;
    float c[3], delays[2];
    float *fl, *fr;
    int nearest;
    int *neighbors;
    float *res;

    c[0] = x, c[1] = y, c[2] = z;
    nearest = mysofa_lookup(s->sofa.lookup, c);
    if (nearest < 0)
        return AVERROR(EINVAL);

    if (s->interpolate) {
        /* blend the nearest measurement with its neighbors into the
         * s->sofa.fir scratch buffer allocated in preload_sofa().
         * NOTE(review): neighbors/res are assumed non-NULL and owned by
         * libmysofa here — verify against the mysofa_interpolate() contract. */
        neighbors = mysofa_neighborhood(s->sofa.neighborhood, nearest);
        res = mysofa_interpolate(s->sofa.hrtf, c,
                                 nearest, neighbors,
                                 s->sofa.fir, delays);
    } else {
        /* use the nearest measurement as-is; take its per-measurement delay
         * pair when the file stores one per measurement, otherwise the
         * file-global pair */
        if (s->sofa.hrtf->DataDelay.elements > s->sofa.hrtf->R) {
            delays[0] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R];
            delays[1] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R + 1];
        } else {
            delays[0] = s->sofa.hrtf->DataDelay.values[0];
            delays[1] = s->sofa.hrtf->DataDelay.values[1];
        }
        res = s->sofa.hrtf->DataIR.values + nearest * s->sofa.hrtf->N * s->sofa.hrtf->R;
    }

    *delay_left = delays[0];
    *delay_right = delays[1];

    fl = res;                   /* left-ear IR: first N samples of the measurement */
    fr = res + s->sofa.hrtf->N; /* right-ear IR: next N samples */

    memcpy(left, fl, sizeof(float) * s->sofa.hrtf->N);
    memcpy(right, fr, sizeof(float) * s->sofa.hrtf->N);

    return 0;
}
715 
716 static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
717 {
718  struct SOFAlizerContext *s = ctx->priv;
719  int n_samples;
720  int ir_samples;
721  int n_conv = s->n_conv; /* no. channels to convolve */
722  int n_fft;
723  float delay_l; /* broadband delay for each IR */
724  float delay_r;
725  int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels; /* no. input channels */
726  float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3dB/channel */
727  AVComplexFloat *data_hrtf_l = NULL;
728  AVComplexFloat *data_hrtf_r = NULL;
729  AVComplexFloat *fft_out_l = NULL;
730  AVComplexFloat *fft_out_r = NULL;
731  AVComplexFloat *fft_in_l = NULL;
732  AVComplexFloat *fft_in_r = NULL;
733  float *data_ir_l = NULL;
734  float *data_ir_r = NULL;
735  int offset = 0; /* used for faster pointer arithmetics in for-loop */
736  int i, j, azim_orig = azim, elev_orig = elev;
737  int ret = 0;
738  int n_current;
739  int n_max = 0;
740 
741  av_log(ctx, AV_LOG_DEBUG, "IR length: %d.\n", s->sofa.hrtf->N);
742  s->sofa.ir_samples = s->sofa.hrtf->N;
743  s->sofa.n_samples = 1 << (32 - ff_clz(s->sofa.ir_samples));
744 
745  n_samples = s->sofa.n_samples;
746  ir_samples = s->sofa.ir_samples;
747 
748  if (s->type == TIME_DOMAIN) {
749  s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
750  s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);
751 
752  if (!s->data_ir[0] || !s->data_ir[1]) {
753  ret = AVERROR(ENOMEM);
754  goto fail;
755  }
756  }
757 
758  s->delay[0] = av_calloc(s->n_conv, sizeof(int));
759  s->delay[1] = av_calloc(s->n_conv, sizeof(int));
760 
761  if (!s->delay[0] || !s->delay[1]) {
762  ret = AVERROR(ENOMEM);
763  goto fail;
764  }
765 
766  /* get temporary IR for L and R channel */
767  data_ir_l = av_calloc(n_conv * n_samples, sizeof(*data_ir_l));
768  data_ir_r = av_calloc(n_conv * n_samples, sizeof(*data_ir_r));
769  if (!data_ir_r || !data_ir_l) {
770  ret = AVERROR(ENOMEM);
771  goto fail;
772  }
773 
774  if (s->type == TIME_DOMAIN) {
775  s->temp_src[0] = av_calloc(n_samples, sizeof(float));
776  s->temp_src[1] = av_calloc(n_samples, sizeof(float));
777  if (!s->temp_src[0] || !s->temp_src[1]) {
778  ret = AVERROR(ENOMEM);
779  goto fail;
780  }
781  }
782 
783  s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
784  s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
785  if (!s->speaker_azim || !s->speaker_elev) {
786  ret = AVERROR(ENOMEM);
787  goto fail;
788  }
789 
790  /* get speaker positions */
791  if ((ret = get_speaker_pos(ctx, s->speaker_azim, s->speaker_elev)) < 0) {
792  av_log(ctx, AV_LOG_ERROR, "Couldn't get speaker positions. Input channel configuration not supported.\n");
793  goto fail;
794  }
795 
796  for (i = 0; i < s->n_conv; i++) {
797  float coordinates[3];
798 
799  /* load and store IRs and corresponding delays */
800  azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
801  elev = (int)(s->speaker_elev[i] + elev_orig) % 90;
802 
803  coordinates[0] = azim;
804  coordinates[1] = elev;
805  coordinates[2] = radius;
806 
807  mysofa_s2c(coordinates);
808 
809  /* get id of IR closest to desired position */
810  ret = getfilter_float(ctx, coordinates[0], coordinates[1], coordinates[2],
811  data_ir_l + n_samples * i,
812  data_ir_r + n_samples * i,
813  &delay_l, &delay_r);
814  if (ret < 0)
815  goto fail;
816 
817  s->delay[0][i] = delay_l * sample_rate;
818  s->delay[1][i] = delay_r * sample_rate;
819 
820  s->sofa.max_delay = FFMAX3(s->sofa.max_delay, s->delay[0][i], s->delay[1][i]);
821  }
822 
823  /* get size of ringbuffer (longest IR plus max. delay) */
824  /* then choose next power of 2 for performance optimization */
825  n_current = n_samples + s->sofa.max_delay;
826  /* length of longest IR plus max. delay */
827  n_max = FFMAX(n_max, n_current);
828 
829  /* buffer length is longest IR plus max. delay -> next power of 2
830  (32 - count leading zeros gives required exponent) */
831  s->buffer_length = 1 << (32 - ff_clz(n_max));
832  s->n_fft = n_fft = 1 << (32 - ff_clz(n_max + s->framesize));
833 
834  if (s->type == FREQUENCY_DOMAIN) {
835  float scale = 1.f;
836 
837  av_tx_uninit(&s->fft[0]);
838  av_tx_uninit(&s->fft[1]);
839  ret = av_tx_init(&s->fft[0], &s->tx_fn[0], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
840  if (ret < 0)
841  goto fail;
842  ret = av_tx_init(&s->fft[1], &s->tx_fn[1], AV_TX_FLOAT_FFT, 0, s->n_fft, &scale, 0);
843  if (ret < 0)
844  goto fail;
845  av_tx_uninit(&s->ifft[0]);
846  av_tx_uninit(&s->ifft[1]);
847  ret = av_tx_init(&s->ifft[0], &s->itx_fn[0], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
848  if (ret < 0)
849  goto fail;
850  ret = av_tx_init(&s->ifft[1], &s->itx_fn[1], AV_TX_FLOAT_FFT, 1, s->n_fft, &scale, 0);
851  if (ret < 0)
852  goto fail;
853  }
854 
855  if (s->type == TIME_DOMAIN) {
856  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
857  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
858  } else if (s->type == FREQUENCY_DOMAIN) {
859  /* get temporary HRTF memory for L and R channel */
860  data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
861  data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
862  if (!data_hrtf_r || !data_hrtf_l) {
863  ret = AVERROR(ENOMEM);
864  goto fail;
865  }
866 
867  s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
868  s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
869  s->in_fft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
870  s->in_fft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
871  s->out_fft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
872  s->out_fft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
873  s->temp_afft[0] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
874  s->temp_afft[1] = av_malloc_array(s->n_fft, sizeof(AVComplexFloat));
875  if (!s->in_fft[0] || !s->in_fft[1] ||
876  !s->out_fft[0] || !s->out_fft[1] ||
877  !s->temp_afft[0] || !s->temp_afft[1]) {
878  ret = AVERROR(ENOMEM);
879  goto fail;
880  }
881  }
882 
883  if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
884  ret = AVERROR(ENOMEM);
885  goto fail;
886  }
887 
888  if (s->type == FREQUENCY_DOMAIN) {
889  fft_out_l = av_calloc(n_fft, sizeof(*fft_out_l));
890  fft_out_r = av_calloc(n_fft, sizeof(*fft_out_r));
891  fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
892  fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
893  if (!fft_in_l || !fft_in_r ||
894  !fft_out_l || !fft_out_r) {
895  ret = AVERROR(ENOMEM);
896  goto fail;
897  }
898  }
899 
900  for (i = 0; i < s->n_conv; i++) {
901  float *lir, *rir;
902 
903  offset = i * n_samples; /* no. samples already written */
904 
905  lir = data_ir_l + offset;
906  rir = data_ir_r + offset;
907 
908  if (s->type == TIME_DOMAIN) {
909  for (j = 0; j < ir_samples; j++) {
910  /* load reversed IRs of the specified source position
911  * sample-by-sample for left and right ear; and apply gain */
912  s->data_ir[0][offset + j] = lir[ir_samples - 1 - j] * gain_lin;
913  s->data_ir[1][offset + j] = rir[ir_samples - 1 - j] * gain_lin;
914  }
915  } else if (s->type == FREQUENCY_DOMAIN) {
916  memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
917  memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
918 
919  offset = i * n_fft; /* no. samples already written */
920  for (j = 0; j < ir_samples; j++) {
921  /* load non-reversed IRs of the specified source position
922  * sample-by-sample and apply gain,
923  * L channel is loaded to real part, R channel to imag part,
924  * IRs are shifted by L and R delay */
925  fft_in_l[s->delay[0][i] + j].re = lir[j] * gain_lin;
926  fft_in_r[s->delay[1][i] + j].re = rir[j] * gain_lin;
927  }
928 
929  /* actually transform to frequency domain (IRs -> HRTFs) */
930  s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
931  memcpy(data_hrtf_l + offset, fft_out_l, n_fft * sizeof(*fft_out_l));
932  s->tx_fn[1](s->fft[1], fft_out_r, fft_in_r, sizeof(*fft_in_r));
933  memcpy(data_hrtf_r + offset, fft_out_r, n_fft * sizeof(*fft_out_r));
934  }
935  }
936 
937  if (s->type == FREQUENCY_DOMAIN) {
938  s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(AVComplexFloat));
939  s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(AVComplexFloat));
940  if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
941  ret = AVERROR(ENOMEM);
942  goto fail;
943  }
944 
945  memcpy(s->data_hrtf[0], data_hrtf_l, /* copy HRTF data to */
946  sizeof(AVComplexFloat) * n_conv * n_fft); /* filter struct */
947  memcpy(s->data_hrtf[1], data_hrtf_r,
948  sizeof(AVComplexFloat) * n_conv * n_fft);
949  }
950 
951 fail:
952  av_freep(&data_hrtf_l); /* free temporary HRTF memory */
953  av_freep(&data_hrtf_r);
954 
 956  av_freep(&data_ir_l); /* free temporary IR memory */
956  av_freep(&data_ir_r);
957 
958  av_freep(&fft_out_l); /* free temporary FFT memory */
959  av_freep(&fft_out_r);
960 
961  av_freep(&fft_in_l); /* free temporary FFT memory */
962  av_freep(&fft_in_r);
963 
964  return ret;
965 }
966 
968 {
969  SOFAlizerContext *s = ctx->priv;
970  int ret;
971 
972  if (!s->filename) {
973  av_log(ctx, AV_LOG_ERROR, "Valid SOFA filename must be set.\n");
974  return AVERROR(EINVAL);
975  }
976 
977  /* preload SOFA file, */
978  ret = preload_sofa(ctx, s->filename, &s->sample_rate);
979  if (ret) {
980  /* file loading error */
981  av_log(ctx, AV_LOG_ERROR, "Error while loading SOFA file: '%s'\n", s->filename);
982  } else { /* no file loading error, resampling not required */
983  av_log(ctx, AV_LOG_DEBUG, "File '%s' loaded.\n", s->filename);
984  }
985 
986  if (ret) {
987  av_log(ctx, AV_LOG_ERROR, "No valid SOFA file could be loaded. Please specify valid SOFA file.\n");
988  return ret;
989  }
990 
991  s->fdsp = avpriv_float_dsp_alloc(0);
992  if (!s->fdsp)
993  return AVERROR(ENOMEM);
994 
995  return 0;
996 }
997 
999 {
1000  AVFilterContext *ctx = inlink->dst;
1001  SOFAlizerContext *s = ctx->priv;
1002  int ret;
1003 
1004  if (s->type == FREQUENCY_DOMAIN)
1005  s->nb_samples = s->framesize;
1006 
1007  /* gain -3 dB per channel */
1008  s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
1009 
1010  s->n_conv = inlink->ch_layout.nb_channels;
1011 
1012  /* load IRs to data_ir[0] and data_ir[1] for required directions */
1013  if ((ret = load_data(ctx, s->rotation, s->elevation, s->radius, inlink->sample_rate)) < 0)
1014  return ret;
1015 
1016  av_log(ctx, AV_LOG_DEBUG, "Samplerate: %d Channels to convolute: %d, Length of ringbuffer: %d x %d\n",
1017  inlink->sample_rate, s->n_conv, inlink->ch_layout.nb_channels, s->buffer_length);
1018 
1019  return 0;
1020 }
1021 
1023 {
1024  SOFAlizerContext *s = ctx->priv;
1025 
1026  close_sofa(&s->sofa);
1027  av_tx_uninit(&s->ifft[0]);
1028  av_tx_uninit(&s->ifft[1]);
1029  av_tx_uninit(&s->fft[0]);
1030  av_tx_uninit(&s->fft[1]);
1031  s->ifft[0] = NULL;
1032  s->ifft[1] = NULL;
1033  s->fft[0] = NULL;
1034  s->fft[1] = NULL;
1035  av_freep(&s->delay[0]);
1036  av_freep(&s->delay[1]);
1037  av_freep(&s->data_ir[0]);
1038  av_freep(&s->data_ir[1]);
1039  av_freep(&s->ringbuffer[0]);
1040  av_freep(&s->ringbuffer[1]);
1041  av_freep(&s->speaker_azim);
1042  av_freep(&s->speaker_elev);
1043  av_freep(&s->temp_src[0]);
1044  av_freep(&s->temp_src[1]);
1045  av_freep(&s->temp_afft[0]);
1046  av_freep(&s->temp_afft[1]);
1047  av_freep(&s->in_fft[0]);
1048  av_freep(&s->in_fft[1]);
1049  av_freep(&s->out_fft[0]);
1050  av_freep(&s->out_fft[1]);
1051  av_freep(&s->data_hrtf[0]);
1052  av_freep(&s->data_hrtf[1]);
1053  av_freep(&s->fdsp);
1054 }
1055 
1056 #define OFFSET(x) offsetof(SOFAlizerContext, x)
1057 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1058 
/* Filter options: SOFA file name, overall/LFE gain, listener orientation
 * (rotation/elevation/radius), processing domain (time vs. frequency),
 * custom speaker positions and IR post-processing controls. */
static const AVOption sofalizer_options[] = {
    { "sofa", "sofa filename", OFFSET(filename), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "gain", "set gain in dB", OFFSET(gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20, 40, .flags = FLAGS },
    { "rotation", "set rotation" , OFFSET(rotation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -360, 360, .flags = FLAGS },
    { "elevation", "set elevation", OFFSET(elevation), AV_OPT_TYPE_FLOAT, {.dbl=0}, -90, 90, .flags = FLAGS },
    { "radius", "set radius", OFFSET(radius), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 5, .flags = FLAGS },
    { "type", "set processing", OFFSET(type), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, .flags = FLAGS, "type" },
    { "time", "time domain", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, .flags = FLAGS, "type" },
    { "freq", "frequency domain", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, .flags = FLAGS, "type" },
    { "speakers", "set speaker custom positions", OFFSET(speakers_pos), AV_OPT_TYPE_STRING, {.str=0}, 0, 0, .flags = FLAGS },
    { "lfegain", "set lfe gain", OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT, {.dbl=0}, -20,40, .flags = FLAGS },
    { "framesize", "set frame size", OFFSET(framesize), AV_OPT_TYPE_INT, {.i64=1024},1024,96000, .flags = FLAGS },
    { "normalize", "normalize IRs", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, .flags = FLAGS },
    { "interpolate","interpolate IRs from neighbors", OFFSET(interpolate),AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
    { "minphase", "minphase IRs", OFFSET(minphase), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS },
    { "anglestep", "set neighbor search angle step", OFFSET(anglestep), AV_OPT_TYPE_FLOAT, {.dbl=.5}, 0.01, 10, .flags = FLAGS },
    { "radstep", "set neighbor search radius step", OFFSET(radstep), AV_OPT_TYPE_FLOAT, {.dbl=.01}, 0.01, 1, .flags = FLAGS },
    { NULL }
};
1078 
/* Expands to the sofalizer_class AVClass tied to sofalizer_options. */
AVFILTER_DEFINE_CLASS(sofalizer);
1080 
/* Single audio input pad; config_input runs once the link is negotiated. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};
1088 
/* Single audio output pad carrying the binaural (stereo) result. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
};
1095 
1097  .name = "sofalizer",
1098  .description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
1099  .priv_size = sizeof(SOFAlizerContext),
1100  .priv_class = &sofalizer_class,
1101  .init = init,
1102  .activate = activate,
1103  .uninit = uninit,
1107  .flags = AVFILTER_FLAG_SLICE_THREADS,
1108 };
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:100
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
SOFAlizerContext::write
int write[2]
Definition: af_sofalizer.c:81
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
td
#define td
Definition: regdef.h:70
TIME_DOMAIN
#define TIME_DOMAIN
Definition: af_sofalizer.c:42
SOFAlizerContext::filename
char * filename
Definition: af_sofalizer.c:65
SOFAlizerContext::nb_samples
int nb_samples
Definition: af_sofalizer.c:85
SOFAlizerContext::speakers_pos
char * speakers_pos
Definition: af_sofalizer.c:71
SOFAlizerContext::radstep
float radstep
Definition: af_sofalizer.c:108
SOFAlizerContext
Definition: af_sofalizer.c:62
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ThreadData::out_fft
AVComplexFloat ** out_fft
Definition: af_headphone.c:138
SOFAlizerContext::vspkrpos
VirtualSpeaker vspkrpos[64]
Definition: af_sofalizer.c:110
out
FILE * out
Definition: movenc.c:54
parse_channel_name
static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
Definition: af_sofalizer.c:188
AV_CHANNEL_LAYOUT_STEREO
#define AV_CHANNEL_LAYOUT_STEREO
Definition: channel_layout.h:369
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:969
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
ff_channel_layouts_ref
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:591
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:326
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
AV_CHAN_WIDE_LEFT
@ AV_CHAN_WIDE_LEFT
Definition: channel_layout.h:72
AVTXContext
Definition: tx_priv.h:228
ff_set_common_samplerates_from_list
int ff_set_common_samplerates_from_list(AVFilterContext *ctx, const int *samplerates)
Equivalent to ff_set_common_samplerates(ctx, ff_make_format_list(samplerates))
Definition: formats.c:733
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ff_clz
#define ff_clz
Definition: intmath.h:143
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
SOFAlizerContext::in_fft
AVComplexFloat * in_fft[2]
Definition: af_sofalizer.c:93
SOFAlizerContext::lfe_channel
int lfe_channel
Definition: af_sofalizer.c:74
im
float im
Definition: fft.c:79
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
av_channel_layout_channel_from_index
enum AVChannel av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx)
Get the channel with the given index in a channel layout.
Definition: channel_layout.c:796
AVOption
AVOption.
Definition: opt.h:251
FILTER_QUERY_FUNC
#define FILTER_QUERY_FUNC(func)
Definition: internal.h:171
expf
#define expf(x)
Definition: libm.h:283
AVComplexFloat
Definition: tx.h:27
SOFAlizerContext::type
int type
Definition: af_sofalizer.c:102
SOFAlizerContext::anglestep
float anglestep
Definition: af_sofalizer.c:107
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:165
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:473
outputs
static const AVFilterPad outputs[]
Definition: af_sofalizer.c:1089
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:154
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:883
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:137
SOFAlizerContext::gain_lfe
float gain_lfe
Definition: af_sofalizer.c:73
SOFAlizerContext::n_conv
int n_conv
Definition: af_sofalizer.c:76
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1364
AV_CHAN_SURROUND_DIRECT_LEFT
@ AV_CHAN_SURROUND_DIRECT_LEFT
Definition: channel_layout.h:74
AVComplexFloat::im
float im
Definition: tx.h:28
AV_CHAN_TOP_BACK_RIGHT
@ AV_CHAN_TOP_BACK_RIGHT
Definition: channel_layout.h:67
fail
#define fail()
Definition: checkasm.h:134
VirtualSpeaker::elev
float elev
Definition: af_sofalizer.c:59
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:136
parse_speaker_pos
static void parse_speaker_pos(AVFilterContext *ctx)
Definition: af_sofalizer.c:217
SOFAlizerContext::sofa
MySofa sofa
Definition: af_sofalizer.c:66
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
SOFAlizerContext::sample_rate
int sample_rate
Definition: af_sofalizer.c:68
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const 
double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? 
in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:49
MySofa::lir
float * lir
Definition: af_sofalizer.c:51
AV_CHAN_STEREO_RIGHT
@ AV_CHAN_STEREO_RIGHT
See above.
Definition: channel_layout.h:71
MySofa::n_samples
int n_samples
Definition: af_sofalizer.c:50
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
SOFAlizerContext::interpolate
int interpolate
Definition: af_sofalizer.c:105
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:127
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_sofalizer.c:43
s
#define s(width, name)
Definition: cbs_vp9.c:256
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_sofalizer.c:998
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:179
AV_CHAN_SIDE_RIGHT
@ AV_CHAN_SIDE_RIGHT
Definition: channel_layout.h:60
ff_set_common_formats_from_list
int ff_set_common_formats_from_list(AVFilterContext *ctx, const int *fmts)
Equivalent to ff_set_common_formats(ctx, ff_make_format_list(fmts))
Definition: formats.c:755
get_speaker_pos
static int get_speaker_pos(AVFilterContext *ctx, float *speaker_azim, float *speaker_elev)
Definition: af_sofalizer.c:248
filters.h
AV_TX_FLOAT_FFT
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplex...
Definition: tx.h:47
load_data
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
Definition: af_sofalizer.c:716
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
SOFAlizerContext::data_hrtf
AVComplexFloat * data_hrtf[2]
Definition: af_sofalizer.c:114
SOFAlizerContext::data_ir
float * data_ir[2]
Definition: af_sofalizer.c:90
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_sofalizer.c:967
SOFAlizerContext::framesize
int framesize
Definition: af_sofalizer.c:103
sofalizer_fast_convolute
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:432
MySofa::lookup
struct MYSOFA_LOOKUP * lookup
Definition: af_sofalizer.c:47
MySofa::ir_samples
int ir_samples
Definition: af_sofalizer.c:49
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: internal.h:194
SOFAlizerContext::fdsp
AVFloatDSPContext * fdsp
Definition: af_sofalizer.c:116
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
activate
static int activate(AVFilterContext *ctx)
Definition: af_sofalizer.c:612
SOFAlizerContext::ringbuffer
float * ringbuffer[2]
Definition: af_sofalizer.c:79
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1383
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:594
SOFAlizerContext::delay
int * delay[2]
Definition: af_sofalizer.c:88
SOFAlizerContext::buffer_length
int buffer_length
Definition: af_sofalizer.c:82
MySofa::rir
float * rir
Definition: af_sofalizer.c:51
AV_CHAN_TOP_BACK_CENTER
@ AV_CHAN_TOP_BACK_CENTER
Definition: channel_layout.h:66
MySofa::max_delay
int max_delay
Definition: af_sofalizer.c:53
AV_CHAN_TOP_CENTER
@ AV_CHAN_TOP_CENTER
Definition: channel_layout.h:61
ThreadData::in_fft
AVComplexFloat ** in_fft
Definition: af_headphone.c:139
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, const AVChannelLayout *channel_layout)
Definition: formats.c:466
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_sofalizer.c:572
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CHAN_FRONT_RIGHT_OF_CENTER
@ AV_CHAN_FRONT_RIGHT_OF_CENTER
Definition: channel_layout.h:57
float_dsp.h
AV_CHAN_FRONT_RIGHT
@ AV_CHAN_FRONT_RIGHT
Definition: channel_layout.h:51
AV_CHAN_FRONT_CENTER
@ AV_CHAN_FRONT_CENTER
Definition: channel_layout.h:52
inputs
static const AVFilterPad inputs[]
Definition: af_sofalizer.c:1081
SOFAlizerContext::speaker_azim
float * speaker_azim
Definition: af_sofalizer.c:69
SOFAlizerContext::temp_afft
AVComplexFloat * temp_afft[2]
Definition: af_sofalizer.c:95
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:301
VirtualSpeaker
Definition: af_sofalizer.c:56
SOFAlizerContext::n_fft
int n_fft
Definition: af_sofalizer.c:84
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
SOFAlizerContext::rotation
float rotation
Definition: af_sofalizer.c:99
AV_CHAN_LOW_FREQUENCY
@ AV_CHAN_LOW_FREQUENCY
Definition: channel_layout.h:53
AV_CHAN_BACK_RIGHT
@ AV_CHAN_BACK_RIGHT
Definition: channel_layout.h:55
AVComplexFloat::re
float re
Definition: tx.h:28
AV_CHAN_SIDE_LEFT
@ AV_CHAN_SIDE_LEFT
Definition: channel_layout.h:59
AVFloatDSPContext
Definition: float_dsp.h:24
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:417
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_sofalizer.c:637
OFFSET
#define OFFSET(x)
Definition: af_sofalizer.c:1056
AV_CHAN_TOP_FRONT_RIGHT
@ AV_CHAN_TOP_FRONT_RIGHT
Definition: channel_layout.h:64
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
AV_CHAN_FRONT_LEFT_OF_CENTER
@ AV_CHAN_FRONT_LEFT_OF_CENTER
Definition: channel_layout.h:56
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:557
preload_sofa
static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
Definition: af_sofalizer.c:135
SOFAlizerContext::radius
float radius
Definition: af_sofalizer.c:101
interpolate
static void interpolate(float *out, float v1, float v2, int size)
Definition: twinvq.c:84
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:294
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:156
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
SOFAlizerContext::tx_fn
av_tx_fn tx_fn[2]
Definition: af_sofalizer.c:113
getfilter_float
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z, float *left, float *right, float *delay_left, float *delay_right)
Definition: af_sofalizer.c:672
AVChannel
AVChannel
Definition: channel_layout.h:47
normalize
Definition: normalize.py:1
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:410
AV_CHAN_SURROUND_DIRECT_RIGHT
@ AV_CHAN_SURROUND_DIRECT_RIGHT
Definition: channel_layout.h:75
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
FLAGS
#define FLAGS
Definition: af_sofalizer.c:1057
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:391
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
sofalizer_options
static const AVOption sofalizer_options[]
Definition: af_sofalizer.c:1059
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ThreadData
Used for passing data between threads.
Definition: dsddec.c:69
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:135
ThreadData::delay
int ** delay
Definition: af_sofalizer.c:322
len
int len
Definition: vorbis_enc_data.h:426
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:55
MySofa
Definition: af_sofalizer.c:45
AV_CHAN_STEREO_LEFT
@ AV_CHAN_STEREO_LEFT
Stereo downmix.
Definition: channel_layout.h:69
SOFAlizerContext::lfe_gain
float lfe_gain
Definition: af_sofalizer.c:72
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
AVFilter
Filter definition.
Definition: avfilter.h:161
SOFAlizerContext::fft
AVTXContext * fft[2]
Definition: af_sofalizer.c:112
ret
ret
Definition: filter_design.txt:187
ThreadData::write
int * write
Definition: af_headphone.c:133
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_sofalizer.c:1022
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_CHAN_BACK_CENTER
@ AV_CHAN_BACK_CENTER
Definition: channel_layout.h:58
av_channel_from_string
enum AVChannel av_channel_from_string(const char *str)
This is the inverse function of av_channel_name().
Definition: channel_layout.c:141
ThreadData::temp_afft
AVComplexFloat ** temp_afft
Definition: af_headphone.c:140
SOFAlizerContext::out_fft
AVComplexFloat * out_fft[2]
Definition: af_sofalizer.c:94
sofalizer_convolute
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:332
channel_layout.h
AV_CHAN_LOW_FREQUENCY_2
@ AV_CHAN_LOW_FREQUENCY_2
Definition: channel_layout.h:76
AV_CHAN_TOP_BACK_LEFT
@ AV_CHAN_TOP_BACK_LEFT
Definition: channel_layout.h:65
SOFAlizerContext::minphase
int minphase
Definition: af_sofalizer.c:106
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
MySofa::hrtf
struct MYSOFA_HRTF * hrtf
Definition: af_sofalizer.c:46
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
avfilter.h
VirtualSpeaker::set
uint8_t set
Definition: af_sofalizer.c:57
AV_CHAN_BACK_LEFT
@ AV_CHAN_BACK_LEFT
Definition: channel_layout.h:54
AVFilterContext
An instance of a filter.
Definition: avfilter.h:392
MySofa::neighborhood
struct MYSOFA_NEIGHBORHOOD * neighborhood
Definition: af_sofalizer.c:48
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:117
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
AV_CHAN_TOP_FRONT_CENTER
@ AV_CHAN_TOP_FRONT_CENTER
Definition: channel_layout.h:63
audio.h
ff_af_sofalizer
const AVFilter ff_af_sofalizer
Definition: af_sofalizer.c:1096
AV_CHAN_WIDE_RIGHT
@ AV_CHAN_WIDE_RIGHT
Definition: channel_layout.h:73
M_LN10
#define M_LN10
Definition: mathematics.h:43
VirtualSpeaker::azim
float azim
Definition: af_sofalizer.c:58
SOFAlizerContext::speaker_elev
float * speaker_elev
Definition: af_sofalizer.c:70
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: internal.h:195
SOFAlizerContext::ifft
AVTXContext * ifft[2]
Definition: af_sofalizer.c:112
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
AV_CHAN_TOP_FRONT_LEFT
@ AV_CHAN_TOP_FRONT_LEFT
Definition: channel_layout.h:62
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
SOFAlizerContext::gain
float gain
Definition: af_sofalizer.c:98
ThreadData::ir
float ** ir
Definition: af_headphone.c:134
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
SOFAlizerContext::temp_src
float * temp_src[2]
Definition: af_sofalizer.c:92
AV_CHAN_FRONT_LEFT
@ AV_CHAN_FRONT_LEFT
Definition: channel_layout.h:50
MySofa::fir
float * fir
Definition: af_sofalizer.c:52
avstring.h
SOFAlizerContext::itx_fn
av_tx_fn itx_fn[2]
Definition: af_sofalizer.c:113
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
ff_filter_execute
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: internal.h:146
int
int
Definition: ffmpeg_filter.c:156
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
SOFAlizerContext::normalize
int normalize
Definition: af_sofalizer.c:104
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:60
SOFAlizerContext::elevation
float elevation
Definition: af_sofalizer.c:100
read
static uint32_t BS_FUNC() read(BSCTX *bc, unsigned int n)
Return n bits from the buffer, n has to be in the 0-32 range.
Definition: bitstream_template.h:231
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(sofalizer)
tx.h
re
float re
Definition: fft.c:79
close_sofa
static int close_sofa(struct MySofa *sofa)
Definition: af_sofalizer.c:119
intmath.h