/*****************************************************************************
 * sofalizer.c : SOFAlizer filter for virtual binaural acoustics
 *****************************************************************************
 * Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda,
 *                         Acoustics Research Institute (ARI), Vienna, Austria
 *
 * Authors: Andreas Fuchs <andi.fuchs.mail@gmail.com>
 *          Wolfgang Hrauda <wolfgang.hrauda@gmx.at>
 *
 * SOFAlizer project coordinator at ARI, main developer of SOFA:
 * Piotr Majdak <piotr@majdak.at>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#include <math.h>
#include <mysofa.h>

#include "libavcodec/avfft.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"

#define TIME_DOMAIN      0
#define FREQUENCY_DOMAIN 1

typedef struct MySofa {  /* contains data of one SOFA file */
    struct MYSOFA_HRTF *hrtf;
    struct MYSOFA_LOOKUP *lookup;
    struct MYSOFA_NEIGHBORHOOD *neighborhood;
    int ir_samples;      /* length of one impulse response (IR) */
    int n_samples;       /* ir_samples rounded up to the next power of 2 */
    float *lir, *rir;    /* IRs (time-domain) */
    float *fir;
    int max_delay;
} MySofa;

typedef struct VirtualSpeaker {
    uint8_t set;
    float azim;
    float elev;
} VirtualSpeaker;

typedef struct SOFAlizerContext {
    const AVClass *class;

    char *filename;             /* name of SOFA file */
    MySofa sofa;                /* contains data of the SOFA file */

    int sample_rate;            /* sample rate from SOFA file */
    float *speaker_azim;        /* azimuth of the virtual loudspeakers */
    float *speaker_elev;        /* elevation of the virtual loudspeakers */
    char *speakers_pos;         /* custom positions of the virtual loudspeakers */
    float lfe_gain;             /* initial gain for the LFE channel */
    float gain_lfe;             /* gain applied to LFE channel */
    int lfe_channel;            /* LFE channel position in channel layout */

    int n_conv;                 /* number of channels to convolve */

    /* buffer variables (for convolution) */
    float *ringbuffer[2];       /* buffers input samples, length of one buffer: */
                                /* no. input ch. (incl. LFE) x buffer_length */
    int write[2];               /* current write position to ringbuffer */
    int buffer_length;          /* is: longest IR plus max. delay in all SOFA files */
                                /* then choose next power of 2 */
    int n_fft;                  /* number of samples in one FFT block */
    int nb_samples;

    /* netCDF variables */
    int *delay[2];              /* broadband delay for each channel/IR to be convolved */

    float *data_ir[2];          /* IRs for all channels to be convolved */
                                /* (this excludes the LFE) */
    float *temp_src[2];
    FFTComplex *temp_fft[2];    /* array to hold FFT values */
    FFTComplex *temp_afft[2];   /* array to accumulate FFT values prior to IFFT */

    /* control variables */
    float gain;                 /* filter gain (in dB) */
    float rotation;             /* rotation of virtual loudspeakers (in degrees) */
    float elevation;            /* elevation of virtual loudspeakers (in deg.) */
    float radius;               /* distance virtual loudspeakers to listener (in metres) */
    int type;                   /* processing type */
    int framesize;              /* size of buffer */
    int normalize;              /* should all IRs be normalized upon import? */
    int interpolate;            /* should wanted IRs be interpolated from neighbors? */
    int minphase;               /* should all IRs be minphased upon import? */
    float anglestep;            /* neighbor search angle step, in degrees */
    float radstep;              /* neighbor search radius step, in meters */

    VirtualSpeaker vspkrpos[64];

    FFTContext *fft[2], *ifft[2];
    FFTComplex *data_hrtf[2];

    AVFloatDSPContext *fdsp;
} SOFAlizerContext;

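/*
 * Illustrative aside (not part of the original filter code): buffer_length
 * is always rounded up to a power of two, so both convolution paths below
 * can wrap ring buffer indices with a bitwise AND instead of a modulo:
 *
 *     const uint32_t modulo = (uint32_t)buffer_length - 1;
 *     wr = (wr + 1) & modulo;    // same as (wr + 1) % buffer_length
 *
 * For buffer_length = 1024 this masks with 0x3FF, wrapping 1023 -> 0
 * without a division.
 */
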
static int close_sofa(struct MySofa *sofa)
{
    if (sofa->neighborhood)
        mysofa_neighborhood_free(sofa->neighborhood);
    sofa->neighborhood = NULL;
    if (sofa->lookup)
        mysofa_lookup_free(sofa->lookup);
    sofa->lookup = NULL;
    if (sofa->hrtf)
        mysofa_free(sofa->hrtf);
    sofa->hrtf = NULL;
    av_freep(&sofa->fir);

    return 0;
}

static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
{
    struct SOFAlizerContext *s = ctx->priv;
    struct MYSOFA_HRTF *mysofa;
    char *license;
    int ret;

    mysofa = mysofa_load(filename, &ret);
    s->sofa.hrtf = mysofa;
    if (ret || !mysofa) {
        av_log(ctx, AV_LOG_ERROR, "Can't find SOFA file '%s'\n", filename);
        return AVERROR(EINVAL);
    }

    ret = mysofa_check(mysofa);
    if (ret != MYSOFA_OK) {
        av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select a valid SOFA file.\n");
        return ret;
    }

    if (s->normalize)
        mysofa_loudness(s->sofa.hrtf);

    if (s->minphase)
        mysofa_minphase(s->sofa.hrtf, 0.01f);

    mysofa_tocartesian(s->sofa.hrtf);

    s->sofa.lookup = mysofa_lookup_init(s->sofa.hrtf);
    if (s->sofa.lookup == NULL)
        return AVERROR(EINVAL);

    if (s->interpolate)
        s->sofa.neighborhood = mysofa_neighborhood_init_withstepdefine(s->sofa.hrtf,
                                                                       s->sofa.lookup,
                                                                       s->anglestep,
                                                                       s->radstep);

    s->sofa.fir = av_calloc(s->sofa.hrtf->N * s->sofa.hrtf->R, sizeof(*s->sofa.fir));
    if (!s->sofa.fir)
        return AVERROR(ENOMEM);

    if (mysofa->DataSamplingRate.elements != 1)
        return AVERROR(EINVAL);
    av_log(ctx, AV_LOG_DEBUG, "Original IR length: %d.\n", mysofa->N);
    *samplingrate = mysofa->DataSamplingRate.values[0];
    license = mysofa_getAttribute(mysofa->attributes, (char *)"License");
    if (license)
        av_log(ctx, AV_LOG_INFO, "SOFA license: %s\n", license);

    return 0;
}

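/*
 * Illustrative aside: this filter uses libmysofa's low-level API so that it
 * can normalize, minimum-phase and interpolate the IRs itself. For
 * comparison, loading a single HRTF pair with the library's "easy" API could
 * look roughly like the sketch below (hypothetical standalone usage, not
 * part of this filter; check the mysofa.h of your libmysofa version for the
 * exact signatures):
 *
 *     int err, filter_length;
 *     struct MYSOFA_EASY *easy =
 *         mysofa_open("hrtf.sofa", 48000, &filter_length, &err);
 *     float *left  = av_malloc_array(filter_length, sizeof(float));
 *     float *right = av_malloc_array(filter_length, sizeof(float));
 *     float delay_left, delay_right;
 *     // source 1 m straight ahead: cartesian (x, y, z) = (1, 0, 0)
 *     mysofa_getfilter_float(easy, 1.f, 0.f, 0.f, left, right,
 *                            &delay_left, &delay_right);
 *     mysofa_close(easy);
 */
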
static int parse_channel_name(char **arg, int *rchannel, char *buf)
{
    int len, i, channel_id = 0;
    int64_t layout, layout0;

    /* try to parse a channel name, e.g. "FL" */
    if (av_sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
        layout0 = layout = av_get_channel_layout(buf);
        /* channel_id <- first set bit in layout */
        for (i = 32; i > 0; i >>= 1) {
            if (layout >= 1LL << i) {
                channel_id += i;
                layout >>= i;
            }
        }
        /* reject layouts that are not a single channel */
        if (channel_id >= 64 || layout0 != 1LL << channel_id)
            return AVERROR(EINVAL);
        *rchannel = channel_id;
        *arg += len;
        return 0;
    }
    return AVERROR(EINVAL);
}

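/*
 * Worked example for the shift cascade above: for "FC",
 * av_get_channel_layout() returns AV_CH_FRONT_CENTER = 0x4. Only the i = 2
 * step fires (0x4 >= 1 << 2), leaving channel_id = 2 and layout = 1. Any
 * layout with more than one bit set fails the `layout0 != 1LL << channel_id`
 * check and is rejected.
 */
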
static void parse_speaker_pos(AVFilterContext *ctx, int64_t in_channel_layout)
{
    SOFAlizerContext *s = ctx->priv;
    char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);

    if (!args)
        return;
    p = args;

    while ((arg = av_strtok(p, "|", &tokenizer))) {
        char buf[8];
        float azim, elev;
        int out_ch_id;

        p = NULL;
        if (parse_channel_name(&arg, &out_ch_id, buf)) {
            av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
            continue;
        }
        if (av_sscanf(arg, "%f %f", &azim, &elev) == 2) {
            s->vspkrpos[out_ch_id].set = 1;
            s->vspkrpos[out_ch_id].azim = azim;
            s->vspkrpos[out_ch_id].elev = elev;
        } else if (av_sscanf(arg, "%f", &azim) == 1) {
            s->vspkrpos[out_ch_id].set = 1;
            s->vspkrpos[out_ch_id].azim = azim;
            s->vspkrpos[out_ch_id].elev = 0;
        }
    }

    av_free(args);
}

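/*
 * Illustrative option string for the parser above (entries are
 * "<NAME> <azimuth> [<elevation>]", separated by '|'):
 *
 *     speakers=FL 45 10|FR 315 10|BC 180
 *
 * This places the front pair at +/-45 degrees azimuth and 10 degrees
 * elevation, and back center at 180 degrees with the default elevation 0.
 */
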
static int get_speaker_pos(AVFilterContext *ctx,
                           float *speaker_azim, float *speaker_elev)
{
    struct SOFAlizerContext *s = ctx->priv;
    uint64_t channels_layout = ctx->inputs[0]->channel_layout;
    float azim[16] = { 0 };
    float elev[16] = { 0 };
    int m, ch, n_conv = ctx->inputs[0]->channels; /* get no. input channels */

    if (n_conv > 16)
        return AVERROR(EINVAL);

    s->lfe_channel = -1;

    if (s->speakers_pos)
        parse_speaker_pos(ctx, channels_layout);

    /* set speaker positions according to input channel configuration: */
    for (m = 0, ch = 0; ch < n_conv && m < 64; m++) {
        uint64_t mask = channels_layout & (1ULL << m);

        switch (mask) {
        case AV_CH_FRONT_LEFT:            azim[ch] =  30;      break;
        case AV_CH_FRONT_RIGHT:           azim[ch] = 330;      break;
        case AV_CH_FRONT_CENTER:          azim[ch] =   0;      break;
        case AV_CH_LOW_FREQUENCY:
        case AV_CH_LOW_FREQUENCY_2:       s->lfe_channel = ch; break;
        case AV_CH_BACK_LEFT:             azim[ch] = 150;      break;
        case AV_CH_BACK_RIGHT:            azim[ch] = 210;      break;
        case AV_CH_BACK_CENTER:           azim[ch] = 180;      break;
        case AV_CH_SIDE_LEFT:             azim[ch] =  90;      break;
        case AV_CH_SIDE_RIGHT:            azim[ch] = 270;      break;
        case AV_CH_FRONT_LEFT_OF_CENTER:  azim[ch] =  15;      break;
        case AV_CH_FRONT_RIGHT_OF_CENTER: azim[ch] = 345;      break;
        case AV_CH_TOP_CENTER:            azim[ch] =   0;
                                          elev[ch] =  90;      break;
        case AV_CH_TOP_FRONT_LEFT:        azim[ch] =  30;
                                          elev[ch] =  45;      break;
        case AV_CH_TOP_FRONT_CENTER:      azim[ch] =   0;
                                          elev[ch] =  45;      break;
        case AV_CH_TOP_FRONT_RIGHT:       azim[ch] = 330;
                                          elev[ch] =  45;      break;
        case AV_CH_TOP_BACK_LEFT:         azim[ch] = 150;
                                          elev[ch] =  45;      break;
        case AV_CH_TOP_BACK_RIGHT:        azim[ch] = 210;
                                          elev[ch] =  45;      break;
        case AV_CH_TOP_BACK_CENTER:       azim[ch] = 180;
                                          elev[ch] =  45;      break;
        case AV_CH_WIDE_LEFT:             azim[ch] =  90;      break;
        case AV_CH_WIDE_RIGHT:            azim[ch] = 270;      break;
        case AV_CH_SURROUND_DIRECT_LEFT:  azim[ch] =  90;      break;
        case AV_CH_SURROUND_DIRECT_RIGHT: azim[ch] = 270;      break;
        case AV_CH_STEREO_LEFT:           azim[ch] =  90;      break;
        case AV_CH_STEREO_RIGHT:          azim[ch] = 270;      break;
        case 0:                                                break;
        default:
            return AVERROR(EINVAL);
        }

        if (s->vspkrpos[m].set) {
            azim[ch] = s->vspkrpos[m].azim;
            elev[ch] = s->vspkrpos[m].elev;
        }

        if (mask)
            ch++;
    }

    memcpy(speaker_azim, azim, n_conv * sizeof(float));
    memcpy(speaker_elev, elev, n_conv * sizeof(float));

    return 0;
}

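/*
 * Note on the defaults above: azimuth is counted counterclockwise from
 * straight ahead, matching the spherical coordinate convention of SOFA
 * files (hence FRONT_RIGHT = 330 rather than -30); elevation is positive
 * upwards, which puts the TOP_* channels at 45 or 90 degrees.
 */
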
typedef struct ThreadData {
    AVFrame *in, *out;
    int *write;
    int **delay;
    float **ir;
    int *n_clippings;
    float **ringbuffer;
    float **temp_src;
    FFTComplex **temp_fft;
    FFTComplex **temp_afft;
} ThreadData;

static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    const int *const delay = td->delay[jobnr];
    const float *const ir = td->ir[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int n_samples = s->sofa.n_samples;
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar;
    const float *src = (const float *)in->extended_data[0]; /* get pointer to audio input buffer */
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    float *buffer[16]; /* holds ringbuffer for each input channel */
    int wr = *write;
    int read;
    int i, l;

    if (!planar)
        dst += offset;

    for (l = 0; l < in_channels; l++) {
        /* get starting address of ringbuffer for each input channel */
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir; /* using the same set of IRs for each sample */

        dst[0] = 0;
        if (planar) {
            for (l = 0; l < in_channels; l++) {
                const float *srcp = (const float *)in->extended_data[l];

                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = srcp[i];
            }
        } else {
            for (l = 0; l < in_channels; l++) {
                /* write current input sample to ringbuffer (for each channel) */
                buffer[l][wr] = src[l];
            }
        }

        /* loop goes through all channels to be convolved */
        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                /* LFE is an input channel but requires no convolution */
                /* apply gain to LFE signal and add to output buffer */
                dst[0] += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += n_samples;
                continue;
            }

            /* current read position in ringbuffer: input sample write position
             * - delay for l-th ch. + diff. betw. IR length and buffer length
             * (mod buffer length) */
            read = (wr - delay[l] - (ir_samples - 1) + buffer_length) & modulo;

            if (read + ir_samples < buffer_length) {
                memmove(temp_src, bptr + read, ir_samples * sizeof(*temp_src));
            } else {
                int len = FFMIN(n_samples - (read % ir_samples), buffer_length - read);

                memmove(temp_src, bptr + read, len * sizeof(*temp_src));
                memmove(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
            }

            /* multiply signal and IR, and add up the results */
            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_samples, 32));
            temp_ir += n_samples;
        }

        /* clippings counter */
        if (fabsf(dst[0]) > 1)
            n_clippings[0]++;

        /* advance output pointer to the next sample of the processed channel
         * (+2 for interleaved stereo, +1 for planar): */
        dst += mult;
        src += in_channels;
        wr = (wr + 1) & modulo; /* update ringbuffer write position */
    }

    *write = wr; /* remember write position in ringbuffer for next call */

    return 0;
}

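/*
 * Illustrative scalar reference for the DSP call above:
 * scalarproduct_float(a, b, len) computes a plain dot product,
 *
 *     float sum = 0.f;
 *     for (int k = 0; k < len; k++)
 *         sum += a[k] * b[k];
 *
 * Because load_data() stores the IRs time-reversed, this dot product with a
 * chronologically ordered slice of the ring buffer is exactly the FIR
 * convolution sum y[n] = sum_k h[k] * x[n - k].
 */
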
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    SOFAlizerContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;
    int *write = &td->write[jobnr];
    FFTComplex *hrtf = s->data_hrtf[jobnr]; /* get pointers to current HRTF data */
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    const int ir_samples = s->sofa.ir_samples; /* length of one IR */
    const int planar = in->format == AV_SAMPLE_FMT_FLTP;
    const int mult = 1 + !planar;
    float *dst = (float *)out->extended_data[jobnr * planar]; /* get pointer to audio output buffer */
    const int in_channels = s->n_conv; /* number of input channels */
    /* ring buffer length is: longest IR plus max. delay -> next power of 2 */
    const int buffer_length = s->buffer_length;
    /* -1 for AND instead of MODULO (applied to powers of 2): */
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    FFTComplex *fft_in = s->temp_fft[jobnr]; /* temporary array for FFT input/output data */
    FFTComplex *fft_acc = s->temp_afft[jobnr];
    FFTContext *ifft = s->ifft[jobnr];
    FFTContext *fft = s->fft[jobnr];
    const int n_conv = s->n_conv;
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft;
    FFTComplex *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    if (!planar)
        dst += offset;

    /* find minimum between number of samples and output buffer length:
     * (important, if one IR is longer than the output buffer) */
    n_read = FFMIN(ir_samples, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        /* initialize output buf with saved signal from overflow buf */
        dst[mult * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0f; /* re-set read samples to zero */
        /* update ringbuffer read/write position */
        wr = (wr + 1) & modulo;
    }

    /* initialize rest of output buffer with 0 */
    for (j = n_read; j < in->nb_samples; j++) {
        dst[mult * j] = 0;
    }

    /* fill FFT accumulation with 0 */
    memset(fft_acc, 0, sizeof(FFTComplex) * n_fft);

    /* outer loop: go through all input channels to be convolved */
    for (i = 0; i < n_conv; i++) {
        const float *src = (const float *)in->extended_data[i * planar]; /* get pointer to audio input buffer */

        if (i == s->lfe_channel) { /* LFE */
            if (in->format == AV_SAMPLE_FMT_FLT) {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
                }
            } else {
                for (j = 0; j < in->nb_samples; j++) {
                    /* apply gain to LFE signal and add to output buffer */
                    dst[j] += src[j] * s->gain_lfe;
                }
            }
            continue;
        }

        offset = i * n_fft; /* no. samples already processed */
        hrtf_offset = hrtf + offset;

        /* fill FFT input with 0 (we want to zero-pad) */
        memset(fft_in, 0, sizeof(FFTComplex) * n_fft);

        if (in->format == AV_SAMPLE_FMT_FLT) {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT: write all samples of the current
                 * input channel to the FFT input array */
                fft_in[j].re = src[j * in_channels + i];
            }
        } else {
            for (j = 0; j < in->nb_samples; j++) {
                /* prepare input for FFT: write all samples of the current
                 * input channel to the FFT input array */
                fft_in[j].re = src[j];
            }
        }

        /* transform input signal of current channel to frequency domain */
        av_fft_permute(fft, fft_in);
        av_fft_calc(fft, fft_in);
        for (j = 0; j < n_fft; j++) {
            const FFTComplex *hcomplex = hrtf_offset + j;
            const float re = fft_in[j].re;
            const float im = fft_in[j].im;

            /* complex multiplication of input signal and HRTFs */
            /* output channel (real): */
            fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
            /* output channel (imag): */
            fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
        }
    }

    /* transform accumulated output signal back to time domain */
    av_fft_permute(ifft, fft_acc);
    av_fft_calc(ifft, fft_acc);

    for (j = 0; j < in->nb_samples; j++) {
        /* write output signal of current channel to output buffer */
        dst[mult * j] += fft_acc[j].re * fft_scale;
    }

    for (j = 0; j < ir_samples - 1; j++) { /* overflow length is IR length - 1 */
        /* write the rest of output signal to overflow buffer */
        int write_pos = (wr + j) & modulo;

        *(ringbuffer + write_pos) += fft_acc[in->nb_samples + j].re * fft_scale;
    }

    /* go through all samples of current output buffer: count clippings */
    for (i = 0; i < out->nb_samples; i++) {
        /* clippings counter */
        if (fabsf(dst[i * mult]) > 1) { /* if current output sample > 1 */
            n_clippings[0]++;
        }
    }

    /* remember read/write position in ringbuffer for next call */
    *write = wr;

    return 0;
}

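/*
 * Note on the frequency-domain path above: the per-bin update is the
 * complex multiplication (a + bi)(c + di) = (ac - bd) + (ad + bc)i, i.e.
 * pointwise multiplication of the spectra, which corresponds to circular
 * convolution in time. Zero-padding the input block to n_fft samples makes
 * the circular wrap-around harmless, and the ir_samples - 1 tail samples
 * that spill past the current block are parked in the ring buffer and added
 * to the start of the next block (overlap-add).
 */
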
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SOFAlizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int n_clippings[2] = { 0 };
    ThreadData td;
    AVFrame *out;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    td.in = in; td.out = out; td.write = s->write;
    td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
    td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
    td.temp_fft = s->temp_fft;
    td.temp_afft = s->temp_afft;

    if (s->type == TIME_DOMAIN) {
        ctx->internal->execute(ctx, sofalizer_convolute, &td, NULL, 2);
    } else if (s->type == FREQUENCY_DOMAIN) {
        ctx->internal->execute(ctx, sofalizer_fast_convolute, &td, NULL, 2);
    }
    emms_c();

    /* display warning if clipping occurred */
    if (n_clippings[0] + n_clippings[1] > 0) {
        av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
               n_clippings[0] + n_clippings[1], out->nb_samples * 2);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    SOFAlizerContext *s = ctx->priv;
    AVFrame *in;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (s->nb_samples)
        ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    else
        ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int query_formats(AVFilterContext *ctx)
{
    struct SOFAlizerContext *s = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    int ret, sample_rates[] = { 48000, -1 };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret)
        return ret;

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);

    ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
    if (ret)
        return ret;

    layouts = NULL;
    ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
    if (ret)
        return ret;

    ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
    if (ret)
        return ret;

    sample_rates[0] = s->sample_rate;
    formats = ff_make_format_list(sample_rates);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static int getfilter_float(AVFilterContext *ctx, float x, float y, float z,
                           float *left, float *right,
                           float *delay_left, float *delay_right)
{
    struct SOFAlizerContext *s = ctx->priv;
    float c[3], delays[2];
    float *fl, *fr;
    int nearest;
    int *neighbors;
    float *res;

    c[0] = x, c[1] = y, c[2] = z;
    nearest = mysofa_lookup(s->sofa.lookup, c);
    if (nearest < 0)
        return AVERROR(EINVAL);

    if (s->interpolate) {
        neighbors = mysofa_neighborhood(s->sofa.neighborhood, nearest);
        res = mysofa_interpolate(s->sofa.hrtf, c,
                                 nearest, neighbors,
                                 s->sofa.fir, delays);
    } else {
        if (s->sofa.hrtf->DataDelay.elements > s->sofa.hrtf->R) {
            delays[0] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R];
            delays[1] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R + 1];
        } else {
            delays[0] = s->sofa.hrtf->DataDelay.values[0];
            delays[1] = s->sofa.hrtf->DataDelay.values[1];
        }
        res = s->sofa.hrtf->DataIR.values + nearest * s->sofa.hrtf->N * s->sofa.hrtf->R;
    }

    *delay_left  = delays[0];
    *delay_right = delays[1];

    fl = res;
    fr = res + s->sofa.hrtf->N;

    memcpy(left, fl, sizeof(float) * s->sofa.hrtf->N);
    memcpy(right, fr, sizeof(float) * s->sofa.hrtf->N);

    return 0;
}

static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
{
    struct SOFAlizerContext *s = ctx->priv;
    int n_samples;
    int ir_samples;
    int n_conv = s->n_conv; /* no. channels to convolve */
    int n_fft;
    float delay_l; /* broadband delay for each IR */
    float delay_r;
    int nb_input_channels = ctx->inputs[0]->channels; /* no. input channels */
    float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3 dB/channel */
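    /* The dB gain above is converted to a linear factor as
     * gain_lin = 10^(dB / 20) = exp(dB / 20 * ln 10), with 3 dB of headroom
     * reserved per input channel; e.g. gain = 0 dB with 2 input channels
     * gives 10^(-6/20) ~= 0.501. */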
    FFTComplex *data_hrtf_l = NULL;
    FFTComplex *data_hrtf_r = NULL;
    FFTComplex *fft_in_l = NULL;
    FFTComplex *fft_in_r = NULL;
    float *data_ir_l = NULL;
    float *data_ir_r = NULL;
    int offset = 0; /* used for faster pointer arithmetic in for-loop */
    int i, j, azim_orig = azim, elev_orig = elev;
    int ret = 0;
    int n_current;
    int n_max = 0;

    av_log(ctx, AV_LOG_DEBUG, "IR length: %d.\n", s->sofa.hrtf->N);
    s->sofa.ir_samples = s->sofa.hrtf->N;
    s->sofa.n_samples = 1 << (32 - ff_clz(s->sofa.ir_samples));

    n_samples = s->sofa.n_samples;
    ir_samples = s->sofa.ir_samples;

    if (s->type == TIME_DOMAIN) {
        s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
        s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);

        if (!s->data_ir[0] || !s->data_ir[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    s->delay[0] = av_calloc(s->n_conv, sizeof(int));
    s->delay[1] = av_calloc(s->n_conv, sizeof(int));

    if (!s->delay[0] || !s->delay[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* get temporary IR for L and R channel */
    data_ir_l = av_calloc(n_conv * n_samples, sizeof(*data_ir_l));
    data_ir_r = av_calloc(n_conv * n_samples, sizeof(*data_ir_r));
    if (!data_ir_r || !data_ir_l) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (s->type == TIME_DOMAIN) {
        s->temp_src[0] = av_calloc(n_samples, sizeof(float));
        s->temp_src[1] = av_calloc(n_samples, sizeof(float));
        if (!s->temp_src[0] || !s->temp_src[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
    s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
    if (!s->speaker_azim || !s->speaker_elev) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* get speaker positions */
    if ((ret = get_speaker_pos(ctx, s->speaker_azim, s->speaker_elev)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't get speaker positions. Input channel configuration not supported.\n");
        goto fail;
    }

    for (i = 0; i < s->n_conv; i++) {
        float coordinates[3];

        /* load and store IRs and corresponding delays */
        azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
        elev = (int)(s->speaker_elev[i] + elev_orig) % 90;

        coordinates[0] = azim;
        coordinates[1] = elev;
        coordinates[2] = radius;

        mysofa_s2c(coordinates);

        /* get IR closest to desired position */
        ret = getfilter_float(ctx, coordinates[0], coordinates[1], coordinates[2],
                              data_ir_l + n_samples * i,
                              data_ir_r + n_samples * i,
                              &delay_l, &delay_r);
        if (ret < 0)
            goto fail;

        s->delay[0][i] = delay_l * sample_rate;
        s->delay[1][i] = delay_r * sample_rate;

        s->sofa.max_delay = FFMAX3(s->sofa.max_delay, s->delay[0][i], s->delay[1][i]);
    }

    /* get size of ringbuffer (longest IR plus max. delay) */
    /* then choose next power of 2 for performance optimization */
    n_current = n_samples + s->sofa.max_delay;
    /* length of longest IR plus max. delay */
    n_max = FFMAX(n_max, n_current);

    /* buffer length is longest IR plus max. delay -> next power of 2
       (32 - count leading zeros gives required exponent) */
    s->buffer_length = 1 << (32 - ff_clz(n_max));
    s->n_fft = n_fft = 1 << (32 - ff_clz(n_max + s->framesize));
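    /* Worked example: 1 << (32 - ff_clz(x)) rounds x up to the next power
     * of two (and doubles x if it already is one): for x = 960, ff_clz()
     * returns 22, giving 1 << 10 = 1024; for x = 1024 the result is 2048. */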

    if (s->type == FREQUENCY_DOMAIN) {
        av_fft_end(s->fft[0]);
        av_fft_end(s->fft[1]);
        s->fft[0] = av_fft_init(av_log2(s->n_fft), 0);
        s->fft[1] = av_fft_init(av_log2(s->n_fft), 0);
        av_fft_end(s->ifft[0]);
        av_fft_end(s->ifft[1]);
        s->ifft[0] = av_fft_init(av_log2(s->n_fft), 1);
        s->ifft[1] = av_fft_init(av_log2(s->n_fft), 1);

        if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (s->type == TIME_DOMAIN) {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
    } else if (s->type == FREQUENCY_DOMAIN) {
        /* get temporary HRTF memory for L and R channel */
        data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv);
        data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv);
        if (!data_hrtf_r || !data_hrtf_l) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
        s->temp_fft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        s->temp_fft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        s->temp_afft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        s->temp_afft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        if (!s->temp_fft[0] || !s->temp_fft[1] ||
            !s->temp_afft[0] || !s->temp_afft[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (s->type == FREQUENCY_DOMAIN) {
        fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
        fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
        if (!fft_in_l || !fft_in_r) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (i = 0; i < s->n_conv; i++) {
        float *lir, *rir;

        offset = i * n_samples; /* no. samples already written */

        lir = data_ir_l + offset;
        rir = data_ir_r + offset;

        if (s->type == TIME_DOMAIN) {
            for (j = 0; j < ir_samples; j++) {
                /* load reversed IRs of the specified source position
                 * sample-by-sample for left and right ear; and apply gain */
                s->data_ir[0][offset + j] = lir[ir_samples - 1 - j] * gain_lin;
                s->data_ir[1][offset + j] = rir[ir_samples - 1 - j] * gain_lin;
            }
        } else if (s->type == FREQUENCY_DOMAIN) {
            memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
            memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));

            offset = i * n_fft; /* no. samples already written */
            for (j = 0; j < ir_samples; j++) {
                /* load non-reversed IRs of the specified source position
                 * sample-by-sample and apply gain; the left and right IRs
                 * each go to the real part of their own FFT buffer,
                 * shifted by the L and R broadband delay */
                fft_in_l[s->delay[0][i] + j].re = lir[j] * gain_lin;
                fft_in_r[s->delay[1][i] + j].re = rir[j] * gain_lin;
            }

            /* actually transform to frequency domain (IRs -> HRTFs) */
            av_fft_permute(s->fft[0], fft_in_l);
            av_fft_calc(s->fft[0], fft_in_l);
            memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
            av_fft_permute(s->fft[0], fft_in_r);
            av_fft_calc(s->fft[0], fft_in_r);
            memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
        }
    }

    if (s->type == FREQUENCY_DOMAIN) {
        s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
        s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex));
        if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->data_hrtf[0], data_hrtf_l,      /* copy HRTF data to */
               sizeof(FFTComplex) * n_conv * n_fft); /* filter struct */
        memcpy(s->data_hrtf[1], data_hrtf_r,
               sizeof(FFTComplex) * n_conv * n_fft);
    }

fail:
    av_freep(&data_hrtf_l); /* free temporary HRTF memory */
    av_freep(&data_hrtf_r);

    av_freep(&data_ir_l);   /* free temporary IR memory */
    av_freep(&data_ir_r);

    av_freep(&fft_in_l);    /* free temporary FFT memory */
    av_freep(&fft_in_r);

    return ret;
}

static av_cold int init(AVFilterContext *ctx)
{
    SOFAlizerContext *s = ctx->priv;
    int ret;

    if (!s->filename) {
        av_log(ctx, AV_LOG_ERROR, "Valid SOFA filename must be set.\n");
        return AVERROR(EINVAL);
    }

    /* preload SOFA file */
    ret = preload_sofa(ctx, s->filename, &s->sample_rate);
    if (ret) { /* file loading error */
        av_log(ctx, AV_LOG_ERROR, "Error while loading SOFA file: '%s'\n", s->filename);
    } else { /* no file loading error, resampling not required */
        av_log(ctx, AV_LOG_DEBUG, "File '%s' loaded.\n", s->filename);
    }

    if (ret) {
        av_log(ctx, AV_LOG_ERROR, "No valid SOFA file could be loaded. Please specify a valid SOFA file.\n");
        return ret;
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SOFAlizerContext *s = ctx->priv;
    int ret;

    if (s->type == FREQUENCY_DOMAIN)
        s->nb_samples = s->framesize;

    /* gain -3 dB per channel */
    s->gain_lfe = expf((s->gain - 3 * inlink->channels + s->lfe_gain) / 20 * M_LN10);

    s->n_conv = inlink->channels;

    /* load IRs to data_ir[0] and data_ir[1] for required directions */
    if ((ret = load_data(ctx, s->rotation, s->elevation, s->radius, inlink->sample_rate)) < 0)
        return ret;

    av_log(ctx, AV_LOG_DEBUG, "Sample rate: %d, channels to convolve: %d, ring buffer length: %d x %d\n",
           inlink->sample_rate, s->n_conv, inlink->channels, s->buffer_length);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SOFAlizerContext *s = ctx->priv;

    close_sofa(&s->sofa);
    av_fft_end(s->ifft[0]);
    av_fft_end(s->ifft[1]);
    av_fft_end(s->fft[0]);
    av_fft_end(s->fft[1]);
    s->ifft[0] = NULL;
    s->ifft[1] = NULL;
    s->fft[0] = NULL;
    s->fft[1] = NULL;
    av_freep(&s->delay[0]);
    av_freep(&s->delay[1]);
    av_freep(&s->data_ir[0]);
    av_freep(&s->data_ir[1]);
    av_freep(&s->ringbuffer[0]);
    av_freep(&s->ringbuffer[1]);
    av_freep(&s->speaker_azim);
    av_freep(&s->speaker_elev);
    av_freep(&s->temp_src[0]);
    av_freep(&s->temp_src[1]);
    av_freep(&s->temp_afft[0]);
    av_freep(&s->temp_afft[1]);
    av_freep(&s->temp_fft[0]);
    av_freep(&s->temp_fft[1]);
    av_freep(&s->data_hrtf[0]);
    av_freep(&s->data_hrtf[1]);
    av_freep(&s->fdsp);
}

#define OFFSET(x) offsetof(SOFAlizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption sofalizer_options[] = {
    { "sofa",        "sofa filename",                   OFFSET(filename),     AV_OPT_TYPE_STRING, {.str=NULL},             .flags = FLAGS },
    { "gain",        "set gain in dB",                  OFFSET(gain),         AV_OPT_TYPE_FLOAT,  {.dbl=0},     -20,    40, .flags = FLAGS },
    { "rotation",    "set rotation",                    OFFSET(rotation),     AV_OPT_TYPE_FLOAT,  {.dbl=0},    -360,   360, .flags = FLAGS },
    { "elevation",   "set elevation",                   OFFSET(elevation),    AV_OPT_TYPE_FLOAT,  {.dbl=0},     -90,    90, .flags = FLAGS },
    { "radius",      "set radius",                      OFFSET(radius),       AV_OPT_TYPE_FLOAT,  {.dbl=1},       0,     5, .flags = FLAGS },
    { "type",        "set processing",                  OFFSET(type),         AV_OPT_TYPE_INT,    {.i64=1},       0,     1, .flags = FLAGS, "type" },
    { "time",        "time domain",                     0,                    AV_OPT_TYPE_CONST,  {.i64=0},       0,     0, .flags = FLAGS, "type" },
    { "freq",        "frequency domain",                0,                    AV_OPT_TYPE_CONST,  {.i64=1},       0,     0, .flags = FLAGS, "type" },
    { "speakers",    "set speaker custom positions",    OFFSET(speakers_pos), AV_OPT_TYPE_STRING, {.str=0},       0,     0, .flags = FLAGS },
    { "lfegain",     "set lfe gain",                    OFFSET(lfe_gain),     AV_OPT_TYPE_FLOAT,  {.dbl=0},     -20,    40, .flags = FLAGS },
    { "framesize",   "set frame size",                  OFFSET(framesize),    AV_OPT_TYPE_INT,    {.i64=1024}, 1024, 96000, .flags = FLAGS },
    { "normalize",   "normalize IRs",                   OFFSET(normalize),    AV_OPT_TYPE_BOOL,   {.i64=1},       0,     1, .flags = FLAGS },
    { "interpolate", "interpolate IRs from neighbors",  OFFSET(interpolate),  AV_OPT_TYPE_BOOL,   {.i64=0},       0,     1, .flags = FLAGS },
    { "minphase",    "minphase IRs",                    OFFSET(minphase),     AV_OPT_TYPE_BOOL,   {.i64=0},       0,     1, .flags = FLAGS },
    { "anglestep",   "set neighbor search angle step",  OFFSET(anglestep),    AV_OPT_TYPE_FLOAT,  {.dbl=.5},   0.01,    10, .flags = FLAGS },
    { "radstep",     "set neighbor search radius step", OFFSET(radstep),      AV_OPT_TYPE_FLOAT,  {.dbl=.01},  0.01,     1, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(sofalizer);

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_sofalizer = {
    .name          = "sofalizer",
    .description   = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),
    .priv_size     = sizeof(SOFAlizerContext),
    .priv_class    = &sofalizer_class,
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};
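
/*
 * Illustrative invocation (the SOFA path is a placeholder): render
 * multichannel input to binaural stereo using the frequency-domain path:
 *
 *     ffmpeg -i input.wav \
 *            -af "sofalizer=sofa=/path/to/hrtf.sofa:type=freq:radius=1" \
 *            output.wav
 */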
Definition: audioconvert.c:56
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ff_clz
#define ff_clz
Definition: intmath.h:142
ThreadData::delay
int ** delay
Definition: af_headphone.c:156
AV_CH_TOP_FRONT_RIGHT
#define AV_CH_TOP_FRONT_RIGHT
Definition: channel_layout.h:63
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
SOFAlizerContext::lfe_channel
int lfe_channel
Definition: af_sofalizer.c:74
im
float im
Definition: fft.c:82
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
SOFAlizerContext::temp_fft
FFTComplex * temp_fft[2]
Definition: af_sofalizer.c:93
AVOption
AVOption.
Definition: opt.h:246
expf
#define expf(x)
Definition: libm.h:283
av_fft_permute
void av_fft_permute(FFTContext *s, FFTComplex *z)
Do the permutation needed BEFORE calling ff_fft_calc().
Definition: avfft.c:38
AV_CH_TOP_FRONT_LEFT
#define AV_CH_TOP_FRONT_LEFT
Definition: channel_layout.h:61
SOFAlizerContext::type
int type
Definition: af_sofalizer.c:101
av_get_channel_layout
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
Definition: channel_layout.c:139
SOFAlizerContext::anglestep
float anglestep
Definition: af_sofalizer.c:106
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
parse_speaker_pos
static void parse_speaker_pos(AVFilterContext *ctx, int64_t in_channel_layout)
Definition: af_sofalizer.c:211
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:488
outputs
static const AVFilterPad outputs[]
Definition: af_sofalizer.c:1078
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1795
srcp
BYTE int const BYTE * srcp
Definition: avisynth_c.h:908
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
AV_CH_WIDE_LEFT
#define AV_CH_WIDE_LEFT
Definition: channel_layout.h:69
sample_rate
sample_rate
Definition: ffmpeg_filter.c:191
ThreadData::temp_src
float ** temp_src
Definition: af_headphone.c:160
SOFAlizerContext::gain_lfe
float gain_lfe
Definition: af_sofalizer.c:73
AV_CH_SURROUND_DIRECT_RIGHT
#define AV_CH_SURROUND_DIRECT_RIGHT
Definition: channel_layout.h:72
SOFAlizerContext::n_conv
int n_conv
Definition: af_sofalizer.c:76
AV_CH_WIDE_RIGHT
#define AV_CH_WIDE_RIGHT
Definition: channel_layout.h:70
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
AV_CH_TOP_BACK_LEFT
#define AV_CH_TOP_BACK_LEFT
Definition: channel_layout.h:64
ff_inlink_consume_frame
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
Definition: avfilter.c:1481
AV_CH_TOP_BACK_CENTER
#define AV_CH_TOP_BACK_CENTER
Definition: channel_layout.h:65
fail
#define fail()
Definition: checkasm.h:120
VirtualSpeaker::elev
float elev
Definition: af_sofalizer.c:59
AV_CH_BACK_LEFT
#define AV_CH_BACK_LEFT
Definition: channel_layout.h:53
ThreadData::ringbuffer
float ** ringbuffer
Definition: af_headphone.c:159
SOFAlizerContext::fft
FFTContext * fft[2]
Definition: af_sofalizer.c:111
SOFAlizerContext::sofa
MySofa sofa
Definition: af_sofalizer.c:66
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
SOFAlizerContext::sample_rate
int sample_rate
Definition: af_sofalizer.c:68
src
#define src
Definition: vp8dsp.c:254
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:86
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
MySofa::lir
float * lir
Definition: af_sofalizer.c:51
MySofa::n_samples
int n_samples
Definition: af_sofalizer.c:50
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:55
SOFAlizerContext::interpolate
int interpolate
Definition: af_sofalizer.c:104
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
AV_CH_LOW_FREQUENCY
#define AV_CH_LOW_FREQUENCY
Definition: channel_layout.h:52
FREQUENCY_DOMAIN
#define FREQUENCY_DOMAIN
Definition: af_sofalizer.c:43
mask
static const uint16_t mask[17]
Definition: lzw.c:38
ff_add_channel_layout
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:343
planar
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch const uint8_t **in ch off *out planar
Definition: audioconvert.c:226
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_sofalizer.c:988
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:184
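A hedged sketch of the usual calling pattern (the helper name and '|' delimiter are illustrative, not code from this file): the first call receives the writable string, every later call passes NULL.

#include "libavutil/avstring.h"

static void parse_list(char *args)
{
    char *saveptr = NULL;
    char *tok = av_strtok(args, "|", &saveptr); /* first call: the string itself */
    while (tok) {
        /* handle one token, e.g. one "NAME azim elev" speaker entry */
        tok = av_strtok(NULL, "|", &saveptr);   /* subsequent calls: NULL */
    }
}

Note that av_strtok() writes terminators into the string, so it must be modifiable.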
get_speaker_pos
static int get_speaker_pos(AVFilterContext *ctx, float *speaker_azim, float *speaker_elev)
Definition: af_sofalizer.c:244
filters.h
load_data
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
Definition: af_sofalizer.c:719
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ctx
AVFormatContext * ctx
Definition: movenc.c:48
SOFAlizerContext::data_ir
float * data_ir[2]
Definition: af_sofalizer.c:90
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_sofalizer.c:957
SOFAlizerContext::framesize
int framesize
Definition: af_sofalizer.c:102
sofalizer_fast_convolute
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:431
MySofa::lookup
struct MYSOFA_LOOKUP * lookup
Definition: af_sofalizer.c:47
MySofa::ir_samples
int ir_samples
Definition: af_sofalizer.c:49
SOFAlizerContext::fdsp
AVFloatDSPContext * fdsp
Definition: af_sofalizer.c:114
arg
const char * arg
Definition: jacosubdec.c:66
AV_CH_STEREO_RIGHT
#define AV_CH_STEREO_RIGHT
See AV_CH_STEREO_LEFT.
Definition: channel_layout.h:68
if
if(ret)
Definition: filter_design.txt:179
activate
static int activate(AVFilterContext *ctx)
Definition: af_sofalizer.c:608
SOFAlizerContext::ringbuffer
float * ringbuffer[2]
Definition: af_sofalizer.c:79
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:962
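A minimal sketch of the kind of use this filter makes of it, pulling an azimuth/elevation pair out of one speaker token (variable names illustrative):

float azim, elev;
if (av_sscanf(arg, "%f %f", &azim, &elev) != 2)
    return AVERROR(EINVAL); /* both numbers are required */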
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
AV_CH_TOP_CENTER
#define AV_CH_TOP_CENTER
Definition: channel_layout.h:60
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1500
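A hedged sketch of the usual activate() idiom around this call: request a fixed block and only filter once it is complete (framesize is a field of this filter's context; the surrounding control flow is abbreviated).

AVFrame *in = NULL;
int ret = ff_inlink_consume_samples(inlink, s->framesize, s->framesize, &in);
if (ret < 0)
    return ret;
if (ret > 0)
    return filter_frame(inlink, in);  /* exactly framesize samples arrived */
/* ret == 0: not enough samples queued yet; forward status/wanted instead */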
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
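Typical audio-filter use, sketched: allocate the output frame, then carry over pts and other metadata while the sample data is written separately.

AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);
if (!out) {
    av_frame_free(&in);
    return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in); /* copies pts etc., not the samples */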
SOFAlizerContext::delay
int * delay[2]
Definition: af_sofalizer.c:88
ThreadData::temp_fft
FFTComplex ** temp_fft
Definition: af_headphone.c:161
SOFAlizerContext::buffer_length
int buffer_length
Definition: af_sofalizer.c:82
MySofa::rir
float * rir
Definition: af_sofalizer.c:51
MySofa::max_delay
int max_delay
Definition: af_sofalizer.c:53
avfft.h
AV_CH_FRONT_CENTER
#define AV_CH_FRONT_CENTER
Definition: channel_layout.h:51
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_sofalizer.c:569
AV_CH_FRONT_LEFT_OF_CENTER
#define AV_CH_FRONT_LEFT_OF_CENTER
Definition: channel_layout.h:55
c
Undefined Behavior: in C, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused, because optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, which can lead to effects beyond the output of computations.
Definition: undefined.txt:32
float_dsp.h
inputs
static const AVFilterPad inputs[]
Definition: af_sofalizer.c:1069
SOFAlizerContext::speaker_azim
float * speaker_azim
Definition: af_sofalizer.c:69
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
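Its canonical use is for long strings that may be compiled out, e.g. a filter's description field (sketch):

.description = NULL_IF_CONFIG_SMALL("SOFAlizer (Spatially Oriented Format for Acoustics)."),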
VirtualSpeaker
Definition: af_sofalizer.c:56
SOFAlizerContext::n_fft
int n_fft
Definition: af_sofalizer.c:84
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
SOFAlizerContext::rotation
float rotation
Definition: af_sofalizer.c:98
FFTComplex::im
FFTSample im
Definition: avfft.h:38
AVFloatDSPContext
Definition: float_dsp.h:24
FFTComplex::re
FFTSample re
Definition: avfft.h:38
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_sofalizer.c:633
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
OFFSET
#define OFFSET(x)
Definition: af_sofalizer.c:1044
offset
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be.
Definition: writing_filters.txt:86
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
AV_CH_TOP_BACK_RIGHT
#define AV_CH_TOP_BACK_RIGHT
Definition: channel_layout.h:66
AV_CH_FRONT_RIGHT_OF_CENTER
#define AV_CH_FRONT_RIGHT_OF_CENTER
Definition: channel_layout.h:56
ff_all_channel_layouts
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (w...
Definition: formats.c:401
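Inside query_formats() the returned list is attached to a link with ff_channel_layouts_ref(); a sketch of the pattern, assuming the usual error handling:

AVFilterChannelLayouts *layouts = ff_all_channel_layouts();
if (!layouts)
    return AVERROR(ENOMEM);
ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts);
if (ret)
    return ret;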
preload_sofa
static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
Definition: af_sofalizer.c:133
SOFAlizerContext::radius
float radius
Definition: af_sofalizer.c:100
interpolate
static void interpolate(float *out, float v1, float v2, int size)
Definition: twinvq.c:84
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
sample_rates
sample_rates
Definition: ffmpeg_filter.c:191
internal.h
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:226
layout
Filters: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout.
Definition: filter_design.txt:18
getfilter_float
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z, float *left, float *right, float *delay_left, float *delay_right)
Definition: af_sofalizer.c:675
in
Definition: audio_convert.c:326
normalize
Definition: normalize.py:1
ff_af_sofalizer
AVFilter ff_af_sofalizer
Definition: af_sofalizer.c:1086
FFTContext
Definition: fft.h:88
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
FLAGS
#define FLAGS
Definition: af_sofalizer.c:1045
parse_channel_name
static int parse_channel_name(char **arg, int *rchannel, char *buf)
Definition: af_sofalizer.c:186
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
sofalizer_options
static const AVOption sofalizer_options[]
Definition: af_sofalizer.c:1047
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
ThreadData
Used for passing data between threads.
Definition: af_adeclick.c:487
AV_CH_BACK_CENTER
#define AV_CH_BACK_CENTER
Definition: channel_layout.h:57
args
const char *name, AVS_Value args
Definition: avisynth_c.h:873
AV_CH_FRONT_LEFT
#define AV_CH_FRONT_LEFT
Definition: channel_layout.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
ThreadData::n_clippings
int * n_clippings
Definition: af_headphone.c:158
AV_CH_SIDE_RIGHT
#define AV_CH_SIDE_RIGHT
Definition: channel_layout.h:59
len
int len
Definition: vorbis_enc_data.h:452
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
MySofa
Definition: af_sofalizer.c:45
SOFAlizerContext::lfe_gain
float lfe_gain
Definition: af_sofalizer.c:72
SOFAlizerContext::ifft
FFTContext * ifft[2]
Definition: af_sofalizer.c:111
AVFilter
Filter definition.
Definition: avfilter.h:144
ret
ret
Definition: filter_design.txt:187
ThreadData::write
int * write
Definition: af_headphone.c:155
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_sofalizer.c:1012
left
Motion vector prediction (Snow): the motion vectors of the neighbouring blocks are scaled to compensate for the difference of reference frames, and the prediction is the median of the scaled left, top and top-right vectors.
Definition: snow.txt:386
av_fft_init
FFTContext * av_fft_init(int nbits, int inverse)
Set up a complex FFT.
Definition: avfft.c:28
sofalizer_convolute
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: af_sofalizer.c:331
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
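Allocations of the per-channel buffers follow the usual checked pattern; a sketch using this filter's field names:

s->sofa.fir = av_calloc(s->sofa.n_samples, sizeof(*s->sofa.fir));
if (!s->sofa.fir)
    return AVERROR(ENOMEM);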
channel_layout.h
SOFAlizerContext::minphase
int minphase
Definition: af_sofalizer.c:105
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
MySofa::hrtf
struct MYSOFA_HRTF * hrtf
Definition: af_sofalizer.c:46
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
VirtualSpeaker::set
uint8_t set
Definition: af_sofalizer.c:57
AV_CH_SURROUND_DIRECT_LEFT
#define AV_CH_SURROUND_DIRECT_LEFT
Definition: channel_layout.h:71
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
MySofa::neighborhood
struct MYSOFA_NEIGHBORHOOD * neighborhood
Definition: af_sofalizer.c:48
AV_CH_FRONT_RIGHT
#define AV_CH_FRONT_RIGHT
Definition: channel_layout.h:50
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
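With the flag set, work is dispatched through the execute callback; binaural filters of this kind run one job per ear (hedged sketch, assuming a prepared ThreadData td):

/* jobnr 0 computes the left ear, jobnr 1 the right ear */
ctx->internal->execute(ctx, sofalizer_convolute, &td, NULL, 2);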
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
audio.h
M_LN10
#define M_LN10
Definition: mathematics.h:43
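M_LN10 typically appears when converting a gain option from decibels to a linear factor (sketch):

/* 10^(dB/20) written via exp: linear = e^((dB/20) * ln 10) */
float gain_lin = expf((gain_db / 20.f) * (float)M_LN10);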
VirtualSpeaker::azim
float azim
Definition: af_sofalizer.c:58
ThreadData::in
AVFrame * in
Definition: af_afftdn.c:1082
SOFAlizerContext::speaker_elev
float * speaker_elev
Definition: af_sofalizer.c:70
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FF_FILTER_FORWARD_STATUS
FF_FILTER_FORWARD_STATUS(inlink, outlink)
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
avpriv_float_dsp_alloc
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
Definition: float_dsp.c:135
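The context is allocated once at configuration time; a sketch of the checked call:

s->fdsp = avpriv_float_dsp_alloc(0); /* 0: non-bit-exact (fast) variants allowed */
if (!s->fdsp)
    return AVERROR(ENOMEM);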
SOFAlizerContext::gain
float gain
Definition: af_sofalizer.c:97
ThreadData::ir
float ** ir
Definition: af_headphone.c:157
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
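A filter locked to the SOFA file's rate narrows negotiation to that single value; a sketch of the idiom:

int sample_rates[] = { s->sample_rate, -1 }; /* -1 terminates the list */
ret = ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
if (ret)
    return ret;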
AV_CH_BACK_RIGHT
#define AV_CH_BACK_RIGHT
Definition: channel_layout.h:54
SOFAlizerContext::temp_src
float * temp_src[2]
Definition: af_sofalizer.c:92
AV_CH_STEREO_LEFT
#define AV_CH_STEREO_LEFT
Stereo downmix.
Definition: channel_layout.h:67
MySofa::fir
float * fir
Definition: af_sofalizer.c:52
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:227
int
int
Definition: ffmpeg_filter.c:191
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
av_fft_calc
void av_fft_calc(FFTContext *s, FFTComplex *z)
Do a complex FFT with the parameters defined in av_fft_init().
Definition: avfft.c:43
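Initialisation and transform pair up as below; the size argument is log2 of the block length and the transform runs in place after reordering (hedged sketch):

FFTContext *fft = av_fft_init(av_log2(n_fft), 0); /* 0 = forward, 1 = inverse */
if (!fft)
    return AVERROR(ENOMEM);
av_fft_permute(fft, fft_buf); /* reorder FFTComplex[n_fft] for in-place use */
av_fft_calc(fft, fft_buf);    /* fft_buf now holds the spectrum */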
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
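av_log2() returns floor(log2(v)); rounding a length up to a power of two, as the ring buffer sizing requires, is then a one-liner (sketch):

/* smallest power of two strictly greater than len */
int pow2 = 1 << (av_log2(len) + 1);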
SOFAlizerContext::data_hrtf
FFTComplex * data_hrtf[2]
Definition: af_sofalizer.c:112
SOFAlizerContext::normalize
int normalize
Definition: af_sofalizer.c:103
AV_SAMPLE_FMT_FLT
@ AV_SAMPLE_FMT_FLT
float
Definition: samplefmt.h:63
SOFAlizerContext::elevation
float elevation
Definition: af_sofalizer.c:99
SOFAlizerContext::temp_afft
FFTComplex * temp_afft[2]
Definition: af_sofalizer.c:94
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(sofalizer)
AV_CH_SIDE_LEFT
#define AV_CH_SIDE_LEFT
Definition: channel_layout.h:58
FFTComplex
Definition: avfft.h:37
re
float re
Definition: fft.c:82
close_sofa
static int close_sofa(struct MySofa *sofa)
Definition: af_sofalizer.c:117
ThreadData::temp_afft
FFTComplex ** temp_afft
Definition: af_headphone.c:162
intmath.h