libavfilter/af_sofalizer.c: annotated source excerpts from the SOFAlizer filter, which renders multichannel audio binaurally using head-related transfer functions (HRTFs) read from a SOFA file.
#define FREQUENCY_DOMAIN 1   /* "type" option value: process via FFT instead of time-domain convolution */
/* close_sofa(): release the libmysofa objects owned by the filter */
mysofa_lookup_free(sofa->lookup);
...
mysofa_free(sofa->hrtf);
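The matching teardown, with the guard logic this excerpt omits, plausibly looks like the sketch below (a reconstruction, assuming the MySofa members lookup, neighborhood, hrtf and fir that appear elsewhere on this page; the NULL checks and the av_freep() call are assumptions):

static int close_sofa(struct MySofa *sofa)
{
    if (sofa->neighborhood)
        mysofa_neighborhood_free(sofa->neighborhood);
    sofa->neighborhood = NULL;
    if (sofa->lookup)
        mysofa_lookup_free(sofa->lookup);
    sofa->lookup = NULL;
    if (sofa->hrtf)
        mysofa_free(sofa->hrtf);
    sofa->hrtf = NULL;
    av_freep(&sofa->fir);
    return 0;
}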
/* preload_sofa(): open the SOFA file and hand it to libmysofa */
struct MYSOFA_HRTF *mysofa;
...
mysofa = mysofa_load(filename, &ret);
s->sofa.hrtf = mysofa;
if (ret || !mysofa) {
    /* ... loading failed: error out ... */
ret = mysofa_check(mysofa);
if (ret != MYSOFA_OK) {
    /* ... invalid SOFA data: error out ... */
/* optional loudness normalization and minimum-phase preprocessing */
mysofa_loudness(s->sofa.hrtf);
...
mysofa_minphase(s->sofa.hrtf, 0.01f);
...
/* convert measurement positions to cartesian coordinates */
mysofa_tocartesian(s->sofa.hrtf);
...
s->sofa.lookup = mysofa_lookup_init(s->sofa.hrtf);
if (s->sofa.lookup == NULL)
    /* ... */
...
s->sofa.neighborhood = mysofa_neighborhood_init_withstepdefine(s->sofa.hrtf,
                                                               /* ... step arguments ... */);
/* interpolation scratch buffer: N taps times R receivers (ears) */
s->sofa.fir = av_calloc(s->sofa.hrtf->N * s->sofa.hrtf->R, sizeof(*s->sofa.fir));
if (mysofa->DataSamplingRate.elements != 1)
    /* ... exactly one sampling rate is expected ... */
*samplingrate = mysofa->DataSamplingRate.values[0];
license = mysofa_getAttribute(mysofa->attributes, (char *)"License");
/* parse_channel_name(): the named-channel branch validates the id ... */
if (channel_id < 0 || channel_id >= 64) {
    /* ... out of range: warn and fail ... */
}
*rchannel = channel_id;
...
/* ... and the numeric branch repeats the same range check */
if (channel_id < 0 || channel_id >= 64) {
    /* ... */
}
*rchannel = channel_id;
/* parse_speaker_pos(): tokenize the "speakers" option string */
char *arg, *tokenizer, *p, *args = av_strdup(s->speakers_pos);
/* position given as "id azimuth elevation" */
s->vspkrpos[out_ch_id].set  = 1;
s->vspkrpos[out_ch_id].azim = azim;
s->vspkrpos[out_ch_id].elev = elev;
/* position given as "id azimuth": elevation defaults to 0 */
s->vspkrpos[out_ch_id].set  = 1;
s->vspkrpos[out_ch_id].azim = azim;
s->vspkrpos[out_ch_id].elev = 0;
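A minimal standalone sketch of that tokenization, using the libc equivalents of the av_strtok()/av_sscanf() helpers indexed below (the pipe-separated "id azimuth elevation" syntax follows the filter's documentation; the buffer sizes are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[] = "FL 30 0|FR -30 0|FC 0 10";  /* placeholder option value */
    char *save = NULL;
    for (char *tok = strtok_r(buf, "|", &save); tok;
         tok = strtok_r(NULL, "|", &save)) {
        char name[8];
        float azim, elev = 0.0f;                 /* elevation is optional */
        int n = sscanf(tok, "%7s %f %f", name, &azim, &elev);
        if (n >= 2)
            printf("%s: azim=%g elev=%g\n", name, azim, elev);
    }
    return 0;
}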
static int get_speaker_pos(AVFilterContext *ctx,
                           float *speaker_azim, float *speaker_elev)
{
    ...
    float azim[64] = { 0 };
    float elev[64] = { 0 };
    int ch, n_conv = ctx->inputs[0]->ch_layout.nb_channels;
    ...
    if (n_conv < 0 || n_conv > 64)
        /* ... unsupported channel count ... */
    ...
    for (ch = 0; ch < n_conv; ch++) {
switch (/* channel id at index ch */) {
...
/* height channels get a raised default elevation; the azimuth defaults and
 * most case labels are elided here (labels reconstructed from the
 * cross-reference index below) */
case AV_CHAN_TOP_CENTER:       elev[ch] = 90; break;
case AV_CHAN_TOP_FRONT_LEFT:   elev[ch] = 45; break;
case AV_CHAN_TOP_FRONT_CENTER: elev[ch] = 45; break;
case AV_CHAN_TOP_FRONT_RIGHT:  elev[ch] = 45; break;
case AV_CHAN_TOP_BACK_LEFT:    elev[ch] = 45; break;
case AV_CHAN_TOP_BACK_CENTER:  elev[ch] = 45; break;
case AV_CHAN_TOP_BACK_RIGHT:   elev[ch] = 45; break;
...
}
/* custom positions from the "speakers" option override the defaults */
if (s->vspkrpos[ch].set) {
    azim[ch] = s->vspkrpos[ch].azim;
    elev[ch] = s->vspkrpos[ch].elev;
}
/* sofalizer_convolute(): time-domain convolution; jobnr selects the ear (0 = left, 1 = right) */
int *write = &td->write[jobnr];
const int *const delay = td->delay[jobnr];
const float *const ir = td->ir[jobnr];
int *n_clippings = &td->n_clippings[jobnr];
float *ringbuffer = td->ringbuffer[jobnr];
float *temp_src = td->temp_src[jobnr];
const int ir_samples = s->sofa.ir_samples; /* length of one IR */
const int n_samples = s->sofa.n_samples;   /* IR length padded to a power of two */
...
float *dst = (float *)out->extended_data[jobnr * planar];
const int in_channels = s->n_conv;
...
const int buffer_length = s->buffer_length;
...
/* buffer_length is a power of two, so this mask implements the wrap-around */
const uint32_t modulo = (uint32_t)buffer_length - 1;
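buffer_length is forced to a power of two in load_data() further down, so the AND with modulo is a cheap wrap-around. A tiny standalone illustration (length 8 is arbitrary):

#include <stdio.h>

int main(void)
{
    const unsigned buffer_length = 8;            /* must be a power of two */
    const unsigned modulo = buffer_length - 1;   /* 0b0111 */
    for (unsigned wr = 5; wr < 11; wr++)
        printf("wr=%u -> slot %u\n", wr, wr & modulo);  /* 5 6 7 0 1 2 */
    return 0;
}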
for (l = 0; l < in_channels; l++) {
    /* each input channel owns one buffer_length slice of the ringbuffer */
    buffer[l] = ringbuffer + l * buffer_length;
}
...
const float *temp_ir = ir;  /* the same IR set is reused for every sample */
...
/* write the current input sample of every channel into its ringbuffer
 * (two loop variants, presumably packed vs. planar input; bodies elided) */
for (l = 0; l < in_channels; l++) { /* ... */ }
...
for (l = 0; l < in_channels; l++) { /* ... */ }
...
for (l = 0; l < in_channels; l++) {
    const float *const bptr = buffer[l];

    if (l == s->lfe_channel) {
        /* LFE is not convolved: apply gain and mix it straight into the output */
        dst[0] += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
        temp_ir += n_samples;  /* still step past this channel's IR slot */
        continue;
    }
/* oldest sample needed: write position minus broadband delay minus IR length */
read = (wr - delay[l] - (ir_samples - 1) + buffer_length) & modulo;
...
if (read + ir_samples < buffer_length) {
    /* the history window is contiguous in the ringbuffer */
    memmove(temp_src, bptr + read, ir_samples * sizeof(*temp_src));
} else {
    /* the window wraps: copy the tail, then the head (len = samples before the wrap) */
    memmove(temp_src, bptr + read, len * sizeof(*temp_src));
    memmove(temp_src + len, bptr, (n_samples - len) * sizeof(*temp_src));
}
...
/* multiply-accumulate the (time-reversed) IR against the linearized history */
dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_samples, 32));
temp_ir += n_samples;
...
if (fabsf(dst[0]) > 1)
    /* ... count a clipped sample ... */
...
wr = (wr + 1) & modulo;
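The two memmove() calls linearize a window that may wrap around the end of the ringbuffer into temp_src, so the dot product can run over contiguous memory. A toy standalone version of just that copy (all sizes hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
    float ring[8] = {0, 1, 2, 3, 4, 5, 6, 7};   /* ringbuffer contents */
    float window[4];
    unsigned read = 6, n = 4, buffer_length = 8;

    if (read + n < buffer_length) {
        memmove(window, ring + read, n * sizeof(*window));
    } else {
        unsigned len = buffer_length - read;     /* samples before the wrap */
        memmove(window, ring + read, len * sizeof(*window));
        memmove(window + len, ring, (n - len) * sizeof(*window));
    }
    for (unsigned i = 0; i < n; i++)
        printf("%g ", window[i]);                /* prints: 6 7 0 1 */
    printf("\n");
    return 0;
}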
/* sofalizer_fast_convolute(): frequency-domain convolution; jobnr again selects the ear */
int *write = &td->write[jobnr];
...
int *n_clippings = &td->n_clippings[jobnr];
float *ringbuffer = td->ringbuffer[jobnr];
const int ir_samples = s->sofa.ir_samples;
...
float *dst = (float *)out->extended_data[jobnr * planar];
const int in_channels = s->n_conv;
...
const int buffer_length = s->buffer_length;
...
const uint32_t modulo = (uint32_t)buffer_length - 1;
...
const int n_conv = s->n_conv;
const int n_fft = s->n_fft;
const float fft_scale = 1.0f / s->n_fft;  /* the inverse transform is unnormalized */
/* first drain the overlap left behind by the previous frame */
for (j = 0; j < n_read; j++) {
    dst[mult * j]  = ringbuffer[wr];
    ringbuffer[wr] = 0.0f;          /* clear what was just read */
    ...
    wr = (wr + 1) & modulo;
}
for (i = 0; i < n_conv; i++) {
    ...
    if (i == s->lfe_channel) {
        /* LFE bypasses the FFT path: apply gain and mix it in directly,
         * per sample (packed input) ... */
        dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
        /* ... or, for planar input: */
        dst[j] += src[j] * s->gain_lfe;
    }
hrtf_offset = hrtf + offset;  /* this channel's precomputed HRTF spectrum */
...
/* copy the channel's samples into the real part of fft_in (packed input) */
fft_in[j].re = src[j * in_channels + i];
...
/* (planar input) */
fft_in[j].re = src[j];
...
/* transform the current channel's block to the frequency domain */
tx_fn(fft, fft_out, fft_in, sizeof(*fft_in));
for (j = 0; j < n_fft; j++) {
    const AVComplexFloat *hcomplex = hrtf_offset + j;  /* reconstructed from context */
    const float re = fft_out[j].re;
    const float im = fft_out[j].im;

    /* complex multiply of the input spectrum with the HRTF, accumulated
     * across channels: (re + i*im) * (h.re + i*h.im) */
    fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
    fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
}
...
/* back to the time domain */
itx_fn(ifft, fft_out, fft_acc, sizeof(*fft_acc));
/* the first nb_samples IFFT outputs belong to the current frame */
dst[mult * j] += fft_out[j].re * fft_scale;
...
for (j = 0; j < ir_samples - 1; j++) {
    /* the remaining ir_samples - 1 outputs overlap into future frames:
     * accumulate them in the ringbuffer */
    int write_pos = (wr + j) & modulo;
    ...
    *(ringbuffer + write_pos) += fft_out[in->nb_samples + j].re * fft_scale;
}
...
/* count clippings over the finished output */
for (i = 0; i < out->nb_samples; i++) {
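This is the classic overlap-add scheme: a block of N input samples convolved with an L-tap filter produces N+L-1 samples, so the last L-1 must be carried into the next block. A compact standalone illustration, with direct convolution standing in for the FFT path and hypothetical sizes:

#include <stdio.h>

#define L 3   /* filter taps */
#define N 4   /* block size  */

static void block_convolve(const float *x, const float *h,
                           float *out, float *tail)
{
    float full[N + L - 1] = {0};

    for (int n = 0; n < N; n++)
        for (int k = 0; k < L; k++)
            full[n + k] += x[n] * h[k];

    for (int n = 0; n < N; n++)
        out[n] = full[n];
    for (int n = 0; n < L - 1; n++) {
        out[n] += tail[n];      /* add the overlap from the previous block */
        tail[n] = full[N + n];  /* save this block's overlap for the next one */
    }
}

int main(void)
{
    const float h[L] = {0.5f, 0.25f, 0.25f};
    const float x[2][N] = {{0, 0, 0, 1}, {0, 0, 0, 0}};
    float tail[L - 1] = {0}, out[N];

    for (int b = 0; b < 2; b++) {
        block_convolve(x[b], h, out, tail);
        for (int n = 0; n < N; n++)
            printf("%g ", out[n]);   /* the filter tail crosses the block boundary */
    }
    printf("\n");
    return 0;
}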
/* filter_frame(): bundle the per-ear state into a ThreadData and dispatch */
int n_clippings[2] = { 0 };
...
td.in = in; td.out = out; td.write = s->write;
td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
td.in_fft = s->in_fft;
td.out_fft = s->out_fft;
td.temp_afft = s->temp_afft;
...
if (n_clippings[0] + n_clippings[1] > 0) {
    av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
           n_clippings[0] + n_clippings[1], out->nb_samples * 2);
}
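Between this setup and the clipping check, the work is dispatched through the slice-threading helper indexed below. A sketch of that dispatch, assuming two jobs (one per ear) and the FREQUENCY_DOMAIN type switch defined at the top of the file:

/* sketch: run the left and right ear as two parallel jobs */
ff_filter_execute(ctx,
                  s->type == FREQUENCY_DOMAIN ? sofalizer_fast_convolute
                                              : sofalizer_convolute,
                  &td, NULL, 2);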
/* getfilter_float(): fetch the IR pair (plus delays) for one position */
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z,
                           float *left, float *right,
                           float *delay_left, float *delay_right)
{
    ...
    float c[3], delays[2];
    ...
    /* find the measurement closest to the requested cartesian position */
    c[0] = x, c[1] = y, c[2] = z;
    nearest = mysofa_lookup(s->sofa.lookup, c);
if (s->interpolate) {
    /* blend the IRs of the neighboring measurements */
    neighbors = mysofa_neighborhood(s->sofa.neighborhood, nearest);
    res = mysofa_interpolate(s->sofa.hrtf, c, nearest, neighbors,
                             s->sofa.fir, delays);
    /* (the nearest/neighbors arguments are restored from the libmysofa API) */
} else {
    /* use the nearest measurement as-is, with its per-ear delays */
    if (s->sofa.hrtf->DataDelay.elements > s->sofa.hrtf->R) {
        /* per-measurement delays */
        delays[0] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R];
        delays[1] = s->sofa.hrtf->DataDelay.values[nearest * s->sofa.hrtf->R + 1];
    } else {
        /* one global delay pair for the whole set */
        delays[0] = s->sofa.hrtf->DataDelay.values[0];
        delays[1] = s->sofa.hrtf->DataDelay.values[1];
    }
    res = s->sofa.hrtf->DataIR.values + nearest * s->sofa.hrtf->N * s->sofa.hrtf->R;
}
...
*delay_left  = delays[0];
*delay_right = delays[1];
...
fl = res;                      /* left-ear IR ... */
fr = res + s->sofa.hrtf->N;    /* ... the right-ear IR follows N samples later */
...
memcpy(left,  fl, sizeof(float) * s->sofa.hrtf->N);
memcpy(right, fr, sizeof(float) * s->sofa.hrtf->N);
/* load_data(): prepare the IRs for every input channel */
int nb_input_channels = ctx->inputs[0]->ch_layout.nb_channels;
/* "gain" is in dB; 3 dB of headroom is reserved per input channel */
float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
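Since expf(x * M_LN10) computes 10^x, this is the standard decibel conversion g = 10^(dB/20), with 3 dB of headroom subtracted per input channel. A quick standalone check (values arbitrary):

#include <math.h>
#include <stdio.h>

int main(void)
{
    float gain_db = 0.0f;
    int nb_channels = 2;
    /* 0 dB user gain minus 2 * 3 dB headroom => 10^(-6/20) */
    float gain_lin = expf((gain_db - 3 * nb_channels) / 20 * (float)M_LN10);
    printf("%f\n", gain_lin);   /* ~0.501187 */
    return 0;
}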
float *data_ir_l = NULL;
float *data_ir_r = NULL;
...
int i, j, azim_orig = azim, elev_orig = elev;
s->sofa.ir_samples = s->sofa.hrtf->N;
/* pad the IR length to the next power of two */
s->sofa.n_samples = 1 << (32 - ff_clz(s->sofa.ir_samples));
...
n_samples = s->sofa.n_samples;
ir_samples = s->sofa.ir_samples;
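ff_clz() counts the leading zero bits of a 32-bit value, so 1 << (32 - ff_clz(x)) yields the smallest power of two strictly greater than x. A standalone equivalent built on the GCC/Clang builtin that ff_clz() typically wraps:

#include <stdio.h>

static unsigned next_pow2_above(unsigned x)
{
    return 1u << (32 - __builtin_clz(x));   /* x must be nonzero */
}

int main(void)
{
    printf("%u %u %u\n",
           next_pow2_above(5),     /* 8 */
           next_pow2_above(8),     /* 16: exact powers of two still double */
           next_pow2_above(1023)); /* 1024 */
    return 0;
}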
/* per-ear IR matrices: s->n_conv channels times n_samples taps */
s->data_ir[0] = av_calloc(n_samples, sizeof(float) * s->n_conv);
s->data_ir[1] = av_calloc(n_samples, sizeof(float) * s->n_conv);
...
if (!s->data_ir[0] || !s->data_ir[1]) {
    /* ... */
}
...
if (!s->delay[0] || !s->delay[1]) {
    /* ... */
}
...
if (!data_ir_r || !data_ir_l) {
    /* ... */
}
s->temp_src[0] = av_calloc(n_samples, sizeof(float));
s->temp_src[1] = av_calloc(n_samples, sizeof(float));
if (!s->temp_src[0] || !s->temp_src[1]) {
    /* ... */
}
s->speaker_azim = av_calloc(s->n_conv, sizeof(*s->speaker_azim));
s->speaker_elev = av_calloc(s->n_conv, sizeof(*s->speaker_elev));
if (!s->speaker_azim || !s->speaker_elev) {
    /* ... */
}
...
/* reached when get_speaker_pos() fails: */
av_log(ctx, AV_LOG_ERROR,
       "Couldn't get speaker positions. Input channel configuration not supported.\n");
for (i = 0; i < s->n_conv; i++) {
    float coordinates[3];
    ...
    /* apply the user's rotation/elevation offsets to each speaker position */
    azim = (int)(s->speaker_azim[i] + azim_orig) % 360;
    elev = (int)(s->speaker_elev[i] + elev_orig) % 90;
    ...
    coordinates[0] = azim;
    coordinates[1] = elev;
    coordinates[2] = radius;  /* restored: radius is a load_data() parameter */
    ...
    /* spherical (azimuth, elevation, radius) to cartesian, in place */
    mysofa_s2c(coordinates);
/* fetch this channel's IR pair and delays at the computed position */
ret = getfilter_float(ctx, coordinates[0], coordinates[1], coordinates[2],
                      data_ir_l + n_samples * i,
                      data_ir_r + n_samples * i,
                      /* ... per-ear delay outputs ... */);
...
s->sofa.max_delay = FFMAX3(s->sofa.max_delay, s->delay[0][i], s->delay[1][i]);
/* worst case: full IR length plus the largest broadband delay */
n_current = n_samples + s->sofa.max_delay;
...
n_max = FFMAX(n_max, n_current);
...
/* the ringbuffer length is the next power of two, so indexing can use a mask */
s->buffer_length = 1 << (32 - ff_clz(n_max));
/* time-domain type: every input channel needs its own ringbuffer slice per ear */
s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
...
if (!data_hrtf_r || !data_hrtf_l) {
    /* ... */
}
...
/* frequency-domain type: the ringbuffer only keeps the per-ear overlap */
s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
...
if (!s->in_fft[0] || !s->in_fft[1] ||
    !s->out_fft[0] || !s->out_fft[1] ||
    !s->temp_afft[0] || !s->temp_afft[1]) {
    /* ... */
}
...
if (!s->ringbuffer[0] || !s->ringbuffer[1]) {
    /* ... */
}
...
if (!fft_in_l || !fft_in_r ||
    !fft_out_l || !fft_out_r) {
    /* ... */
}
for (i = 0; i < s->n_conv; i++) {
    ...
    for (j = 0; j < ir_samples; j++) {
        /* time-domain type: store the IRs reversed in time, with gain applied */
        s->data_ir[0][offset + j] = lir[ir_samples - 1 - j] * gain_lin;
        s->data_ir[1][offset + j] = rir[ir_samples - 1 - j] * gain_lin;
    }
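Storing the IR time-reversed lets the inner convolution loop above read the history forward: y[n] = sum_k h[k] * x[n-k] equals the dot product of the reversed h with the contiguous window ending at x[n], which is exactly what scalarproduct_float() computes. A minimal numeric check (3 taps, arbitrary data):

#include <stdio.h>

int main(void)
{
    const float h[3]     = {0.5f, 0.3f, 0.2f};   /* h[0] multiplies the newest sample */
    const float h_rev[3] = {0.2f, 0.3f, 0.5f};   /* the reversed copy that gets stored */
    const float x[5]     = {1, 2, 3, 4, 5};      /* signal history */
    int n = 4;                                   /* compute y[4] */

    float direct = 0, dot = 0;
    for (int k = 0; k < 3; k++)
        direct += h[k] * x[n - k];               /* convolution sum */
    for (int k = 0; k < 3; k++)
        dot += h_rev[k] * x[n - 2 + k];          /* forward dot product */
    printf("%g %g\n", direct, dot);              /* both print 4.3 */
    return 0;
}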
/* frequency-domain type: zero-pad each IR, bake its broadband delay in as an
 * offset, and transform it once up front */
memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
...
for (j = 0; j < ir_samples; j++) {
    ...
    fft_in_l[s->delay[0][i] + j].re = lir[j] * gain_lin;
    fft_in_r[s->delay[1][i] + j].re = rir[j] * gain_lin;
}
...
s->tx_fn[0](s->fft[0], fft_out_l, fft_in_l, sizeof(*fft_in_l));
memcpy(data_hrtf_l + offset, fft_out_l, n_fft * sizeof(*fft_out_l));
s->tx_fn[1](s->fft[1], fft_out_r, fft_in_r, sizeof(*fft_in_r));
memcpy(data_hrtf_r + offset, fft_out_r, n_fft * sizeof(*fft_out_r));
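The s->fft[]/s->tx_fn[] pairs come from libavutil's AVTX API (av_tx_init() and av_tx_fn, both indexed below). A sketch of how such a forward/inverse pair of float FFTs can be created, assuming AV_TX_FLOAT_FFT and leaving the transform length to the caller:

#include <libavutil/tx.h>

/* sketch: one forward and one inverse float FFT of length n_fft */
static int init_transforms(AVTXContext **fft, av_tx_fn *tx_fn,
                           AVTXContext **ifft, av_tx_fn *itx_fn, int n_fft)
{
    float scale = 1.0f;
    int ret;

    ret = av_tx_init(fft, tx_fn, AV_TX_FLOAT_FFT, 0, n_fft, &scale, 0);
    if (ret < 0)
        return ret;
    return av_tx_init(ifft, itx_fn, AV_TX_FLOAT_FFT, 1, n_fft, &scale, 0);
}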
if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
    /* ... */
}
...
/* keep the transformed HRTFs for the lifetime of the filter */
memcpy(s->data_hrtf[0], data_hrtf_l, /* ... */);
memcpy(s->data_hrtf[1], data_hrtf_r, /* ... */);
/* config_input(): per-link setup */
s->nb_samples = s->framesize;
...
/* LFE gain: like the main gain, with the "lfegain" option added on top */
s->gain_lfe = expf((s->gain - 3 * inlink->ch_layout.nb_channels + s->lfe_gain) / 20 * M_LN10);
...
s->n_conv = inlink->ch_layout.nb_channels;
...
av_log(ctx, AV_LOG_DEBUG,
       "Samplerate: %d Channels to convolute: %d, Length of ringbuffer: %d x %d\n",
       inlink->sample_rate, s->n_conv, inlink->ch_layout.nb_channels,
       s->buffer_length);
#define OFFSET(x) offsetof(SOFAlizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
...
.name        = "sofalizer",
...
.priv_class  = &sofalizer_class,
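For reference, the filter is driven through these options on the command line; the minimal documented invocation points the sofa option at an HRTF file (file names below are placeholders):

    ffmpeg -i input.wav -af sofalizer=sofa=hrtf.sofa output.wav

The remainder of this page is the Doxygen cross-reference index for the symbols used above.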
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
@ AV_SAMPLE_FMT_FLTP
float, planar
A list of supported channel layouts.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVComplexFloat ** out_fft
VirtualSpeaker vspkrpos[64]
static int parse_channel_name(AVFilterContext *ctx, char **arg, int *rchannel)
#define AV_CHANNEL_LAYOUT_STEREO
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static enum AVSampleFormat sample_fmts[]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVComplexFloat * in_fft[2]
This structure describes decoded (raw) audio or video data.
enum AVChannel av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx)
Get the channel with the given index in a channel layout.
#define FILTER_QUERY_FUNC(func)
const char * name
Filter name.
A link between two filters.
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration; (i)MDCTs with an odd length are currently not supported.
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
@ AV_CHAN_SURROUND_DIRECT_LEFT
static void parse_speaker_pos(AVFilterContext *ctx)
A filter pad used for either input or output.
@ AV_CHAN_STEREO_RIGHT
See above.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
static int config_input(AVFilterLink *inlink)
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
static int get_speaker_pos(AVFilterContext *ctx, float *speaker_azim, float *speaker_elev)
@ AV_TX_FLOAT_FFT
Standard complex to complex FFT with sample data type of AVComplexFloat, AVComplexDouble or AVComplexInt32.
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius, int sample_rate)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
AVComplexFloat * data_hrtf[2]
static av_cold int init(AVFilterContext *ctx)
static int sofalizer_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
struct MYSOFA_LOOKUP * lookup
#define FILTER_INPUTS(array)
static int activate(AVFilterContext *ctx)
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Describe the class of an AVClass context structure.
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
@ AV_CHAN_TOP_BACK_CENTER
const AVFilterPad ff_audio_default_filterpad[1]
An AVFilterPad array whose only entry has name "default" and is of type AVMEDIA_TYPE_AUDIO.
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
@ AV_CHAN_FRONT_RIGHT_OF_CENTER
static const AVFilterPad inputs[]
AVComplexFloat * temp_afft[2]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
An AVChannelLayout holds information about the channel layout of audio data.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames and enum AVSampleFormat for audio.
static int query_formats(AVFilterContext *ctx)
@ AV_CHAN_TOP_FRONT_RIGHT
FF_FILTER_FORWARD_WANTED(outlink, inlink)
@ AV_CHAN_FRONT_LEFT_OF_CENTER
static int preload_sofa(AVFilterContext *ctx, char *filename, int *samplingrate)
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
#define AV_LOG_INFO
Standard information.
static int getfilter_float(AVFilterContext *ctx, float x, float y, float z, float *left, float *right, float *delay_left, float *delay_right)
int nb_samples
number of audio samples (per channel) described by this frame
@ AV_CHAN_SURROUND_DIRECT_RIGHT
uint8_t ** extended_data
pointers to the data planes/channels.
#define av_malloc_array(a, b)
static const AVOption sofalizer_options[]
AVSampleFormat
Audio sample formats.
Used for passing data between threads.
const char * name
Pad name.
@ AV_CHAN_STEREO_LEFT
Stereo downmix.
void * av_calloc(size_t nmemb, size_t size)
static av_cold void uninit(AVFilterContext *ctx)
enum AVChannel av_channel_from_string(const char *str)
This is the inverse function of av_channel_name().
AVComplexFloat ** temp_afft
AVComplexFloat * out_fft[2]
static int sofalizer_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
@ AV_CHAN_LOW_FREQUENCY_2
struct MYSOFA_HRTF * hrtf
struct MYSOFA_NEIGHBORHOOD * neighborhood
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
char * av_strdup(const char *s)
Duplicate a string.
@ AV_CHAN_TOP_FRONT_CENTER
const AVFilter ff_af_sofalizer
FF_FILTER_FORWARD_STATUS(inlink, outlink)
#define FILTER_OUTPUTS(array)
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
static av_always_inline int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
AVFILTER_DEFINE_CLASS(sofalizer)
static int close_sofa(struct MySofa *sofa)