 33      if (nb_words > s->input_size) {
 38      s->input      += nb_words * s->word_bytes;
 39      s->input_size -= nb_words;

 64      if (nb_words > s->input_size) {
 69      switch (s->word_bits) {
 71          for (i = 0; i < nb_words; i++, src += 2, dst += 2)
 76          for (i = 0; i < nb_words; i++, src += 3)
 81          for (i = 0; i < nb_words; i++, src += 3, dst += 3)
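The loops above step through the packed payload in 2- or 3-byte strides depending on s->word_bits (16, 20 or 24 bits per word). As a rough illustration of the 16-bit case only, a minimal sketch follows; the 20/24-bit paths and the way the real convert_input() feeds a bit reader differ, and the function and variable names here are illustrative, not the decoder's.

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: copy nb_words big-endian 16-bit words while removing
 * a per-frame XOR key.  Not the decoder's convert_input(). */
static void copy_words16(uint8_t *dst, const uint8_t *src,
                         size_t nb_words, uint16_t key)
{
    for (size_t i = 0; i < nb_words; i++, src += 2, dst += 2) {
        uint16_t w = (uint16_t)((src[0] << 8) | src[1]) ^ key;
        dst[0] = w >> 8;
        dst[1] = w & 0xff;
    }
}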
129      for (i = 0; i < s->nb_channels; i++)
135      for (i = 0; i < s->nb_channels; i++) {
163      for (i = 0; i < c->nb_mstr_exp; i++)
166      for (i = 0; i < g->nb_exponent; i++)
169      for (i = k = 0; i < c->nb_mstr_exp; i++)
170          for (j = 0; j < g->nb_bias_exp[i]; j++, k++)
171              c->exponents[g->exp_ofs + k] = mstr_exp[i] + bias_exp[k];

179      for (i = 0, p = NULL, g = c->groups; i < c->nb_groups; i++, p = g, g++) {
181          if (c->exp_strategy[i]) {
184              memcpy(c->exponents + g->exp_ofs,
186                     g->nb_exponent * sizeof(c->exponents[0]));
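Line 171 rebuilds each absolute exponent as the sum of a per-group master exponent and a per-bin bias exponent. A self-contained sketch of that summation, with hypothetical array names mirroring the listing above:

/* Sketch: expand nb_mstr_exp master exponents, each covering
 * nb_bias_exp[i] bins, into a flat exponent array.  Illustrative only. */
static void expand_exponents(int *exponents, const int *mstr_exp,
                             const int *bias_exp, const int *nb_bias_exp,
                             int nb_mstr_exp)
{
    int i, j, k = 0;
    for (i = 0; i < nb_mstr_exp; i++)
        for (j = 0; j < nb_bias_exp[i]; j++, k++)
            exponents[k] = mstr_exp[i] + bias_exp[k];
}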
201      int lwc_val[17] = { 0 };
204      for (i = 0; i < 11; i++) {
209          for (j = FFMAX(i - 3, 0), k = 0; j <= i + 3; j++, k++) {
218          if (msk_val[i] < thr) {
219              for (j = FFMAX(max_j - 3, 0),
220                   k = FFMAX(3 - max_j, 0);
221                   j <= max_j + 3; j++, k++)
226      for (i = 0; i < 16; i++) {
227          int v = FFMAX(lwc_val[i], -512);
228          msk_val[i] = FFMAX(msk_val[i] + v, 0);
234                          int fg_spc, int fg_ofs, int msk_mod, int snr_ofs)
246      const uint16_t *fast_decay = fast_decay_tab[nb_code][dc_code][msk_mod];
251      for (i = 0; i < nb_exponent; i++)
252          psd_val[i] = (48 - exp[i]) * 64;
255      for (i = 0; i < nb_exponent; i++) {
256          fast_leak = log_add(fast_leak - fast_decay[i],
257                              psd_val[i] - fast_gain + fast_gain_adj[i]);
258          slow_leak = log_add(slow_leak - slow_decay,
259                              psd_val[i] - slow_gain[i]);
260          msk_val[i] = FFMAX(fast_leak, slow_leak);
265          fast_leak = log_add(fast_leak - misc_decay, psd_val[i] - fast_gain);
266          msk_val[i] = FFMAX(msk_val[i], fast_leak);
269      for (i = 0; i < nb_exponent; i++)
270          msk_val[i] = FFMAX(msk_val[i], hearing_thresh[i]);
275      for (i = 0; i < nb_exponent; i++) {
276          int v = 16 * (snr_ofs - 64) + psd_val[i] - msk_val[i] >> 5;
277          bap[i] = bap_tab[av_clip_uintp2(v, 6)];
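bit_allocate() keeps all power values on a logarithmic scale, so the leak integration in lines 256-259 needs an addition that stays in that domain. log_add() (declared further down) does this with a small correction table; a sketch of the usual max-plus-correction form is shown below. The table length of 212 matches log_add_tab[] listed later on this page, but the scaling of the entries is not reproduced here.

/* Sketch of log-domain addition: log(x + y) ~= max(lx, ly) + corr(|lx - ly|),
 * where corr() is a monotonically decreasing lookup.  Table contents are
 * placeholders; the real log_add_tab[212] values are not shown. */
static int log_add_sketch(int a, int b, const uint8_t corr_tab[212])
{
    int diff = a > b ? a - b : b - a;
    int corr = diff < 212 ? corr_tab[diff] : 0;
    return (a > b ? a : b) + corr;
}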
288      for (i = 0; i < c->nb_groups; i++) {
290          if (bap_strategy[i]) {
295              fg_spc[i]  = fg_spc[i - 1];
296              fg_ofs[i]  = fg_ofs[i - 1];
297              msk_mod[i] = msk_mod[i - 1];

308      memset(c->bap, 0, sizeof(c->bap));
312      for (i = 0, p = NULL, g = c->groups; i < c->nb_groups; i++, p = g, g++) {
313          if (c->exp_strategy[i] || bap_strategy[i]) {
315                           c->exponents + g->exp_ofs, c->bap + g->exp_ofs,
316                           fg_spc[i], fg_ofs[i], msk_mod[i], snr_ofs);
318              memcpy(c->bap + g->exp_ofs,
320                     g->nb_exponent * sizeof(c->bap[0]));
332      for (i = 0, p = NULL, g = c->groups; i < c->nb_groups; i++, p = g, g++) {
336          if (start > g->nb_exponent) {
341              for (j = 0; j < start; j++)
342                  c->idx[g->exp_ofs + j] = 0;
344              for (; j < g->nb_exponent; j++)
347              memcpy(c->idx + g->exp_ofs,
349                     g->nb_exponent * sizeof(c->idx[0]));
351              memset(c->idx + g->exp_ofs, 0, g->nb_exponent * sizeof(c->idx[0]));
363      for (i = 0, g = c->groups; i < c->nb_groups; i++, g++) {
364          float *mnt = c->mantissas + g->mnt_ofs;
366          for (j = 0; j < g->nb_exponent; j++) {
367              int bap   = c->bap[g->exp_ofs + j];
368              int idx   = c->idx[g->exp_ofs + j];
370              int count = g->nb_mantissa[j];
375                  memset(mnt, 0, count * sizeof(*mnt));
378                  int escape = -(1 << size1 - 1);
380                  for (k = 0; k < count; k++)
383                  for (k = 0; k < count; k++) {
384                      if (values[k] != escape) {
385                          mnt[k] = values[k] * scale;
398                  for (k = 0; k < count; k++)
405          for (; j < g->nb_exponent + c->bw_code; j++) {
406              memset(mnt, 0, g->nb_mantissa[j] * sizeof(*mnt));
407              mnt += g->nb_mantissa[j];
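Lines 378-385 implement escape-coded mantissas: the most negative size1-bit code is reserved as an escape, and only non-escape codes are dequantized directly. A minimal sketch of that dequantization step (the finer second pass for escaped bins is omitted, and the names are illustrative):

/* Sketch: dequantize count size1-bit signed codes into floats.  The most
 * negative code acts as an escape and is left at zero here; the real
 * decoder reads a finer value for those bins. */
static void dequant_mantissas(float *mnt, const int *values, int count,
                              int size1, float scale)
{
    int escape = -(1 << (size1 - 1));
    for (int k = 0; k < count; k++)
        mnt[k] = values[k] != escape ? values[k] * scale : 0.0f;
}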
419      if (s->rev_id[ch] > 1) {
430      if (c->gr_code == 3) {
439      for (i = 0; i < c->nb_groups; i++) {
441          if (c->nb_mstr_exp == 2) {
442              c->groups[i].nb_exponent    -= c->bw_code;
443              c->groups[i].nb_bias_exp[1] -= c->bw_code;

472      if (!s->ch_size[ch]) {
473          s->channels[seg_id][ch].nb_groups = 0;
481          s->channels[seg_id][ch].nb_groups = 0;
504      switch (g->imdct_phs) {
507          for (i = 0; i < n2; i++)
515          for (i = 0; i < n2; i++)

530      memset(result, 0, 1152 * sizeof(float));
531      for (i = 0, g = c->groups; i < c->nb_groups; i++, g++) {
533          float *dst = result + g->dst_ofs;
537          s->fdsp->vector_fmul_add(dst, src, win, dst, g->win_len);

540      for (i = 0; i < 256; i++)
542      for (i = 256; i < 896; i++)
544      for (i = 0; i < 256; i++)
550      if (begin == 960 && end == 960)
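Line 537 relies on AVFloatDSPContext.vector_fmul_add() for the windowed overlap-add of each group's inverse transform output. The scalar equivalent of that particular call, written out for clarity:

/* Scalar form of vector_fmul_add(dst, src, win, dst, len):
 * dst[i] = src[i] * win[i] + dst[i]. */
static void overlap_add(float *dst, const float *src,
                        const float *win, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * win[i] + dst[i];
}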
570      if (s->nb_channels == 4)
572      else if (s->nb_channels == 6)
583      for (ch = 0; ch < s->nb_channels; ch++) {

594                                 int *got_frame_ptr, AVPacket *avpkt)
603      if ((hdr & 0xfffffe) == 0x7888e) {
605      } else if ((hdr & 0xffffe0) == 0x788e0) {
607      } else if ((hdr & 0xfffe00) == 0x78e00) {
614      s->word_bytes  = s->word_bits + 7 >> 3;
615      s->input       = avpkt->data + s->word_bytes;
616      s->input_size  = avpkt->size / s->word_bytes - 1;
617      s->key_present = hdr >> 24 - s->word_bits & 1;
622      if (s->nb_programs > 1 && !s->multi_prog_warned) {
624             "channels will be output in native order.\n", s->nb_programs, s->prog_conf);
625          s->multi_prog_warned = 1;
628      switch (s->nb_channels) {
644          i = s->nb_channels / 2;
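Lines 603-617 probe the first 24 bits of the packet for one of three Dolby E sync patterns and derive the stream word size from whichever matched. A hedged sketch of that probe follows; the 24/20/16-bit mapping is inferred from the masks and from the word_bytes/key_present arithmetic above, error handling is omitted, and the helper name is illustrative.

/* Sketch: classify a 24-bit header value.  Returns the stream word size
 * in bits, or -1 if no Dolby E sync pattern matches. */
static int probe_word_bits(unsigned hdr)
{
    if ((hdr & 0xfffffe) == 0x7888e)
        return 24;          /* 0x07888E, key-present flag in bit 0 */
    if ((hdr & 0xffffe0) == 0x788e0)
        return 20;          /* 0x0788E,  key-present flag in bit 4 */
    if ((hdr & 0xfffe00) == 0x78e00)
        return 16;          /* 0x078E,   key-present flag in bit 8 */
    return -1;
}

/* Derived exactly as in lines 614 and 617 above:         */
/*   word_bytes  = (word_bits + 7) >> 3;                  */
/*   key_present = (hdr >> (24 - word_bits)) & 1;         */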
669      memset(s->history, 0, sizeof(s->history));

677      for (i = 0; i < 3; i++)
689      for (i = 1; i < 17; i++)
692      for (i = 2; i < 16; i++) {
702      for (i = 1; i < 17; i++) {
706          for (j = 1; j < 4; j++)
712      for (i = 0; i < 25; i++) {
717      for (i = 1; i < 1024; i++)
722      for (i = 0; i < 128; i++)
726      for (i = 0; i < 192; i++)
730      for (i = 0; i < 192; i++)
732      for (i = 0; i < 64; i++)
736      for (i = 0; i < 64; i++)
738      for (i = 0; i < 192; i++)
742      for (i = 0; i < 128; i++)
744      for (i = 0; i < 64; i++)
749      for (i = 0; i < 640; i++)
751      for (i = 0; i < 256; i++)
755      for (i = 0; i < 192; i++)
759      for (i = 0; i < 256; i++)
763      for (i = 0; i < 256; i++)
767      for (i = 0; i < 448; i++)

780      for (i = 0; i < 3; i++)
static const uint8_t ch_reorder_6[6]
static float mantissa_tab2[17][4]
AV_SAMPLE_FMT_FLTP
float, planar
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static const uint8_t nb_programs_tab[MAX_PROG_CONF+1]
static av_cold int dolby_e_close(AVCodecContext *avctx)
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
static const int8_t lfe_channel_tab[MAX_PROG_CONF+1]
static av_cold int dolby_e_init(AVCodecContext *avctx)
uint64_t channel_layout
Audio channel layout.
int sample_rate
samples per second
#define u(width, name, range_min, range_max)
static enum AVSampleFormat sample_fmts[]
static const uint8_t log_add_tab[212]
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static av_cold int end(AVCodecContext *avctx)
static const uint8_t dc_code_tab[5]
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static const float short_window2[192]
static const uint8_t nb_mstr_exp_tab[4]
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
uint64_t request_channel_layout
Request decoder to use this channel layout if it can (0 for default)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static float mantissa_tab1[17][4]
static float win(SuperEqualizerContext *s, float n, int N)
static float exponent_tab[50]
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static int parse_bit_alloc(DBEContext *s, DBEChannel *c)
static SDL_Window * window
static int convert_input(DBEContext *s, int nb_words, int key)
static const uint16_t misc_decay_tab[3][2][2]
static const uint8_t nb_channels_tab[MAX_PROG_CONF+1]
static const int16_t lwc_adj_tab[7]
static int parse_mantissas(DBEContext *s, DBEChannel *c)
static const uint16_t slow_decay_tab[2][2]
static const uint8_t mantissa_size2[16][4]
static void transform(DBEContext *s, DBEChannel *c, float *history, float *output)
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int parse_audio(DBEContext *s, int start, int end, int seg_id)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static av_cold void dolby_e_flush(AVCodecContext *avctx)
static const DBEGroup *const frm_ofs_tab[2][4]
static const float short_window3[64]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int parse_meter(DBEContext *s)
static int parse_key(DBEContext *s)
static int get_sbits(GetBitContext *s, int n)
static const uint16_t sample_rate_tab[16]
static void unbias_exponents(DBEContext *s, DBEChannel *c, DBEGroup *g)
static const int16_t lwc_gain_tab[11][7]
static void bit_allocate(int nb_exponent, int nb_code, int fr_code, int *exp, int *bap, int fg_spc, int fg_ofs, int msk_mod, int snr_ofs)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static const uint8_t band_low_tab[3]
static const uint8_t band_ofs_tab[3][4]
static const uint16_t slow_gain_tab[3][2][50]
static void flush(AVCodecContext *avctx)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static int parse_exponents(DBEContext *s, DBEChannel *c)
static int parse_metadata(DBEContext *s)
static const uint8_t nb_groups_tab[4]
static unsigned int get_bits1(GetBitContext *s)
#define AV_CH_LAYOUT_5POINT1
#define AV_EF_EXPLODE
abort decoding on minor error detection
static const uint16_t fast_gain_adj_tab[3][2][62]
static int dolby_e_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
void(* imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input)
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
void(* imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static void imdct_calc(DBEContext *s, DBEGroup *g, float *result, float *values)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
static const uint16_t fast_decay_tab[3][2][2][50]
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const uint8_t ht_code_tab[5]
static const float start_window[192]
static void apply_gain(DBEContext *s, int begin, int end, float *output)
static int skip_input(DBEContext *s, int nb_words)
static void skip_bits1(GetBitContext *s)
int channels
number of audio channels
static void calc_lowcomp(int *msk_val)
static const uint16_t hearing_thresh_tab[3][3][50]
#define i(width, name, range_min, range_max)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static int parse_metadata_ext(DBEContext *s)
static int filter_frame(DBEContext *s, AVFrame *frame)
#define AV_CH_LAYOUT_NATIVE
Channel mask value used for AVCodecContext.request_channel_layout to indicate that the user requests ...
AVSampleFormat
Audio sample formats.
#define AV_CH_LAYOUT_7POINT1
static const uint8_t ch_reorder_n[8]
const char * name
Name of the codec implementation.
static float gain_tab[1024]
static int log_add(int a, int b)
main external API structure.
av_cold void ff_kbd_window_init(float *window, float alpha, int n)
Generate a Kaiser-Bessel Derived Window.
static const uint8_t imdct_bits_tab[3]
static const uint8_t ch_reorder_8[8]
static const uint8_t bap_tab[64]
static const uint8_t ch_reorder_4[4]
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static int parse_indices(DBEContext *s, DBEChannel *c)
This structure stores compressed data.
av_cold AVFloatDSPContext * avpriv_float_dsp_alloc(int bit_exact)
Allocate a float DSP context.
static float mantissa_tab3[17][4]
#define AV_CH_LAYOUT_4POINT0
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define LOCAL_ALIGNED_32(t, v,...)
AVCodec ff_dolby_e_decoder
static int parse_channel(DBEContext *s, int ch, int seg_id)
static const uint16_t fast_gain_tab[8]
static const uint8_t mantissa_size1[16][4]
static av_cold void init_tables(void)