#define LPC_FILTERORDER 10
#define ST_MEM_L_TBL 85
#define MEM_LF_TBL 147
#define STATE_SHORT_LEN_20MS 57
#define STATE_SHORT_LEN_30MS 58

#define BLOCKL_MAX 240
#define CB_HALFFILTERLEN 4
#define CB_FILTERLEN 8

#define ENH_NBLOCKS_TOT 8
#define ENH_BUFL (ENH_NBLOCKS_TOT)*ENH_BLOCKL
#define ENH_BUFL_FILTEROVERHEAD 3
#define BLOCKL_MAX 240

#define STATE_SHORT_LEN_30MS 58
#define STATE_SHORT_LEN_20MS 57

#define SPL_MUL_16_16(a, b) ((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
#define SPL_MUL_16_16_RSFT(a, b, c) (SPL_MUL_16_16(a, b) >> (c))

for (j = 0; j < 48; j++)

for (j = 0; j < 56; j++)
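/* Illustration only (not part of the decoder): a minimal standalone sketch of
 * how the SPL_MUL_16_16* helpers above behave when both operands are Q15
 * fixed-point values. The test values are made up. */
#include <stdint.h>
#include <stdio.h>

#define SPL_MUL_16_16(a, b) ((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
#define SPL_MUL_16_16_RSFT(a, b, c) (SPL_MUL_16_16(a, b) >> (c))

int main(void)
{
    int16_t a = 16384, b = 8192;                 /* 0.5 and 0.25 in Q15 */
    int32_t prod = SPL_MUL_16_16(a, b);          /* widening multiply: Q30 product */
    int32_t q15  = SPL_MUL_16_16_RSFT(a, b, 15); /* renormalize to Q15: 0.125 -> 4096 */

    printf("prod=%d q15=%d\n", (int) prod, (int) q15);
    return 0;
}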
/* from index_conv() */
for (k = 4; k < 6; k++) {
    if (index[k] >= 44 && index[k] < 108) {
    } else if (index[k] >= 108 && index[k] < 128) {
/* from lsf_dequantization() */
int i, j, pos = 0, cb_pos = 0;

lsfdeq[pos + j] = lsf_codebook[cb_pos + index[i] * lsf_dim_codebook[i] + j];

pos += lsf_dim_codebook[i];

/* second LSF vector (30 ms mode) */
        index[LSF_NSPLIT + i] * lsf_dim_codebook[i] + j];

pos += lsf_dim_codebook[i];
/* from lsf_check_stability() */
for (int n = 0; n < 2; n++) {
    for (int m = 0; m < nb_vectors; m++) {
        for (int k = 0; k < dim - 1; k++) {

            if ((lsf[i + 1] - lsf[i]) < 319) {
                if (lsf[i + 1] < lsf[i]) {
                    lsf[i + 1] = lsf[i] + 160;
                    lsf[i]     = lsf[i + 1] - 160;

            lsf[i] = av_clip(lsf[i], 82, 25723);
static void lsf_interpolate(int16_t *out, int16_t *in1,
                            int16_t *in2, int16_t coef,
                            int size)
{
    int invcoef = 16384 - coef, i;

    for (i = 0; i < size; i++)
        out[i] = (coef * in1[i] + invcoef * in2[i] + 8192) >> 14;
}
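/* Illustration only: the loop above is a Q14 weighted average with rounding,
 * out = (coef*in1 + (16384 - coef)*in2 + 8192) >> 14. A standalone check with
 * made-up sample values: */
#include <stdint.h>
#include <stdio.h>

static int16_t q14_mix(int16_t in1, int16_t in2, int16_t coef)
{
    int invcoef = 16384 - coef;   /* complementary weight in Q14 */
    return (int16_t) ((coef * in1 + invcoef * in2 + 8192) >> 14);
}

int main(void)
{
    /* coef = 4096 is 0.25 in Q14, so this yields 0.25*2000 + 0.75*1000 = 1250 */
    printf("%d\n", q14_mix(2000, 1000, 4096));
    return 0;
}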
/* from lsf2lsp() */
for (i = 0; i < order; i++) {
    freq = (lsf[i] * 20861) >> 15;

    k = FFMIN(freq >> 8, 63);
/* from get_lsp_poly() */
f[1] = lsp[0] * -1024;

for (i = 2, k = 2, l = 2; i <= 5; i++, k += 2) {

    for (j = i; j > 1; j--, l--) {
        high = f[l - 1] >> 16;
        low  = (f[l - 1] - (high * (1 << 16))) >> 1;

        tmp = ((high * lsp[k]) * 4) + (((low * lsp[k]) >> 15) * 4);

        f[l] -= (unsigned)tmp;

    f[l] -= lsp[k] * (1 << 10);
/* from lsf2poly() */
for (i = 5; i > 0; i--) {
    f[0][i] += (unsigned)f[0][i - 1];
    f[1][i] -= (unsigned)f[1][i - 1];
}

for (i = 5; i > 0; i--) {
    tmp = f[0][6 - i] + (unsigned)f[1][6 - i] + 4096;
    a[6 - i] = tmp >> 13;

    tmp = f[0][6 - i] - (unsigned)f[1][6 - i] + 4096;
    a[5 + i] = tmp >> 13;
}
static void lsp_interpolate2polydec(int16_t *a, int16_t *lsf1,
                                    int16_t *lsf2, int coef,
                                    int length)
/* from bw_expand() */
for (i = 1; i < length; i++)
    out[i] = (coef[i] * in[i] + 16384) >> 15;
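/* Illustration only: bw_expand() scales each LPC coefficient by a Q15
 * per-tap chirp factor (rounding with +16384, then >> 15), which moves the
 * synthesis-filter poles toward the origin. Standalone sketch with invented
 * table values; the pass-through of tap 0 is an assumption of this sketch. */
#include <stdint.h>
#include <stdio.h>

static void bw_expand_sketch(int16_t *out, const int16_t *in,
                             const int16_t *coef, int length)
{
    out[0] = in[0];                                   /* assumed unchanged */
    for (int i = 1; i < length; i++)
        out[i] = (int16_t) ((coef[i] * in[i] + 16384) >> 15);
}

int main(void)
{
    const int16_t in[3]   = { 4096, 1000, -2000 };    /* hypothetical LPC taps */
    const int16_t coef[3] = { 32767, 29491, 26542 };  /* ~1.0, ~0.9, ~0.81 in Q15 */
    int16_t out[3];

    bw_expand_sketch(out, in, coef, 3);
    printf("%d %d %d\n", out[0], out[1], out[2]);
    return 0;
}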
static void lsp_interpolate(int16_t *syntdenum, int16_t *weightdenum,
                            int16_t *lsfdeq, int16_t length,
                            ILBCContext *s)
{
    int i, pos, lp_length;

    lsfdeq2   = lsfdeq + length;
    lp_length = length + 1;

    memcpy(syntdenum, lp, lp_length * 2);

    for (i = 1; i < 6; i++) {

        memcpy(syntdenum + pos, lp, lp_length * 2);

    for (i = 0; i < s->nsub; i++) {

        memcpy(syntdenum + pos, lp, lp_length * 2);

    memcpy(s->lsfdeqold, lsfdeq2, length * 2);

    memcpy(s->lsfdeqold, lsfdeq, length * 2);
static void filter_mafq12(int16_t *in_ptr, int16_t *out_ptr,
                          int16_t *B, int16_t B_length,
                          int16_t length)
{
    for (i = 0; i < length; i++) {
        const int16_t *b_ptr = &B[0];
        const int16_t *x_ptr = &in_ptr[i];

        for (j = 0; j < B_length; j++)
            o += b_ptr[j] * *x_ptr--;

        o = av_clip(o, -134217728, 134215679);

        out_ptr[i] = ((o + 2048) >> 12);
static void filter_arfq12(const int16_t *data_in, int16_t *data_out,
                          const int16_t *coefficients,
                          int coefficients_length, int data_length)
{
    for (i = 0; i < data_length; i++) {

        for (j = coefficients_length - 1; j > 0; j--) {
            sum += (unsigned)(coefficients[j] * data_out[i - j]);
        }

        output = coefficients[0] * data_in[i] - (unsigned)sum;
        output = av_clip(output, -134217728, 134215679);

        data_out[i] = (output + 2048) >> 12;
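/* Illustration only: filter_mafq12() and filter_arfq12() above are the Q12
 * FIR (moving-average) and all-pole (autoregressive) filters of the decoder.
 * This standalone sketch reproduces the same "+2048, >> 12" rounding on
 * made-up data, assuming zero history before sample 0 (the real routines
 * read preloaded history instead). */
#include <stdint.h>
#include <stdio.h>

/* y[n] = round( sum_k b[k]*x[n-k] / 2^12 ) */
static void fir_q12(const int16_t *x, int16_t *y, const int16_t *b,
                    int blen, int len)
{
    for (int n = 0; n < len; n++) {
        int32_t acc = 0;
        for (int k = 0; k < blen && k <= n; k++)
            acc += b[k] * x[n - k];
        y[n] = (int16_t) ((acc + 2048) >> 12);
    }
}

/* y[n] = round( (a[0]*x[n] - sum_{k>0} a[k]*y[n-k]) / 2^12 ) */
static void ar_q12(const int16_t *x, int16_t *y, const int16_t *a,
                   int alen, int len)
{
    for (int n = 0; n < len; n++) {
        int32_t sum = 0;
        for (int k = 1; k < alen && k <= n; k++)
            sum += a[k] * y[n - k];
        y[n] = (int16_t) ((a[0] * x[n] - sum + 2048) >> 12);
    }
}

int main(void)
{
    const int16_t x[4] = { 4096, 0, 0, 0 };   /* unit impulse, 4096 == 1.0 in Q12 */
    const int16_t c[2] = { 4096, 2048 };      /* hypothetical 2-tap filter */
    int16_t y[4];

    fir_q12(x, y, c, 2, 4);
    printf("FIR: %d %d %d %d\n", y[0], y[1], y[2], y[3]);
    ar_q12(x, y, c, 2, 4);
    printf("AR:  %d %d %d %d\n", y[0], y[1], y[2], y[3]);
    return 0;
}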
static void state_construct(int16_t ifm, int16_t *idx,
                            int16_t *synt_denum, int16_t *Out_fix,
                            int16_t len)
{
    int16_t *tmp1, *tmp2, *tmp3;

    numerator[k] = synt_denum[LPC_FILTERORDER - k];

    tmp2 = &idx[len - 1];

    for (k = 0; k < len; k++) {

    } else if (ifm < 59) {
        for (k = 0; k < len; k++) {

    for (k = 0; k < len; k++) {

    memset(&sampleVal[len], 0, len * 2);

    memset(sampleValVec, 0, LPC_FILTERORDER * 2);

    filter_mafq12(sampleVal, sampleMa, numerator, LPC_FILTERORDER + 1, len + LPC_FILTERORDER);
    memset(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER) * 2);
    filter_arfq12(sampleMa, sampleAr, synt_denum, LPC_FILTERORDER + 1, 2 * len);

    tmp1 = &sampleAr[len - 1];
    tmp2 = &sampleAr[2 * len - 1];

    for (k = 0; k < len; k++) {
        (*tmp3) = (*tmp1) + (*tmp2);
/* from gain_dequantization() */
return ((scale * ilbc_gain[stage][index]) + 8192) >> 14;
/* from vector_rmultiplication() (note the reversed window index) */
out[i] = (in[i] * win[-i]) >> shift;
static void add_vector_and_shift(int16_t *out, const int16_t *in1,
                                 const int16_t *in2, int length,
                                 int shift)
{
        out[i] = (in1[i] + in2[i]) >> shift;
/* from create_augmented_vector() */
int interpolation_length = FFMIN(4, index);
int16_t ilow = index - interpolation_length;

memcpy(cbVec, buffer - index, index * 2);

memcpy(cbVec + index, buffer - index,
       FFMIN(SUBL - index, index) * sizeof(*cbVec));
/* from get_codebook() */
int16_t k, base_size;
int16_t tempbuff2[SUBL + 5];

base_size = lMem - cbveclen + 1;

if (cbveclen == SUBL) {
    base_size += cbveclen / 2;

if (index < lMem - cbveclen + 1) {

    k = index + cbveclen;

    memcpy(cbvec, mem + lMem - k, cbveclen * 2);
} else if (index < base_size) {

    k = (int16_t) SPL_MUL_16_16(2, (index - (lMem - cbveclen + 1))) + cbveclen;

if (index - base_size < lMem - cbveclen + 1) {

    memIndTest = lMem - (index - base_size + cbveclen);

lag = (cbveclen << 1) - 20 + index - base_size - lMem - 1;
/* from construct_vector() */
int16_t cbvec0[SUBL];
int16_t cbvec1[SUBL];
int16_t cbvec2[SUBL];

for (j = 0; j < veclen; j++) {

    decvector[j] = (int)(a32 + 8192) >> 14;
/* from reverse_memcpy() */
int16_t *destPtr   = dest;
int16_t *sourcePtr = source;

for (j = 0; j < length; j++)
    *destPtr-- = *sourcePtr++;
static void decode_residual(ILBCContext *s, ILBCFrame *encbits,
                            int16_t *decresidual, int16_t *syntdenum)
{
    int16_t meml_gotten, Nfor, Nback, diff, start_pos;
    int16_t subcount, subframe;
    int16_t *reverseDecresidual = s->enh_buf;

    start_pos = (encbits->start - 1) * SUBL + diff;

    memset(mem, 0, (int16_t) (CB_MEML - meml_gotten) * 2);

    reverse_memcpy(&decresidual[start_pos - 1], reverseDecresidual, diff);

    for (subframe = 0; subframe < Nfor; subframe++) {

        memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
        memcpy(mem + CB_MEML - SUBL,
               &decresidual[(encbits->start + 1 + subframe) * SUBL], SUBL * 2);

    Nback = encbits->start - 1;

    memset(mem, 0, (int16_t) (CB_MEML - meml_gotten) * 2);

    for (subframe = 0; subframe < Nback; subframe++) {

        memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
        memcpy(mem + CB_MEML - SUBL, &reverseDecresidual[subframe * SUBL], SUBL * 2);
/* from max_abs_value_w16() */
int i = 0, absolute = 0, maximum = 0;

if (vector == NULL || length <= 0) {

for (i = 0; i < length; i++) {
    absolute = FFABS(vector[i]);
    if (absolute > maximum)
        maximum = absolute;
}

return FFMIN(maximum, INT16_MAX);
/* from get_size_in_bits() */
if (0xFFFF0000 & n) {

if (0x0000FF00 & (n >> bits)) bits += 8;
if (0x000000F0 & (n >> bits)) bits += 4;
if (0x0000000C & (n >> bits)) bits += 2;
if (0x00000002 & (n >> bits)) bits += 1;
if (0x00000001 & (n >> bits)) bits += 1;
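/* Illustration only: the narrowing-mask cascade above is the classic
 * binary-search bit-width idiom (result: position of the highest set bit
 * plus one, 0 for n == 0, i.e. 32 minus the leading-zero count). This
 * standalone sketch assumes the elided branch seeds `bits` with 16 or 0
 * and cross-checks the result against a naive shift loop. */
#include <stdint.h>
#include <stdio.h>

static int16_t bit_width(uint32_t n)
{
    int16_t bits = (0xFFFF0000 & n) ? 16 : 0;

    if (0x0000FF00 & (n >> bits)) bits += 8;
    if (0x000000F0 & (n >> bits)) bits += 4;
    if (0x0000000C & (n >> bits)) bits += 2;
    if (0x00000002 & (n >> bits)) bits += 1;
    if (0x00000001 & (n >> bits)) bits += 1;
    return bits;
}

int main(void)
{
    for (uint32_t n = 0; n < 100000; n++) {
        int16_t ref = 0;
        for (uint32_t t = n; t; t >>= 1)
            ref++;                            /* naive reference bit count */
        if (bit_width(n) != ref) {
            printf("mismatch at %u\n", (unsigned) n);
            return 1;
        }
    }
    printf("ok\n");
    return 0;
}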
/* from scale_dot_product() */
sum += (v1[i] * v2[i]) >> scaling;

return av_clipl_int32(sum);
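/* Illustration only: scaling each 16x16 product down before accumulation lets
 * the caller keep a long correlation sum within range, and the final value is
 * clamped to the int32_t range (av_clipl_int32() in the decoder). Standalone
 * sketch with made-up vectors; the plain if/else clamp stands in for the
 * FFmpeg helper. */
#include <stdint.h>
#include <stdio.h>

static int32_t scaled_dot(const int16_t *v1, const int16_t *v2,
                          int length, int scaling)
{
    int64_t sum = 0;

    for (int i = 0; i < length; i++)
        sum += (v1[i] * v2[i]) >> scaling;    /* per-term down-scaling */

    if (sum > INT32_MAX) sum = INT32_MAX;     /* clamp like av_clipl_int32() */
    if (sum < INT32_MIN) sum = INT32_MIN;
    return (int32_t) sum;
}

int main(void)
{
    const int16_t a[4] = { 1000, -2000, 3000, 4000 };
    const int16_t b[4] = { 4000, 3000, -2000, 1000 };

    printf("%d\n", (int) scaled_dot(a, b, 4, 2));  /* scaling = 2: products / 4 */
    return 0;
}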
static void correlation(int32_t *corr, int32_t *ener, int16_t *buffer,
                        int16_t lag, int16_t blen, int16_t srange, int16_t scale)
{
    w16ptr = &buffer[blen - srange - lag];
#define SPL_SHIFT_W32(x, c) (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))

static void do_plc(int16_t *plc_residual, int16_t *plc_lpc, int16_t PLI,
                   int16_t *decresidual, int16_t *lpc, int16_t inlag,
                   ILBCContext *s)
{
    int32_t cross, ener, cross_comp, ener_comp = 0;
    int32_t measure, max_measure, energy;
    int16_t max, cross_square_max, cross_square;
    int16_t j, lag, tmp1, tmp2, randlag;
    int16_t max_perSquare;
    int16_t scale1, scale2;

    for (j = inlag - 2; j <= inlag + 3; j++) {

        if (((shift_max << 1) + shift3) > ((shift1 << 1) + shift2)) {
            tmp1 = FFMIN(31, (shift_max << 1) + shift3 - (shift1 << 1) - shift2);

            tmp2 = FFMIN(31, (shift1 << 1) + shift2 - (shift_max << 1) - shift3);

        if ((measure >> tmp1) > (max_measure >> tmp2)) {

            cross_square_max = cross_square;

    if ((tmp2W32 > 0) && (ener_comp > 0)) {

    totscale = scale1 + scale2 - 1;

    if (max_perSquare > 7868) {

    } else if (max_perSquare > 839) {

        while ((max_perSquare < kPlcPerSqr[ind]) && (ind > 0)) {

    pitchfact = FFMIN(tmpW32, 32767);

    randlag = 53 + (s->seed & 63);

        plc_residual[i] = plc_residual[pick];

        tot_gain = use_gain;
    } else if (i < 160) {

        plc_residual[i] = SPL_MUL_16_16_RSFT(tot_gain,
                                             (pitchfact * plc_residual[i] +
                                              (32767 - pitchfact) * randvec[i] +
                                              16384) >> 15, 15);

        plc_residual[i] = randvec[i];
static int xcorr_coeff(int16_t *target, int16_t *regressor,
                       int16_t subl, int16_t searchLen,
                       int16_t offset, int16_t step)
{
    int16_t cross_corr_scale, energy_scale;
    int16_t cross_corr_sg_mod, cross_corr_sg_mod_max;
    int16_t cross_corr_mod, energy_mod, enery_mod_max;
    int16_t *rp_beg, *rp_end;
    int16_t totscale, totscale_max;

    cross_corr_sg_mod_max = 0;
    enery_mod_max = INT16_MAX;
    totscale_max = -500;

    rp_end = &regressor[subl];

    max = max_abs_value_w16(&regressor[-searchLen], (int16_t) (subl + searchLen - 1));
    rp_beg = &regressor[-1];
    rp_end = &regressor[subl - 1];

    for (k = 0; k < searchLen; k++) {

        rp = &regressor[pos];

        if ((energy > 0) && (cross_corr > 0)) {

            cross_corr_scale = norm_w32(cross_corr) - 16;
            cross_corr_mod = (int16_t) SPL_SHIFT_W32(cross_corr, cross_corr_scale);
            energy_scale = norm_w32(energy) - 16;

            cross_corr_sg_mod = (int16_t) SPL_MUL_16_16_RSFT(cross_corr_mod, cross_corr_mod, 16);

            totscale = energy_scale - (cross_corr_scale * 2);

            scalediff = totscale - totscale_max;
            scalediff = FFMIN(scalediff, 31);
            scalediff = FFMAX(scalediff, -31);

            if (scalediff < 0) {
                new_crit = ((int32_t) cross_corr_sg_mod * enery_mod_max) >> (-scalediff);
                max_crit = ((int32_t) cross_corr_sg_mod_max * energy_mod);
            } else {
                new_crit = ((int32_t) cross_corr_sg_mod * enery_mod_max);
                max_crit = ((int32_t) cross_corr_sg_mod_max * energy_mod) >> scalediff;
            }

            if (new_crit > max_crit) {
                cross_corr_sg_mod_max = cross_corr_sg_mod;
                enery_mod_max = energy_mod;
                totscale_max = totscale;

        energy += (unsigned)step * ((*rp_end * *rp_end - *rp_beg * *rp_beg) >> shifts);
static void hp_output(int16_t *signal, const int16_t *ba, int16_t *y,
                      int16_t *x, int16_t len)
{
    for (int i = 0; i < len; i++) {

        signal[i] = av_clip_intp2(tmp + 1024, 26) >> 11;

        if (tmp > 268435455) {

        } else if (tmp < -268435456) {

        y[1] = (tmp - (y[0] * (1 << 16))) >> 1;
static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    if (s->mode == 20) {

    for (i = 0; i < s->nsub; i++) {

    if (s->mode == 30) {