#define AMR_USE_16BIT_TABLES

/* amrwb_decode_init(): mark the first frame and reset the four past
 * quantified prediction errors to the initial energy */
ctx->first_frame = 1;

for (i = 0; i < 4; i++)
    ctx->prediction_error[i] = MIN_ENERGY;
/* decode_mime_header(): the mode index and the frame quality indicator (FQI)
 * are packed into the first byte of the MIME/storage frame */
ctx->fr_cur_mode = buf[0] >> 3 & 0x0F;
ctx->fr_quality  = (buf[0] & 0x4) == 0x4;
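The two assignments above read the storage-format table-of-contents byte: bits 3..6 carry the frame type and bit 2 the frame quality indicator. A minimal stand-alone sketch of that unpacking; the helper and struct names are illustrative, not the decoder's own:

#include <stdint.h>
#include <stdio.h>

/* Illustrative ToC-byte parser matching the shifts above: frame type in
 * bits 3..6, quality bit in bit 2. */
struct toc_info { unsigned frame_type; unsigned good_frame; };

static struct toc_info parse_toc(uint8_t toc)
{
    struct toc_info t;
    t.frame_type = toc >> 3 & 0x0F;     /* 0..8 speech modes, 9 = SID       */
    t.good_frame = (toc & 0x4) == 0x4;  /* FQI: 1 means the frame is intact */
    return t;
}

int main(void)
{
    struct toc_info t = parse_toc(0x44); /* 0x44 -> mode 8 (23.85 kbit/s), FQI set */
    printf("mode %u, quality %u\n", t.frame_type, t.good_frame);
    return 0;
}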
/* decode_isf_indices_36b(): 6k60 mode.  Two first-stage indexes select 9- and
 * 7-element sub-vectors, three second-stage indexes add finer corrections;
 * all tables are stored in Q15. */
for (i = 0; i < 9; i++)
    isf_q[i]      = dico1_isf[ind[0]][i]      * (1.0f / (1 << 15));

for (i = 0; i < 7; i++)
    isf_q[i + 9]  = dico2_isf[ind[1]][i]      * (1.0f / (1 << 15));

for (i = 0; i < 5; i++)
    isf_q[i]     += dico21_isf_36b[ind[2]][i] * (1.0f / (1 << 15));

for (i = 0; i < 4; i++)
    isf_q[i + 5] += dico22_isf_36b[ind[3]][i] * (1.0f / (1 << 15));

for (i = 0; i < 7; i++)
    isf_q[i + 9] += dico23_isf_36b[ind[4]][i] * (1.0f / (1 << 15));
/* decode_isf_indices_46b(): all modes except 6k60.  Same two-stage scheme,
 * with five 3- or 4-element second-stage sub-vectors. */
for (i = 0; i < 9; i++)
    isf_q[i]       = dico1_isf[ind[0]][i]  * (1.0f / (1 << 15));

for (i = 0; i < 7; i++)
    isf_q[i + 9]   = dico2_isf[ind[1]][i]  * (1.0f / (1 << 15));

for (i = 0; i < 3; i++)
    isf_q[i]      += dico21_isf[ind[2]][i] * (1.0f / (1 << 15));

for (i = 0; i < 3; i++)
    isf_q[i + 3]  += dico22_isf[ind[3]][i] * (1.0f / (1 << 15));

for (i = 0; i < 3; i++)
    isf_q[i + 6]  += dico23_isf[ind[4]][i] * (1.0f / (1 << 15));

for (i = 0; i < 3; i++)
    isf_q[i + 9]  += dico24_isf[ind[5]][i] * (1.0f / (1 << 15));

for (i = 0; i < 4; i++)
    isf_q[i + 12] += dico25_isf[ind[6]][i] * (1.0f / (1 << 15));
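Every codebook above is stored in signed Q15, so the decoder multiplies by 1/32768 to return to the floating-point ISF domain. A quick, self-contained check of that scaling, with values chosen to be exact in float:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Q15: value = integer / 2^15, so 16384 -> 0.5 and -8192 -> -0.25 */
    int16_t q_half    = 16384;
    int16_t q_quarter = -8192;

    assert(q_half    * (1.0f / (1 << 15)) ==  0.5f);
    assert(q_quarter * (1.0f / (1 << 15)) == -0.25f);
    return 0;
}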
/* interpolate_isp(): the first three subframes blend the previous frame's
 * 4th-subframe ISP vector with the current one using isfp_inter[] weights */
for (k = 0; k < 3; k++) {
    float c = isfp_inter[k];
    for (i = 0; i < LP_ORDER; i++)
        isp_q[k][i] = (1.0 - c) * isp4_past[i] + c * isp_q[3][i];
}
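The blend is a plain convex combination, so its endpoints are easy to sanity-check: weight 0 keeps the past vector, weight 1 takes the current 4th-subframe vector. A tiny stand-alone illustration; the values and the helper name are invented for the example:

#include <assert.h>

/* Convex blend of two scalars, mirroring the per-coefficient update above. */
static double blend(double past, double cur, double c)
{
    return (1.0 - c) * past + c * cur;
}

int main(void)
{
    assert(blend(0.25, 0.75, 0.0) == 0.25); /* c = 0: keep the past value  */
    assert(blend(0.25, 0.75, 1.0) == 0.75); /* c = 1: take the current one */
    assert(blend(0.25, 0.75, 0.5) == 0.50); /* halfway between the two     */
    return 0;
}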
/* decode_pitch_lag_high(): adaptive codebook index -> pitch lag, all modes
 * except 6k60/8k85.  Subframes 0 and 2 carry an absolute lag (1/4-sample
 * resolution up to index 375, 1/2 up to 439, integer above); subframes 1 and
 * 3 carry a 1/4-resolution lag relative to base_lag_int. */
static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index,
                                  uint8_t *base_lag_int, int subframe)
{
    if (subframe == 0 || subframe == 2) {
        if (pitch_index < 376) {
            *lag_int  = (pitch_index + 137) >> 2;
            *lag_frac = pitch_index - (*lag_int << 2) + 136;
        } else if (pitch_index < 440) {
            *lag_int  = (pitch_index + 257 - 376) >> 1;
            *lag_frac = (pitch_index - (*lag_int << 1) + 256 - 376) * 2;
        } else {
            *lag_int  = pitch_index - 280;
            *lag_frac = 0;
        }
        *base_lag_int = av_clip(*lag_int - 8 - (*lag_frac < 0),
                                AMRWB_P_DELAY_MIN, AMRWB_P_DELAY_MAX - 15);
    } else {
        *lag_int  = (pitch_index + 1) >> 2;
        *lag_frac = pitch_index - (*lag_int << 2);
        *lag_int += *base_lag_int;
    }
}

/* decode_pitch_lag_low(): the same scheme at 1/2-sample resolution for the
 * 8k85 and 6k60 modes */
static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,
                                 uint8_t *base_lag_int, int subframe,
                                 enum Mode mode)
{
    if (subframe == 0 || (subframe == 2 && mode != MODE_6k60)) {
        if (pitch_index < 116) {
            *lag_int  = (pitch_index + 69) >> 1;
            *lag_frac = (pitch_index - (*lag_int << 1) + 68) * 2;
        } else {
            *lag_int  = pitch_index - 24;
            *lag_frac = 0;
        }
        *base_lag_int = av_clip(*lag_int - 8 - (*lag_frac < 0),
                                AMRWB_P_DELAY_MIN, AMRWB_P_DELAY_MAX - 15);
    } else {
        *lag_int  = (pitch_index + 1) >> 1;
        *lag_frac = (pitch_index - (*lag_int << 1)) * 2;
        *lag_int += *base_lag_int;
    }
}
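For the absolute branch above, a pitch index below 376 encodes a lag between 34 and 127.75 samples in quarter-sample steps. A small stand-alone check of that arithmetic; the helper name lag_in_quarters and the test indices are picked for illustration:

#include <assert.h>

/* Decoded lag expressed in quarter samples, following the 1/4-resolution
 * branch above: lag = lag_int + lag_frac / 4, lag_frac in {-1, 0, 1, 2}. */
static int lag_in_quarters(int pitch_index)
{
    int lag_int  = (pitch_index + 137) >> 2;
    int lag_frac = pitch_index - (lag_int << 2) + 136;
    return 4 * lag_int + lag_frac;
}

int main(void)
{
    assert(lag_in_quarters(0)   == 4 * 34);      /* smallest lag: 34 samples      */
    assert(lag_in_quarters(2)   == 4 * 34 + 2);  /* 34.5 samples                  */
    assert(lag_in_quarters(375) == 4 * 128 - 1); /* largest 1/4-res lag: 127.75   */
    return 0;
}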
/* decode_pitch_vector(): convert the adaptive codebook index into a pitch lag
 * and interpolate the past excitation at that (possibly fractional) delay */
int pitch_lag_int, pitch_lag_frac;
float *exc     = ctx->excitation;
enum Mode mode = ctx->fr_cur_mode;

if (mode <= MODE_8k85)
    decode_pitch_lag_low(&pitch_lag_int, &pitch_lag_frac, amr_subframe->adap,
                         &ctx->base_pitch_lag, subframe, mode);
else
    decode_pitch_lag_high(&pitch_lag_int, &pitch_lag_frac, amr_subframe->adap,
                          &ctx->base_pitch_lag, subframe);

ctx->pitch_lag_int = pitch_lag_int;
pitch_lag_int     += pitch_lag_frac > 0;

/* interpolate the past excitation with the FIR coefficients in ac_inter[] */
ctx->acelpf_ctx.acelp_interpolatef(exc, exc + 1 - pitch_lag_int,
                                   ac_inter, 4,
                                   pitch_lag_frac + (pitch_lag_frac > 0 ? 0 : 4),
                                   LP_ORDER, AMRWB_SFR_SIZE + 1);

/* 6k60 and 8k85 have the LTP flag off: low-pass the pitch vector instead of
 * using the interpolated excitation directly */
if (amr_subframe->ltp) {
    memcpy(ctx->pitch_vector, exc, AMRWB_SFR_SIZE * sizeof(float));
} else {
    for (i = 0; i < AMRWB_SFR_SIZE; i++)
        ctx->pitch_vector[i] = 0.18 * exc[i - 1] + 0.64 * exc[i] +
                               0.18 * exc[i + 1];
}
#define BIT_STR(x, lsb, len) av_mod_uintp2((x) >> (lsb), (len))
#define BIT_POS(x, p) (((x) >> (p)) & 1)
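The pulse-position decoders below all build on these two macros: an m-bit field gives a position inside a track section and one bit gives the pulse sign. A minimal sketch of the one-pulse case, written against the BIT_STR/BIT_POS macros above; it mirrors what decode_1p_track is documented to do, but the exact FFmpeg body may differ:

/* Unpack one pulse from an (m + 1)-bit code: the low m bits are the position
 * inside the section starting at 'off', the top bit is the sign.  Positions
 * are kept 1-based so a negative value can always carry the sign (see the
 * FFABS(...) - 1 step when the fixed vector is built further down). */
static void decode_1p_track_sketch(int *out, int code, int m, int off)
{
    int pos = BIT_STR(code, 0, m) + off;

    out[0] = BIT_POS(code, m) ? -pos : pos;
}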
/* decode_3p_track() .. decode_6p_track(): each track code is decoded
 * recursively into smaller pulse groups; the lines below are the trailing
 * arguments of those recursive calls, passing a narrowed position field
 * (m - 1 or m - 2) and a section offset down to the lower-order decoder. */

/* decode_3p_track() (code: 3m+1 bits) */
                    m - 1, off + half_2p);

/* decode_4p_track() (code: 4m bits): a 2-bit case ID selects how the four
 * pulses are split between the two halves of the section */
    int half_4p, subhalf_2p;
    int b_offset = 1 << (m - 1);
                    m - 2, off + half_4p + subhalf_2p);
                    m - 1, off + half_4p);
                    m - 1, off + b_offset);
                    m - 1, off + b_offset);
                    m - 1, off + b_offset);

/* decode_5p_track() (code: 5m bits) */
                    m - 1, off + half_3p);

/* decode_6p_track() (code: 6m-2 bits): the six pulses are split between the
 * half holding more pulses and the other half */
    int b_offset   = 1 << (m - 1);
    int half_other = b_offset - half_more;
                    m - 1, off + half_more);
                    m - 1, off + half_more);
                    m - 1, off + half_other);
                    m - 1, off + half_more);
                    m - 1, off + half_other);
                    m - 1, off + half_more);
                    m - 1, off + b_offset);
/* decode_fixed_vector(): decode the per-track pulse codes according to the
 * mode, then build the algebraic (fixed) codebook vector.  The four tracks
 * are interleaved with a spacing of 4 samples (2 in 6k60). */
static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi,
                                const uint16_t *pulse_lo, const enum Mode mode)
{
    /* sig_pos stores the decoded 1-based pulse positions, with the pulse
     * amplitude carried as the sign */
    int sig_pos[4][6];
    int spacing = (mode == MODE_6k60) ? 2 : 4;
    int i, j;

    switch (mode) {
    case MODE_6k60:
        for (i = 0; i < 2; i++)
            decode_1p_track(sig_pos[i], pulse_lo[i], 5, 1);
        break;
    case MODE_8k85:
        for (i = 0; i < 4; i++)
            decode_1p_track(sig_pos[i], pulse_lo[i], 4, 1);
        break;
    case MODE_12k65:
        for (i = 0; i < 4; i++)
            decode_2p_track(sig_pos[i], pulse_lo[i], 4, 1);
        break;
    case MODE_14k25:
        for (i = 0; i < 2; i++)
            decode_3p_track(sig_pos[i], pulse_lo[i], 4, 1);
        for (i = 2; i < 4; i++)
            decode_2p_track(sig_pos[i], pulse_lo[i], 4, 1);
        break;
    case MODE_15k85:
        for (i = 0; i < 4; i++)
            decode_3p_track(sig_pos[i], pulse_lo[i], 4, 1);
        break;
    case MODE_18k25:
        for (i = 0; i < 4; i++)
            decode_4p_track(sig_pos[i], (int) pulse_lo[i] +
                                       ((int) pulse_hi[i] << 14), 4, 1);
        break;
    case MODE_19k85:
        for (i = 0; i < 2; i++)
            decode_5p_track(sig_pos[i], (int) pulse_lo[i] +
                                       ((int) pulse_hi[i] << 10), 4, 1);
        for (i = 2; i < 4; i++)
            decode_4p_track(sig_pos[i], (int) pulse_lo[i] +
                                       ((int) pulse_hi[i] << 14), 4, 1);
        break;
    case MODE_23k05:
    case MODE_23k85:
        for (i = 0; i < 4; i++)
            decode_6p_track(sig_pos[i], (int) pulse_lo[i] +
                                       ((int) pulse_hi[i] << 11), 4, 1);
        break;
    }

    memset(fixed_vector, 0, sizeof(float) * AMRWB_SFR_SIZE);

    for (i = 0; i < 4; i++)
        for (j = 0; j < pulses_nb_per_mode_tr[mode][i]; j++) {
            int pos = (FFABS(sig_pos[i][j]) - 1) * spacing + i;

            fixed_vector[pos] += sig_pos[i][j] < 0 ? -1.0 : 1.0;
        }
}
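The final double loop spreads the decoded pulses over the 64-sample subframe: track i owns positions i, i+4, i+8, ... (i, i+2, ... in 6k60), and the stored position is 1-based with the sign folded in. A small stand-alone check of that mapping; the helper name and pulse values are hypothetical:

#include <assert.h>
#include <stdlib.h>

/* Map a signed, 1-based in-track position to its index in the interleaved
 * fixed vector, as in the loop above. */
static int track_pos(int signed_pos, int track, int spacing)
{
    return (abs(signed_pos) - 1) * spacing + track;
}

int main(void)
{
    assert(track_pos( 1, 0, 4) ==  0);  /* first slot of track 0                  */
    assert(track_pos(-3, 2, 4) == 10);  /* third slot of track 2; the sign only
                                           affects the pulse amplitude            */
    assert(track_pos(16, 3, 4) == 63);  /* last slot of a 64-sample subframe      */
    return 0;
}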
static void decode_gains(const uint8_t vq_gain, const enum Mode mode,
                         float *fixed_gain_factor, float *pitch_gain)
{
    /* gains[] is a row of qua_gain_6b (6k60/8k85) or qua_gain_7b */
    *pitch_gain        = gains[0] * (1.0f / (1 << 14));  /* Q14 */
    *fixed_gain_factor = gains[1] * (1.0f / (1 << 11));  /* Q11 */
}
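The two Q formats differ only in where the binary point sits, so the unit values are easy to verify. A quick self-contained check, with constants chosen to land exactly on representable floats:

#include <assert.h>

int main(void)
{
    /* Q14 stores 1.0 as 1 << 14; Q11 stores 1.0 as 1 << 11 */
    assert((1 << 14) * (1.0f / (1 << 14)) == 1.0f);
    assert((1 << 11) * (1.0f / (1 << 11)) == 1.0f);

    /* a Q14 table entry of 20480 therefore decodes to a pitch gain of 1.25 */
    assert(20480 * (1.0f / (1 << 14)) == 1.25f);
    return 0;
}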
/* pitch_sharpening(): tilt filtering followed by periodicity enhancement at
 * the integer pitch lag */
for (i = AMRWB_SFR_SIZE - 1; i != 0; i--)
    fixed_vector[i] -= fixed_vector[i - 1] * ctx->tilt_coef;

for (i = ctx->pitch_lag_int; i < AMRWB_SFR_SIZE; i++)
    fixed_vector[i] += fixed_vector[i - ctx->pitch_lag_int] * 0.85;
/* voice_factor(): -1.0 = unvoiced .. 1.0 = voiced */
static float voice_factor(float *p_vector, float p_gain,
                          float *f_vector, float f_gain,
                          CELPMContext *ctx)
{
    double p_ener = (double) ctx->dot_productf(p_vector, p_vector,
                                               AMRWB_SFR_SIZE) * p_gain * p_gain;
    double f_ener = (double) ctx->dot_productf(f_vector, f_vector,
                                               AMRWB_SFR_SIZE) * f_gain * f_gain;

    return (p_ener - f_ener) / (p_ener + f_ener + 0.01);
}
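The returned ratio is simply the normalized difference of the two excitation energies, so a few spot values make its behaviour concrete; the energies and the helper name vf are invented for the example:

#include <assert.h>
#include <math.h>

/* Normalized energy difference, as returned above (keeping the 0.01 guard
 * against a zero denominator). */
static double vf(double p_ener, double f_ener)
{
    return (p_ener - f_ener) / (p_ener + f_ener + 0.01);
}

int main(void)
{
    assert(fabs(vf(400.0, 100.0) - 0.6) < 1e-4); /* pitch energy 4x fixed -> ~0.6 */
    assert(fabs(vf(100.0, 400.0) + 0.6) < 1e-4); /* the mirror case -> ~-0.6      */
    assert(fabs(vf(250.0, 250.0)) < 1e-6);       /* equal energies -> ~0          */
    return 0;
}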
/* anti_sparseness(): choose one of three impulse-response filters
 * ("impNr": 0 = strong, 1 = medium, 2 = none) from the pitch and fixed gain
 * history, then smooth the fixed vector with it in the two lowest modes */
if (ctx->pitch_gain[0] < 0.6) {
    ir_filter_nr = 0;           /* strong filtering */
} else if (ctx->pitch_gain[0] < 0.9) {
    ir_filter_nr = 1;           /* medium filtering */
} else
    ir_filter_nr = 2;           /* no filtering */

/* detect 'onset' */
if (ctx->fixed_gain[0] > 3.0 * ctx->fixed_gain[1]) {
    if (ir_filter_nr < 2)
        ir_filter_nr++;
} else {
    int i, count = 0;

    for (i = 0; i < 6; i++)
        if (ctx->pitch_gain[i] < 0.6)
            count++;

    if (count > 2)
        ir_filter_nr = 0;

    if (ir_filter_nr > ctx->prev_ir_filter_nr + 1)
        ir_filter_nr--;
}

/* update the filter strength history */
ctx->prev_ir_filter_nr = ir_filter_nr;

if (ir_filter_nr < 2) {
    /* convolve the fixed vector with the selected impulse response from
     * ir_filters_lookup[] into buf */
/* stability_factor(): based on the squared distance between the current and
 * past ISF vectors, clipped below at zero */
for (i = 0; i < LP_ORDER - 1; i++)
    acc += (isf[i] - isf_past[i]) * (isf[i] - isf_past[i]);

return FFMAX(0.0, 1.25 - acc * 0.8 * 512);
/* noise_enhancer(): move the fixed gain toward the previous threshold gain by
 * at most 1.5 dB, then blend with the original according to voicing and
 * stability */
static float noise_enhancer(float fixed_gain, float *prev_tr_gain,
                            float voice_fac, float stab_fac)
{
    float sm_fac = 0.5 * (1 - voice_fac) * stab_fac;
    float g0;

    if (fixed_gain < *prev_tr_gain) {
        g0 = FFMIN(*prev_tr_gain, fixed_gain + fixed_gain *
                   (6226 * (1.0f / (1 << 15))));   /* +1.5 dB */
    } else
        g0 = FFMAX(*prev_tr_gain, fixed_gain *
                   (27536 * (1.0f / (1 << 15))));  /* -1.5 dB */

    *prev_tr_gain = g0;

    return sm_fac * g0 + (1 - sm_fac) * fixed_gain;
}
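The two fixed-point constants implement a step of roughly plus or minus 1.5 dB: 6226/32768 is about 0.19 (relative increase) and 27536/32768 about 0.84 (relative decrease). A quick numeric confirmation:

#include <assert.h>
#include <math.h>

int main(void)
{
    double up   = 1.0 + 6226.0  / 32768.0;  /* gain * 1.190...  */
    double down =       27536.0 / 32768.0;  /* gain * 0.840...  */

    /* both factors are within ~0.1 dB of an exact +/-1.5 dB step */
    assert(fabs(20.0 * log10(up)   - 1.5) < 0.1);
    assert(fabs(20.0 * log10(down) + 1.5) < 0.1);
    return 0;
}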
/* pitch_enhancer(): c'(n) = c(n) - cpe * (c(n - 1) + c(n + 1)) emphasizes the
 * higher frequencies of the fixed vector */
float cpe  = 0.125 * (1 + voice_fac);
float last = fixed_vector[0];   /* holds the unmodified c(i - 1) */

fixed_vector[0] -= cpe * fixed_vector[1];

for (i = 1; i < AMRWB_SFR_SIZE - 1; i++) {
    float cur = fixed_vector[i];

    fixed_vector[i] -= cpe * (last + fixed_vector[i + 1]);
    last = cur;
}

fixed_vector[AMRWB_SFR_SIZE - 1] -= cpe * last;
/* synthesis(): build the total excitation from the pitch and fixed vectors,
 * optionally re-emphasize the pitch contribution in the low bitrate modes
 * while preserving the energy, then run 16th-order LP synthesis filtering */
ctx->acelpv_ctx.weighted_vector_sumf(excitation, ctx->pitch_vector, fixed_vector,
                                     ctx->pitch_gain[0], fixed_gain,
                                     AMRWB_SFR_SIZE);

if (ctx->pitch_gain[0] > 0.5 && ctx->fr_cur_mode <= MODE_8k85) {
    float energy = ctx->celpm_ctx.dot_productf(excitation, excitation,
                                               AMRWB_SFR_SIZE);
    float pitch_factor = 0.25 * ctx->pitch_gain[0] * ctx->pitch_gain[0];

    for (i = 0; i < AMRWB_SFR_SIZE; i++)
        excitation[i] += pitch_factor * ctx->pitch_vector[i];

    ff_scale_vector_to_given_sum_of_squares(excitation, excitation,
                                            energy, AMRWB_SFR_SIZE);
}

ctx->celpf_ctx.celp_lp_synthesis_filterf(samples, lpc, excitation,
                                         AMRWB_SFR_SIZE, LP_ORDER);
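weighted_vector_sumf is an element-wise blend; an explicit loop with the same semantics is shown here to make the excitation construction concrete. This is a sketch, not FFmpeg's optimized routine:

/* e(n) = g_p * v(n) + g_c * c(n): adaptive (pitch) plus scaled algebraic
 * (fixed) contribution, the standard ACELP excitation model. */
static void weighted_sum_sketch(float *out, const float *pitch_vec,
                                const float *fixed_vec,
                                float pitch_gain, float fixed_gain, int n)
{
    for (int i = 0; i < n; i++)
        out[i] = pitch_gain * pitch_vec[i] + fixed_gain * fixed_vec[i];
}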
/* upsample_5_4(): 12.8 kHz -> 16 kHz; every fifth output sample is copied
 * from the input, the four in between are FIR-interpolated with a phase of
 * the upsample_fir[] bank */
int int_part = 0, frac_part;

for (j = 0; j < o_size / 5; j++) {
    for (k = 1; k < 5; k++) {
        out[i] = ctx->dot_productf(in0 + int_part,
                                   upsample_fir[4 - frac_part],
                                   UPS_MEM_SIZE);
/* find_hb_gain(): gain derived from the low-band synthesis tilt and the VAD
 * flag (wsp), bounded to [0.1, 1.0] */
return av_clipf((1.0 - tilt) * (1.25 - 0.25 * wsp), 0.1, 1.0);
/* scaled_hb_excitation(): white-noise excitation rescaled so that its energy
 * equals the low-band excitation energy times hb_gain^2 */
float energy = ctx->celpm_ctx.dot_productf(synth_exc, synth_exc,
                                           AMRWB_SFR_SIZE);

ff_scale_vector_to_given_sum_of_squares(hb_exc, hb_exc,
                                        energy * hb_gain * hb_gain,
                                        AMRWB_SFR_SIZE_16k);
/* auto_correlation(): one lagged product term of the mean-removed ISF
 * difference vector */
float prod = (diff_isf[i] - mean) * (diff_isf[i - lag] - mean);
/* extrapolate_isf(): extend the 16th-order ISF vector to the 20th order used
 * by the 6k60 high-band LP filter */
float diff_isf[LP_ORDER - 2], diff_mean;
int i, j, i_max_corr;

/* difference vector of the low-band ISFs */
for (i = 0; i < LP_ORDER - 2; i++)
    diff_isf[i] = isf[i + 1] - isf[i];

diff_mean = 0.0;
for (i = 2; i < LP_ORDER - 2; i++)
    diff_mean += diff_isf[i] * (1.0f / (LP_ORDER - 4));

/* find which of the three candidate lags maximizes the auto-correlation of
 * the difference vector */
for (i = 0; i < 3; i++) {
    if (corr_lag[i] > corr_lag[i_max_corr])
        i_max_corr = i;
}

/* extrapolate the upper ISFs by repeating the detected periodicity */
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
    isf[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
                        - isf[i - 2 - i_max_corr];

/* estimate for ISF(18) used to rescale the extrapolated differences */
est = 7965 + (isf[2] - isf[3] - isf[4]) / 6.0;

for (i = LP_ORDER - 1, j = 0; i < LP_ORDER_16k - 1; i++, j++)
    diff_isf[j] = scale * (isf[i] - isf[i - 1]);

/* stability insurance: keep the sum of neighbouring differences at least 5.0 */
for (i = 1; i < LP_ORDER_16k - LP_ORDER; i++)
    if (diff_isf[i] + diff_isf[i - 1] < 5.0) {
        if (diff_isf[i] > diff_isf[i - 1]) {
            diff_isf[i - 1] = 5.0 - diff_isf[i];
        } else
            diff_isf[i] = 5.0 - diff_isf[i - 1];
    }

for (i = LP_ORDER - 1, j = 0; i < LP_ORDER_16k - 1; i++, j++)
    isf[i] = isf[i - 1] + diff_isf[j] * (1.0f / (1 << 15));
/* lpc_weighting(): y[i] = x[i] * (gamma ** i) */
out[i] = lpc[i] * fac;
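The line above is the inner statement of the spectral-expansion loop described further down (lpc_weighting). A self-contained sketch of that loop, under the assumption that lpc[0] holds the first predictor coefficient a1 so the running factor starts at gamma; the helper name is illustrative:

/* Spectral expansion: scale each LP coefficient by an increasing power of
 * gamma, i.e. out[i] = lpc[i] * gamma^(i+1) for lpc[] = {a1, a2, ...}. */
static void lpc_weighting_sketch(float *out, const float *lpc,
                                 float gamma, int size)
{
    float fac = gamma;

    for (int i = 0; i < size; i++) {
        out[i] = lpc[i] * fac;
        fac   *= gamma;
    }
}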
/* hb_synthesis(): in 6k60 mode the high-band ISF vector is interpolated
 * between the past and current frames before the 20th-order LP synthesis */
ctx->acelpv_ctx.weighted_vector_sumf(e_isf, isf_past, isf,
                                     isfp_inter[subframe],
                                     1.0 - isfp_inter[subframe], LP_ORDER);
#ifndef hb_fir_filter
/* update_sub_state(): shift the gain histories before the next subframe */
memmove(&ctx->pitch_gain[1], &ctx->pitch_gain[0], 5 * sizeof(float));
memmove(&ctx->fixed_gain[1], &ctx->fixed_gain[0], 1 * sizeof(float));
/* amrwb_decode_frame() */
int buf_size = avpkt->size;
int expected_fr_size, header_size;
float fixed_gain_factor;
float *synth_fixed_vector;   /* fixed vector the synthesis should use */
float synth_fixed_gain;      /* fixed gain the synthesis should use   */
float voice_fac, stab_fac;   /* parameters used for gain smoothing    */

buf_out = (float *) frame->data[0];

if (ctx->fr_cur_mode > MODE_SID) {
    av_log(avctx, AV_LOG_ERROR,
           "Invalid mode %d\n", ctx->fr_cur_mode);
    return AVERROR_INVALIDDATA;
}

/* MIME/storage frame: the packed core-frame bits plus one header byte */
expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;

if (buf_size < expected_fr_size) {
    av_log(avctx, AV_LOG_ERROR,
           "Frame too small (%d bytes). Truncated file?\n", buf_size);
    return AVERROR_INVALIDDATA;
}

if (ctx->first_frame) {
    ctx->first_frame = 0;
    memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(double));
}

for (sub = 0; sub < 4; sub++)
    ff_amrwb_lsp2lpc(ctx->isp[sub], ctx->lp_coef[sub], LP_ORDER);

for (sub = 0; sub < 4; sub++) {
    const AMRWBSubFrame *cur_subframe = &ctx->frame.subframe[sub];

    decode_fixed_vector(ctx->fixed_vector, cur_subframe->pul_ih,
                        cur_subframe->pul_il, ctx->fr_cur_mode);

    decode_gains(cur_subframe->vq_gain, ctx->fr_cur_mode,
                 &fixed_gain_factor, &ctx->pitch_gain[0]);

    ctx->fixed_gain[0] =
        ff_amr_set_fixed_gain(fixed_gain_factor,
                              ctx->celpm_ctx.dot_productf(ctx->fixed_vector,
                                                          ctx->fixed_vector,
                                                          AMRWB_SFR_SIZE) /
                              AMRWB_SFR_SIZE,
                              ctx->prediction_error,
                              ENERGY_MEAN, energy_pred_fac);

    /* calculate the voice factor and store the tilt for the next subframe */
    voice_fac = voice_factor(ctx->pitch_vector, ctx->pitch_gain[0],
                             ctx->fixed_vector, ctx->fixed_gain[0],
                             &ctx->celpm_ctx);
    ctx->tilt_coef = voice_fac * 0.25 + 0.25;

    /* construct the current excitation */
    for (i = 0; i < AMRWB_SFR_SIZE; i++) {
        ctx->excitation[i] *= ctx->pitch_gain[0];
        ctx->excitation[i] += ctx->fixed_gain[0] * ctx->fixed_vector[i];
    }

    /* post-processing of the excitation elements */
    synth_fixed_gain = noise_enhancer(ctx->fixed_gain[0], &ctx->prev_tr_gain,
                                      voice_fac, stab_fac);

    /* second-order high-pass filtering (31 Hz and 400 Hz sections) */
    ctx->acelpf_ctx.acelp_apply_order_2_transfer_function(&ctx->samples_up[UPS_MEM_SIZE],
    ctx->acelpf_ctx.acelp_apply_order_2_transfer_function(hb_samples,

    /* high-band synthesis at 16 kHz */
    hb_synthesis(ctx, sub, &ctx->samples_hb[LP_ORDER_16k],
                 hb_exc, ctx->isf_cur, ctx->isf_past_final);

    /* add the high band and scale to the float output range */
    for (i = 0; i < AMRWB_SFR_SIZE_16k; i++)
        sub_buf[i] = (sub_buf[i] + hb_samples[i]) * (1.0f / (1 << 15));
}

/* update state for the next frame */
memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));

return expected_fr_size;
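The expected_fr_size formula packs the core-frame bits into bytes and adds the single header byte. Since each AMR-WB mode carries bitrate times 20 ms bits per frame (for example 477 bits for 23.85 kbit/s), the resulting storage sizes can be checked by hand; the helper name is illustrative:

#include <assert.h>

/* MIME/storage frame size in bytes: packed core bits plus one header byte,
 * mirroring the expected_fr_size computation above. */
static int storage_frame_size(int core_bits)
{
    return ((core_bits + 7) >> 3) + 1;
}

int main(void)
{
    assert(storage_frame_size(132) == 18);  /*  6.60 kbit/s */
    assert(storage_frame_size(253) == 33);  /* 12.65 kbit/s */
    assert(storage_frame_size(477) == 61);  /* 23.85 kbit/s */
    return 0;
}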
static void lpc_weighting(float *out, const float *lpc, float gamma, int size)
Spectrally expand the LP coefficients using the equation: y[i] = x[i] * (gamma ** i)
static void pitch_enhancer(float *fixed_vector, float voice_fac)
Filter the fixed_vector to emphasize the higher frequencies.
uint8_t base_pitch_lag
integer part of pitch lag for the next relative subframe
static av_cold int init(AVCodecContext *avctx)
#define UPS_FIR_SIZE
upsampling filter size
float demph_mem[1]
previous value in the de-emphasis filter
uint8_t prev_ir_filter_nr
previous impulse response filter "impNr": 0 - strong, 1 - medium, 2 - none
double isp_sub4_past[LP_ORDER]
ISP vector for the 4th subframe of the previous frame.
uint64_t channel_layout
Audio channel layout.
float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy, float *prediction_error, float energy_mean, const float *pred_table)
Calculate fixed gain (part of section 6.1.3 of AMR spec)
static void decode_2p_track(int *out, int code, int m, int off)
code: 2m+1 bits
static const int16_t dico22_isf[128][3]
static const float upsample_fir[4][24]
Interpolation coefficients for 5/4 signal upsampling Table from the reference source was reordered fo...
static const uint16_t qua_hb_gain[16]
High band quantized gains for 23k85 in Q14.
int sample_rate
samples per second
av_cold void av_lfg_init(AVLFG *c, unsigned int seed)
float samples_az[LP_ORDER+AMRWB_SFR_SIZE]
low-band samples and memory from synthesis at 12.8kHz
static enum AVSampleFormat sample_fmts[]
static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
#define AV_CH_LAYOUT_MONO
float prev_sparse_fixed_gain
previous fixed gain; used by anti-sparseness to determine "onset"
void ff_amrwb_lsp2lpc(const double *lsp, float *lp, int lp_order)
LSP to LP conversion (5.2.4 of AMR-WB)
static const int16_t dico2_isf[256][7]
uint16_t hb_gain
high-band energy index (mode 23k85 only)
static void decode_isf_indices_36b(uint16_t *ind, float *isf_q)
Decode quantized ISF vectors using 36-bit indexes (6K60 mode only).
static av_always_inline av_const float truncf(float x)
This structure describes decoded (raw) audio or video data.
uint16_t adap
adaptive codebook index
void ff_acelp_filter_init(ACELPFContext *c)
Initialize ACELPFContext.
static float voice_factor(float *p_vector, float p_gain, float *f_vector, float f_gain, CELPMContext *ctx)
Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced).
float pitch_gain[6]
quantified pitch gains for the current and previous five subframes
CELPMContext celpm_ctx
context for fixed point math operations
#define AMRWB_P_DELAY_MAX
maximum pitch delay value
uint8_t pitch_lag_int
integer part of pitch lag of the previous subframe
static const float bpf_6_7_coef[31]
High-band post-processing FIR filters coefficients from Q15.
static const int16_t dico23_isf[128][3]
uint16_t isp_id[7]
index of ISP subvectors
uint8_t fr_quality
frame quality index (FQI)
float * excitation
points to current excitation in excitation_buf[]
float samples_hb[LP_ORDER_16k+AMRWB_SFR_SIZE_16k]
high-band samples and memory from synthesis at 16kHz
static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index, uint8_t *base_lag_int, int subframe, enum Mode mode)
Decode an adaptive codebook index into pitch lag for 8k85 and 6k60 modes.
static void update_sub_state(AMRWBContext *ctx)
Update context state before the next subframe.
#define BIT_STR(x, lsb, len)
Get x bits in the index interval [lsb,lsb+len-1] inclusive.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void interpolate_isp(double isp_q[4][LP_ORDER], const double *isp4_past)
Interpolate the fourth ISP vector from current and past frames to obtain an ISP vector for each subfr...
static void extrapolate_isf(float isf[LP_ORDER_16k])
Extrapolate an ISF vector to the 16kHz range (20th order LP) used at mode 6k60 LP filter for the high ...
static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf)
Decode the frame header in the "MIME/storage" format.
Mode
Frame type (Table 1a in 3GPP TS 26.101)
AMRWBFrame frame
AMRWB parameters decoded from bitstream.
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static const int16_t dico21_isf_36b[128][5]
float fixed_vector[AMRWB_SFR_SIZE]
algebraic codebook (fixed) vector for current subframe
#define BIT_POS(x, p)
Get the bit at specified position.
static unsigned int av_lfg_get(AVLFG *c)
Get the next random unsigned 32-bit number using an ALFG.
float lp_coef[4][LP_ORDER]
Linear Prediction Coefficients from ISP vector.
static const int16_t isf_mean[LP_ORDER]
Means of ISF vectors in Q15.
static const float energy_pred_fac[4]
4-tap moving average prediction coefficients in reverse order
static void decode_gains(const uint8_t vq_gain, const enum Mode mode, float *fixed_gain_factor, float *pitch_gain)
Decode pitch gain and fixed gain correction factor.
static const float hpf_zeros[2]
High-pass filters coefficients for 31 Hz and 400 Hz cutoff.
#define MIN_ISF_SPACING
minimum isf gap
void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order)
Floating point version of ff_acelp_lsf2lsp()
uint16_t ltp
ltp-filtering flag
enum Mode fr_cur_mode
mode index of current frame
static const uint16_t *const amr_bit_orderings_by_mode[]
Reordering array addresses for each mode.
static void pitch_sharpening(AMRWBContext *ctx, float *fixed_vector)
Apply pitch sharpening filters to the fixed codebook vector.
static void scaled_hb_excitation(AMRWBContext *ctx, float *hb_exc, const float *synth_exc, float hb_gain)
Generate the high-band excitation with the same energy from the lower one and scaled by the given gai...
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
float samples_up[UPS_MEM_SIZE+AMRWB_SFR_SIZE]
low-band samples and memory processed for upsampling
@ MODE_SID
comfort noise frame
uint16_t vq_gain
VQ adaptive and innovative gains.
static const int16_t qua_gain_7b[128][2]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static const uint16_t cf_sizes_wb[]
Core frame sizes in each mode.
static const float *const ir_filters_lookup[2]
float lpf_7_mem[HB_FIR_SIZE]
previous values in the high-band low pass filter
static const float hpf_31_poles[2]
#define AMRWB_SFR_SIZE
samples per subframe at 12.8 kHz
static const int16_t dico21_isf[64][3]
#define AMRWB_SFR_SIZE_16k
samples per subframe at 16 kHz
static const int16_t dico25_isf[32][4]
static const int16_t dico23_isf_36b[64][7]
static float noise_enhancer(float fixed_gain, float *prev_tr_gain, float voice_fac, float stab_fac)
Apply a non-linear fixed gain smoothing in order to reduce fluctuation in the energy of excitation.
static const uint8_t pulses_nb_per_mode_tr[][4]
[i][j] is the number of pulses present in track j at mode i
static float auto_correlation(float *diff_isf, float mean, int lag)
Calculate the auto-correlation for the ISF difference vector.
#define AMRWB_P_DELAY_MIN
minimum pitch delay value
AVLFG prng
random number generator for white noise excitation
void ff_acelp_vectors_init(ACELPVContext *c)
Initialize ACELPVContext.
Context structure for the Lagged Fibonacci PRNG.
void ff_celp_math_init(CELPMContext *c)
Initialize CELPMContext.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
uint8_t first_frame
flag active during decoding of the first frame
static float find_hb_gain(AMRWBContext *ctx, const float *synth, uint16_t hb_idx, uint8_t vad)
Calculate the high-band gain based on encoded index (23k85 mode) or on the low-band speech signal and...
#define ENERGY_MEAN
mean innovation energy (dB) in all modes
#define LP_ORDER_16k
lpc filter order at 16kHz
static const float hpf_31_gain
static void de_emphasis(float *out, float *in, float m, float mem[1])
Apply to synthesis a de-emphasis filter of the form: H(z) = 1 / (1 - m * z^-1)
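The transfer function above corresponds to the one-pole recursion y[n] = x[n] + m * y[n-1]. A minimal sketch of that recursion, assuming mem[0] carries the previous output sample between calls and that the caller passes the block length (the decoder's own de_emphasis takes no explicit length argument):

/* De-emphasis H(z) = 1 / (1 - m * z^-1): y[n] = x[n] + m * y[n - 1].
 * mem[0] holds y[-1] on entry and the last output sample on return. */
static void de_emphasis_sketch(float *out, const float *in, float m,
                               float mem[1], int n)
{
    int i;

    for (i = 0; i < n; i++) {
        out[i] = in[i] + m * mem[0];
        mem[0] = out[i];
    }
}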
enum AVSampleFormat sample_fmt
audio sample format
static void decode_5p_track(int *out, int code, int m, int off)
code: 5m bits
static void hb_fir_filter(float *out, const float fir_coef[HB_FIR_SIZE+1], float mem[HB_FIR_SIZE], const float *in)
Apply a 15th order filter to high-band samples.
float pitch_vector[AMRWB_SFR_SIZE]
adaptive codebook (pitch) vector for current subframe
float hpf_400_mem[2]
previous values in the high pass filters
static const int16_t dico22_isf_36b[128][4]
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
ACELPFContext acelpf_ctx
context for filters for ACELP-based codecs
static const int16_t dico24_isf[32][3]
static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index, uint8_t *base_lag_int, int subframe)
Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes).
static const int16_t isf_init[LP_ORDER]
Initialization tables for the processed ISF vector in Q15.
static float stability_factor(const float *isf, const float *isf_past)
Calculate a stability factor {teta} based on distance between current and past isf.
uint16_t vad
voice activity detection flag
void ff_celp_filter_init(CELPFContext *c)
Initialize CELPFContext.
float isf_past_final[LP_ORDER]
final processed ISF vector of the previous frame
int channels
number of audio channels
float fixed_gain[2]
quantified fixed gains for the current and previous subframes
static void isf_add_mean_and_past(float *isf_q, float *isf_past)
Apply mean and past ISF values using the prediction factor.
AVSampleFormat
Audio sample formats.
float bpf_6_7_mem[HB_FIR_SIZE]
previous values in the high-band band pass filter
const char * name
Name of the codec implementation.
static void decode_4p_track(int *out, int code, int m, int off)
code: 4m bits
double isp[4][LP_ORDER]
ISP vectors from current frame.
static const float lpf_7_coef[31]
static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples, const float *exc, const float *isf, const float *isf_past)
Conduct 20th order linear predictive coding synthesis for the high frequency band excitation at 16kHz...
static const float ac_inter[65]
Coefficients for FIR interpolation of excitation vector at pitch lag resulting the adaptive codebook ...
uint16_t pul_ih[4]
MSBs part of codebook index (high modes only)
void ff_celp_circ_addf(float *out, const float *in, const float *lagged, int lag, float fac, int n)
Add an array to a rotated array.
main external API structure.
static float * anti_sparseness(AMRWBContext *ctx, float *fixed_vector, float *buf)
Reduce fixed vector sparseness by smoothing with one of three IR filters, also known as "adaptive pha...
static const float hpf_400_gain
static void decode_isf_indices_46b(uint16_t *ind, float *isf_q)
Decode quantized ISF vectors using 46-bit indexes (except 6K60 mode).
float isf_cur[LP_ORDER]
working ISF vector from current frame
static const float isfp_inter[4]
ISF/ISP interpolation coefficients for each subframe.
static void ff_amr_bit_reorder(uint16_t *out, int size, const uint8_t *data, const R_TABLE_TYPE *ord_table)
Fill the frame structure variables from bitstream by parsing the given reordering table that uses the...
#define LP_ORDER
linear predictive coding filter order
static void decode_3p_track(int *out, int code, int m, int off)
code: 3m+1 bits
float prev_tr_gain
previous initial gain used by noise enhancer for threshold
static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi, const uint16_t *pulse_lo, const enum Mode mode)
Decode the algebraic codebook index to pulse positions and signs, then construct the algebraic codebo...
#define avpriv_request_sample(...)
float prediction_error[4]
quantified prediction errors {20log10(^gamma_gc)} for previous four subframes
static void decode_1p_track(int *out, int code, int m, int off)
The next six functions decode_[i]p_track decode exactly i pulses positions and amplitudes (-1 or 1) i...
static void synthesis(AMRWBContext *ctx, float *lpc, float *excitation, float fixed_gain, const float *fixed_vector, float *samples)
Conduct 16th order linear predictive coding synthesis from excitation.
This structure stores compressed data.
float excitation_buf[AMRWB_P_DELAY_MAX+LP_ORDER+2+AMRWB_SFR_SIZE]
current excitation and all necessary excitation history
#define HB_FIR_SIZE
amount of past data needed by HB filters
static av_cold int amrwb_decode_init(AVCodecContext *avctx)
static const int16_t dico1_isf[256][9]
Indexed tables for retrieval of quantized ISF vectors in Q15.
static void decode_6p_track(int *out, int code, int m, int off)
code: 6m-2 bits
void ff_scale_vector_to_given_sum_of_squares(float *out, const float *in, float sum_of_squares, const int n)
Set the sum of squares of a signal by scaling.
AMRWBSubFrame subframe[4]
data for subframes
float isf_q_past[LP_ORDER]
quantized ISF vector of the previous frame
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define MIN_ENERGY
Initial energy in dB.
static const int16_t qua_gain_6b[64][2]
Tables for decoding quantized gains { pitch (Q14), fixed factor (Q11) }.
uint16_t pul_il[4]
LSBs part of codebook index.
static void upsample_5_4(float *out, const float *in, int o_size, CELPMContext *ctx)
Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using a FIR interpolation filter.
ACELPVContext acelpv_ctx
context for vector operations for ACELP-based codecs
float tilt_coef
{beta_1} related to the voicing of the previous subframe
static void decode_pitch_vector(AMRWBContext *ctx, const AMRWBSubFrame *amr_subframe, const int subframe)
Find the pitch vector by interpolating the past excitation at the pitch delay, which is obtained in t...
CELPFContext celpf_ctx
context for filters for CELP-based codecs
static const float hpf_400_poles[2]
void ff_set_min_dist_lsf(float *lsf, double min_spacing, int size)
Adjust the quantized LSFs so they are increasing and not too close.
#define PREEMPH_FAC
factor used to de-emphasize synthesis