[FFmpeg-devel] [PATCH] Implement AAC Long Term Prediction (LTP) decoding module

Young Han Lee cpumaker at gmail.com
Fri Feb 4 11:54:43 CET 2011


On Fri, Feb 4, 2011 at 2:09 PM, Alex Converse <alex.converse at gmail.com> wrote:

> On Thu, Feb 3, 2011 at 3:37 AM, Young Han Lee <cpumaker at gmail.com> wrote:
> > On Thu, Feb 3, 2011 at 2:18 PM, Alex Converse <alex.converse at gmail.com> wrote:
>
> [...]
>
> >
> > please check again.
> >
>
> > diff --git a/libavcodec/aac.h b/libavcodec/aac.h
> > index cff476a..1e300fb 100644
> > --- a/libavcodec/aac.h
> > +++ b/libavcodec/aac.h
> [...]
> > @@ -206,14 +220,16 @@ typedef struct {
> >      IndividualChannelStream ics;
> >      TemporalNoiseShaping tns;
> >      Pulse pulse;
> > -    enum BandType band_type[128];             ///< band types
> > -    int band_type_run_end[120];               ///< band type run end points
> > -    float sf[120];                            ///< scalefactors
> > -    int sf_idx[128];                          ///< scalefactor indices (used by encoder)
> > -    uint8_t zeroes[128];                      ///< band is not coded (used by encoder)
> > -    DECLARE_ALIGNED(16, float, coeffs)[1024]; ///< coefficients for IMDCT
> > -    DECLARE_ALIGNED(16, float, saved)[1024];  ///< overlap
> > -    DECLARE_ALIGNED(16, float, ret)[2048];    ///< PCM output
> > +    enum BandType band_type[128];                 ///< band types
> > +    int band_type_run_end[120];                   ///< band type run end points
> > +    float sf[120];                                ///< scalefactors
> > +    int sf_idx[128];                              ///< scalefactor indices (used by encoder)
> > +    uint8_t zeroes[128];                          ///< band is not coded (used by encoder)
> > +    DECLARE_ALIGNED(16, float, coeffs)[1024];     ///< coefficients for IMDCT
> > +    DECLARE_ALIGNED(16, float, saved)[1024];      ///< overlap
> > +    DECLARE_ALIGNED(16, float, saved_ltp)[1024];  ///< overlap for LTP
> > +    DECLARE_ALIGNED(16, float, ret)[2048];        ///< PCM output
> > +    int16_t ltp_state[3072];
>
> I meant DECLARE_ALIGNED here
>
> >      PredictorState predictor_state[MAX_PREDICTORS];
> >  } SingleChannelElement;
> >
> > @@ -259,7 +275,7 @@ typedef struct {
> >       * @defgroup temporary aligned temporary buffers (We do not want to have these on the stack.)
> >       * @{
> >       */
> > -    DECLARE_ALIGNED(16, float, buf_mdct)[1024];
> > +    DECLARE_ALIGNED(16, float, buf_mdct)[2048];
> >      /** @} */
> >
> >      /**
> > @@ -268,6 +284,7 @@ typedef struct {
> >       */
> >      FFTContext mdct;
> >      FFTContext mdct_small;
> > +    FFTContext mdct_ltp;
> >      DSPContext dsp;
> >      FmtConvertContext fmt_conv;
> >      int random_state;
> > diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
> > index 411c1df..cc4ab6c 100644
> > --- a/libavcodec/aacdec.c
> > +++ b/libavcodec/aacdec.c
> > @@ -478,6 +478,7 @@ static int decode_audio_specific_config(AACContext *ac,
> >      switch (m4ac->object_type) {
> >      case AOT_AAC_MAIN:
> >      case AOT_AAC_LC:
> > +    case AOT_AAC_LTP:
> >          if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
> >              return -1;
> >          break;
> > @@ -580,8 +581,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
> >                      ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
> >                      352);
> >
> > -    ff_mdct_init(&ac->mdct, 11, 1, 1.0);
> > -    ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);
> > +    ff_mdct_init(&ac->mdct,       11, 1, 1.0);
> > +    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0);
> > +    ff_mdct_init(&ac->mdct_ltp,   11, 0, 1.0);
> >      // window initialization
> >      ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
> >      ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
> > @@ -631,6 +633,21 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
> >  }
> >
> >  /**
> > + * Decode Long Term Prediction data; reference: table 4.xx.
> > + */
> > +static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
> > +                       GetBitContext *gb, uint8_t max_sfb)
> > +{
> > +    int sfb;
> > +
> > +    ltp->lag  = get_bits(gb, 11);
> > +    ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
> > +    for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
> > +        ltp->used[sfb] = get_bits1(gb);
> > +}
> > +
> > +
> > +/**
> >   * Decode Individual Channel Stream info; reference: table 4.6.
> >   *
> >   * @param   common_window   Channels have independent [0], or shared [1], Individual Channel Stream information.
> > @@ -684,9 +701,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
> >                  memset(ics, 0, sizeof(IndividualChannelStream));
> >                  return -1;
> >              } else {
> > -                av_log_missing_feature(ac->avctx, "Predictor bit set but LTP is", 1);
> > -                memset(ics, 0, sizeof(IndividualChannelStream));
> > -                return -1;
> > +                if ((ics->ltp.present = get_bits(gb, 1)))
> > +                    decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
> >              }
> >          }
> >      }
> > @@ -1420,6 +1436,9 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
> >          i = cpe->ch[1].ics.use_kb_window[0];
> >          cpe->ch[1].ics = cpe->ch[0].ics;
> >          cpe->ch[1].ics.use_kb_window[1] = i;
> > +        if (cpe->ch[1].ics.predictor_present && (ac->m4ac.object_type != AOT_AAC_MAIN))
> > +            if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
> > +                decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
> >          ms_present = get_bits(gb, 2);
> >          if (ms_present == 3) {
> >              av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
> > @@ -1659,6 +1678,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
> >      int w, filt, m, i;
> >      int bottom, top, order, start, end, size, inc;
> >      float lpc[TNS_MAX_ORDER];
> > +    float tmp[TNS_MAX_ORDER];
> >
> >      for (w = 0; w < ics->num_windows; w++) {
> >          bottom = ics->num_swb;
> > @@ -1684,14 +1704,123 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
> >              }
> >              start += w * 128;
> >
> > -            // ar filter
> > -            for (m = 0; m < size; m++, start += inc)
> > -                for (i = 1; i <= FFMIN(m, order); i++)
> > -                    coef[start] -= coef[start - i * inc] * lpc[i - 1];
> > +            if (decode) {
> > +                // ar filter
> > +                for (m = 0; m < size; m++, start += inc)
> > +                    for (i = 1; i <= FFMIN(m, order); i++)
> > +                        coef[start] -= coef[start - i * inc] * lpc[i - 1];
> > +            } else {
> > +                // ma filter
> > +                for (m = 0; m < size; m++, start += inc) {
> > +                    tmp[0] = coef[start];
> > +                    for (i = 1; i <= FFMIN(m, order); i++)
> > +                        coef[start] += tmp[i] * lpc[i - 1];
> > +                    for (i = order; i > 0; i--)
> > +                        tmp[i] = tmp[i - 1];
> > +                }
> > +            }
> >          }
> >      }
> >  }
> >
> > +
> > +
> > +/**
> > + *  Apply windowing and MDCT to obtain the spectral
> > + *  coefficient from the predicted sample by LTP.
> > + */
> > +static void windowing_and_mdct_ltp(AACContext *ac, float *out,
> > +                                   float *in, IndividualChannelStream *ics)
> > +{
> > +    const float * lwindow      = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
> > +    const float * swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
> > +    const float * lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
> > +    const float * swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
> > +    float * buf = ac->buf_mdct;
> > +
> > +    if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
> > +        ac->dsp.vector_fmul(buf, in, lwindow_prev, 1024);
> > +    } else {
> > +        memset(buf, 0, 448 * sizeof(float));
> > +        ac->dsp.vector_fmul(buf + 448, in + 448, swindow_prev, 128);
> > +        memcpy(buf + 576, in + 576, 448 * sizeof(float));
> > +    }
> > +    if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
> > +        ac->dsp.vector_fmul_reverse(buf + 1024, in + 1024, lwindow, 1024);
> > +    } else {
> > +        memcpy(buf + 1024, in + 1024, 448 * sizeof(float));
> > +        ac->dsp.vector_fmul_reverse(buf + 1024 + 448, in + 1024 + 448, swindow, 128);
> > +        memset(buf + 1024 + 576, 0, 448 * sizeof(float));
> > +    }
> > +    ff_mdct_calc(&ac->mdct_ltp, out, buf);
> > +}
> > +
> > +
> > +/**
> > + * Apply the long term prediction
> > + */
> > +static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
> > +{
> > +    const LongTermPrediction *ltp = &sce->ics.ltp;
> > +    const uint16_t *offsets = sce->ics.swb_offset;
> > +    int i, sfb;
> > +
> > +    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
> > +        float x_est[2048], X_est[1024];
>
> These are too big to use stack allocations.
> Also these names are too similar.
>
> Also any buffer being sent to an MDCT needs to be DECLARE_ALIGNED
>
>
Changed these to pointers.
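
Roughly, the prediction buffers move off the stack into the decoder
context so they can be DECLARE_ALIGNED before being fed to the MDCT.
A sketch with assumed field names (the attached patch is authoritative):

    /* in the context struct (field names are illustrative only): */
    DECLARE_ALIGNED(16, float, ltp_pred_time)[2048]; ///< time-domain LTP prediction
    DECLARE_ALIGNED(16, float, ltp_pred_freq)[1024]; ///< its MDCT spectrum

    /* in apply_ltp(), the former stack arrays become pointers: */
    float *pred_time = ac->ltp_pred_time;
    float *pred_freq = ac->ltp_pred_freq;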


> > +        int16_t num_samples = 2048;
> > +        if (ltp->lag < 1024)
> > +            num_samples = ltp->lag + 1024;
> > +        for (i = 0; i < num_samples; i++)
> > +            x_est[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
> > +        for ( ; i < 2048; i++)
> > +            x_est[i] = 0.0f;
>
> Let's use memset() for this last loop
>
>
changed
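
That is, the trailing zero fill becomes a single call (keeping the
variable names from the hunk above):

    memset(&x_est[num_samples], 0, (2048 - num_samples) * sizeof(float));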


> > +
> > +        windowing_and_mdct_ltp(ac, X_est, x_est, &sce->ics);
> > +
> > +        if (sce->tns.present)
> > +            apply_tns(X_est, &sce->tns, &sce->ics, 0);
> > +
> > +        for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
> > +            if (ltp->used[sfb])
> > +                for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
> > +                    sce->coeffs[i] += X_est[i];
> > +    }
> > +}
> > +
> > +/**
> > + * Update the LTP buffer for next frame
> > + */
> > +static void update_ltp(AACContext *ac, SingleChannelElement *sce)
> > +{
> > +    IndividualChannelStream *ics = &sce->ics;
> > +    float *saved     = sce->saved;
> > +    float *saved_ltp = sce->saved_ltp;
> > +    const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
> > +    const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
> > +    float *buf  = ac->buf_mdct;
> > +    int i;
> > +
> > +    for (i = 0; i < 512; i++)
> > +        buf[1535 - i] = buf[512 + i];
> > +
> > +    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
> > +        memcpy(saved_ltp,       saved, 512 * sizeof(float));
> > +        memset(saved_ltp + 576, 0,     448 * sizeof(float));
> > +        ac->dsp.vector_fmul_reverse(saved_ltp+448, buf + 960, swindow,     128);
> > +    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
> > +        memcpy(saved_ltp,       buf + 512, 448 * sizeof(float));
> > +        memset(saved_ltp + 576, 0,         448 * sizeof(float));
> > +        ac->dsp.vector_fmul_reverse(saved_ltp+448, buf + 960, swindow,     128);
> > +    } else { // LONG_STOP or ONLY_LONG
> > +        ac->dsp.vector_fmul_reverse(saved_ltp,     buf + 512, lwindow,     1024);
> > +    }
> > +
> > +    memcpy(sce->ltp_state, &sce->ltp_state[1024], 1024 * sizeof(int16_t));
> > +    ac->fmt_conv.float_to_int16(&(sce->ltp_state[1024]), sce->ret, 1024);
> > +    ac->fmt_conv.float_to_int16(&(sce->ltp_state[2048]), sce->saved_ltp, 1024);
> > +}
> > +
> > +
> >  /**
> >   * Conduct IMDCT and windowing.
> >   */
> > @@ -1857,6 +1986,14 @@ static void spectral_to_sample(AACContext *ac)
> >              if (che) {
> >                  if (type <= TYPE_CPE)
> >                      apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
> > +                if (che->ch[0].ics.predictor_present) {
> > +                    if (ac->m4ac.object_type == AOT_AAC_LTP) {
>
> I would slightly prefer the order of these two ifs to be swapped.
>
>
changed
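
So the nesting now tests the object type first (same statements as in the
hunk above, only the two outer ifs swapped):

    if (ac->m4ac.object_type == AOT_AAC_LTP) {
        if (che->ch[0].ics.predictor_present) {
            if (che->ch[0].ics.ltp.present)
                apply_ltp(ac, &che->ch[0]);
            if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
                apply_ltp(ac, &che->ch[1]);
        }
    }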


> > +                        if (che->ch[0].ics.ltp.present)
> > +                            apply_ltp(ac, &che->ch[0]);
> > +                        if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
> > +                            apply_ltp(ac, &che->ch[1]);
> > +                    }
> > +                }
> >                  if (che->ch[0].tns.present)
> >                      apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
> >                  if (che->ch[1].tns.present)
>
> [...]
>
> Thanks,
> Alex
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel at mplayerhq.hu
> https://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel
>

Thank you, Alex :)

Young Han
-------------- next part --------------
A non-text attachment was scrubbed...
Name: AAC_DEC_LTP-006.patch
Type: text/x-patch
Size: 13918 bytes
Desc: not available
URL: <http://lists.mplayerhq.hu/pipermail/ffmpeg-devel/attachments/20110204/979147f4/attachment.bin>


