[FFmpeg-devel] [PATCH] Implement AAC Long Term Prediction (LTP) decoding module

Alex Converse alex.converse
Thu Feb 3 06:18:49 CET 2011


On Mon, Jan 31, 2011 at 5:57 PM, Young Han Lee <cpumaker at gmail.com> wrote:

[...]

> I'm a little late because of a business trip.
>
> please check it again. :)
>
> Young Han
>

Great start so far.

> diff --git a/libavcodec/aac.h b/libavcodec/aac.h
> index 714e314..0a94785 100644
> --- a/libavcodec/aac.h
> +++ b/libavcodec/aac.h
> @@ -42,6 +42,7 @@
>  #define MAX_ELEM_ID 16
>
>  #define TNS_MAX_ORDER 20
> +#define MAX_LTP_LONG_SFB 40
>
>  enum RawDataBlockType {
>      TYPE_SCE,
> @@ -129,6 +130,18 @@ typedef struct {
>  #define SCALE_MAX_DIFF   60    ///< maximum scalefactor difference allowed by standard
>  #define SCALE_DIFF_ZERO  60    ///< codebook index corresponding to zero scalefactor indices difference
>
> +
> +/**
> + * Long Term Prediction
> + */
> +typedef struct {
> +    int present;
> +    int lag;
> +    float coef;
> +    int used[MAX_LTP_LONG_SFB];

This can be an int8_t
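e.g.:

    int8_t used[MAX_LTP_LONG_SFB];

since each entry only ever holds the single flag bit read by get_bits1(),
and the smaller type keeps the per-channel context a bit more compact.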

> +} LongTermPrediction;
> +
> +
>  /**
>   * Individual Channel Stream
>   */
> @@ -138,6 +151,8 @@ typedef struct {
>      uint8_t use_kb_window[2];   ///< If set, use Kaiser-Bessel window, otherwise use a sinus window.
>      int num_window_groups;
>      uint8_t group_len[8];
> +    LongTermPrediction ltp;
> +    LongTermPrediction ltp2;

Let's keep only one set of LTP variables per ICS; see my notes below for details.

>      const uint16_t *swb_offset; ///< table of offsets to the lowest spectral coefficient of a scalefactor band, sfb, for a particular window
>      const uint8_t *swb_sizes;   ///< table of scalefactor band sizes for a particular window
>      int num_swb;                ///< number of scalefactor window bands
> @@ -212,6 +227,8 @@ typedef struct {
>      uint8_t zeroes[128];                      ///< band is not coded (used by encoder)
>      DECLARE_ALIGNED(16, float, coeffs)[1024]; ///< coefficients for IMDCT
>      DECLARE_ALIGNED(16, float, saved)[1024];  ///< overlap
> +    DECLARE_ALIGNED(16, float, saved_ltp)[1024];  ///< overlap for LTP
> +    int16_t ltp_state[3072];

This needs to be aligned
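For example, mirroring the other buffers in this struct:

    DECLARE_ALIGNED(16, int16_t, ltp_state)[3072];

The buffer is fed to float_to_int16(), whose SIMD implementations may
assume 16-byte alignment.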

>      DECLARE_ALIGNED(16, float, ret)[2048];    ///< PCM output
>      PredictorState predictor_state[MAX_PREDICTORS];
>  } SingleChannelElement;
> @@ -258,7 +275,7 @@ typedef struct {
>       * @defgroup temporary aligned temporary buffers (We do not want to have these on the stack.)
>       * @{
>       */
> -    DECLARE_ALIGNED(16, float, buf_mdct)[1024];
> +    DECLARE_ALIGNED(16, float, buf_mdct)[2048];
>      /** @} */
>
>      /**
> @@ -267,6 +284,7 @@ typedef struct {
>       */
>      FFTContext mdct;
>      FFTContext mdct_small;
> +    FFTContext mdct_ltp;
>      DSPContext dsp;
>      int random_state;
>      /** @} */
> diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
> index 2127099..249498c 100644
> --- a/libavcodec/aacdec.c
> +++ b/libavcodec/aacdec.c
> @@ -477,6 +477,7 @@ static int decode_audio_specific_config(AACContext *ac,
>      switch (m4ac->object_type) {
>      case AOT_AAC_MAIN:
>      case AOT_AAC_LC:
> +    case AOT_AAC_LTP:
>          if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
>              return -1;
>          break;
> @@ -578,8 +579,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
>                      ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
>                      352);
>
> -    ff_mdct_init(&ac->mdct, 11, 1, 1.0);
> -    ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);
> +    ff_mdct_init(&ac->mdct,       11, 1, 1.0);
> +    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0);
> +    ff_mdct_init(&ac->mdct_ltp,   11, 0, 1.0);
>      // window initialization
>      ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
>      ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
> @@ -629,6 +631,24 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
>  }
>
>  /**
> + * Decode Long Term Prediction data; reference: table 4.xx.
> + */
> +static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
> +                       GetBitContext *gb, uint8_t max_sfb)
> +{
> +    int sfb;
> +    if (ac->m4ac.object_type == AOT_ER_AAC_LD) {
> +        av_log(ac->avctx, AV_LOG_ERROR, "LTP is not supported in ER AAC LD .\n");

No ER syntax is currently supported, so this check is unnecessary.
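With the ER branch dropped, the whole function reduces to:

    static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
                           GetBitContext *gb, uint8_t max_sfb)
    {
        int sfb;
        ltp->lag  = get_bits(gb, 11);
        ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
        for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
            ltp->used[sfb] = get_bits1(gb);
    }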

> +    } else {
> +        ltp->lag  = get_bits(gb, 11);
> +        ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
> +        for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
> +            ltp->used[sfb] = get_bits1(gb);
> +    }
> +}
> +
> +
> +/**
>   * Decode Individual Channel Stream info; reference: table 4.6.
>   *
>   * @param   common_window   Channels have independent [0], or shared [1], Individual Channel Stream information.
> @@ -682,9 +702,11 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
>                  memset(ics, 0, sizeof(IndividualChannelStream));
>                  return -1;
>              } else {
> -                av_log_missing_feature(ac->avctx, "Predictor bit set but LTP is", 1);
> -                memset(ics, 0, sizeof(IndividualChannelStream));
> -                return -1;
> +                if ((ics->ltp.present = get_bits(gb, 1)))
> +                    decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
> +                if (common_window)
> +                    if ((ics->ltp2.present = get_bits(gb, 1)))
> +                        decode_ltp(ac, &ics->ltp2, gb, ics->max_sfb);

Let's read this second set of LTP parameters after we return from
decode_ics_info() in the common_window case, e.g.:
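Something along these lines in decode_cpe(), right after the ICS info is
copied to the second channel (untested sketch; predictor_present comes
along with the copied ICS):

    i = cpe->ch[1].ics.use_kb_window[0];
    cpe->ch[1].ics = cpe->ch[0].ics;
    cpe->ch[1].ics.use_kb_window[1] = i;
    if (cpe->ch[1].ics.predictor_present &&
        ac->m4ac.object_type == AOT_AAC_LTP)
        if ((cpe->ch[1].ics.ltp.present = get_bits1(gb)))
            decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
    ms_present = get_bits(gb, 2);

That way the second channel gets its own ics.ltp, and the ltp2 member
(and the extra copy at the top of decode_cpe()) can go away.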

>              }
>          }
>      }
> @@ -1418,6 +1440,7 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
>          i = cpe->ch[1].ics.use_kb_window[0];
>          cpe->ch[1].ics = cpe->ch[0].ics;
>          cpe->ch[1].ics.use_kb_window[1] = i;
> +        cpe->ch[1].ics.ltp = cpe->ch[0].ics.ltp2;
>          ms_present = get_bits(gb, 2);
>          if (ms_present == 3) {
>              av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
> @@ -1657,6 +1680,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
>      int w, filt, m, i;
>      int bottom, top, order, start, end, size, inc;
>      float lpc[TNS_MAX_ORDER];
> +    float tmp[1024];

This is a fairly large stack allocation. If it can't be shrunk, let's
put it in the context or reuse one of the scratch buffers already there.
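One way to shrink it: the MA filter only ever looks back at the last
`order' unfiltered input samples, so a TNS_MAX_ORDER + 1 element sliding
history should be enough (untested sketch):

    float tmp[TNS_MAX_ORDER + 1];
    ...
    // ma filter
    for (m = 0; m < size; m++, start += inc) {
        tmp[0] = coef[start];                   // current unfiltered input
        for (i = 1; i <= FFMIN(m, order); i++)
            coef[start] += tmp[i] * lpc[i - 1]; // previous unfiltered inputs
        for (i = order; i > 0; i--)             // slide the history
            tmp[i] = tmp[i - 1];
    }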

>
>      for (w = 0; w < ics->num_windows; w++) {
>          bottom = ics->num_swb;
> @@ -1682,23 +1706,108 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
>              }
>              start += w * 128;
>
> -            // ar filter
> -            for (m = 0; m < size; m++, start += inc)
> -                for (i = 1; i <= FFMIN(m, order); i++)
> -                    coef[start] -= coef[start - i * inc] * lpc[i - 1];
> +            if (decode) {
> +                // ar filter
> +                for (m = 0; m < size; m++, start += inc)
> +                    for (i = 1; i <= FFMIN(m, order); i++)
> +                        coef[start] -= coef[start - i * inc] * lpc[i - 1];
> +            } else {
> +                // ma filter
> +                for (m = 0; m < size; m++, start += inc) {
> +                    tmp[start] = coef[start];
> +                    for (i = 1; i <= FFMIN(m, order); i++)
> +                        coef[start] += tmp[start - i * inc] * lpc[i - 1];
> +                }
> +            }
>          }
>      }
>  }
>
> +
> +
> +/**
> + * Windowing and MDCT to obtain the spectral coefficient from the predicted sample by LTP
> + */
> +static void windowing_and_mdct_ltp(AACContext *ac, float *out,
> +                                   float *in, IndividualChannelStream *ics)
> +{
> +    const float * lwindow      = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
> +    const float * swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
> +    const float * lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
> +    const float * swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
> +    float * buf = ac->buf_mdct;
> +
> +    if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
> +        ac->dsp.vector_fmul(buf, in, lwindow_prev, 1024);
> +    } else {
> +        memset(buf, 0, 448 * sizeof(float));
> +        ac->dsp.vector_fmul(buf + 448, in + 448, swindow_prev, 128);
> +        memcpy(buf + 576, in + 576, 448 * sizeof(float));
> +    }
> +    if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
> +        ac->dsp.vector_fmul_reverse(buf + 1024, in + 1024, lwindow, 1024);
> +    } else {
> +        memcpy(buf + 1024, in + 1024, 448 * sizeof(float));
> +        ac->dsp.vector_fmul_reverse(buf + 1024 + 448, in + 1024 + 448, swindow, 128);
> +        memset(buf + 1024 + 576, 0, 448 * sizeof(float));
> +    }
> +    ff_mdct_calc(&ac->mdct_ltp, out, buf);
> +}
> +
> +
> +/**
> + * Apply the long term prediction
> + */
> +static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
> +{
> +    const LongTermPrediction *ltp = &sce->ics.ltp;
> +    const uint16_t *offsets = sce->ics.swb_offset;
> +    int i, sfb;
> +
> +    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
> +        float x_est[2048], X_est[1024];
> +        int16_t num_samples = 2048;
> +        if (ltp->lag < 1024)
> +            num_samples = ltp->lag + 1024;
> +        for (i = 0; i < num_samples; i++)
> +            x_est[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
> +        for ( ; i < 2048; i++)
> +            x_est[i] = 0.0f;
> +
> +        windowing_and_mdct_ltp(ac, X_est, x_est, &sce->ics);
> +
> +        if (sce->tns.present)
> +            apply_tns(X_est, &sce->tns, &sce->ics, 0);
> +
> +        for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
> +            if (ltp->used[sfb])
> +                for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
> +                    sce->coeffs[i] += X_est[i];
> +    }
> +}
> +
> +/**
> + * Update the LTP buffer for next frame
> + */
> +static void update_ltp(AACContext *ac, SingleChannelElement *sce)
> +{
> +    memcpy(sce->ltp_state, &sce->ltp_state[1024], 1024 * sizeof(int16_t));
> +    ac->dsp.float_to_int16(&(sce->ltp_state[1024]), sce->ret,       1024);
> +    ac->dsp.float_to_int16(&(sce->ltp_state[2048]), sce->saved_ltp, 1024);
> +}
> +
> +
>  /**
>   * Conduct IMDCT and windowing.
>   */
>  static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
>  {
>      IndividualChannelStream *ics = &sce->ics;
> -    float *in    = sce->coeffs;
> -    float *out   = sce->ret;
> -    float *saved = sce->saved;
> +    float *in        = sce->coeffs;
> +    float *out       = sce->ret;
> +    float *saved     = sce->saved;
> +    float *saved_ltp = sce->saved_ltp;
> +    const float *lwindow      = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
>      const float *swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
>      const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
>      const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
> @@ -1713,6 +1822,9 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
>      } else
>          ff_imdct_half(&ac->mdct, buf, in);
>
> +    for (i = 0; i < 512; i++)
> +        buf[1535 - i] = buf[512 + i];
> +

While it's good to have LTP for completeness, the typical use case of
the decoder will still be non-LTP streams.

As a result, we should try not to inconvenience the decoder with extra
computation in the non-LTP case.

Put this extra copying behind a guard (sketched below) so that we only
do it for LTP streams.

The same applies to the other code added in this function.
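e.g. reusing the object_type check the rest of the patch already uses:

    if (ac->m4ac.object_type == AOT_AAC_LTP)
        for (i = 0; i < 512; i++)
            buf[1535 - i] = buf[512 + i];

and likewise for the saved_ltp writes further down.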

>      /* window overlapping
>       * NOTE: To simplify the overlapping code, all 'meaningless' short to long
>       * and long to short transitions are considered to be short to short
> @@ -1745,11 +1857,18 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
>          ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 0, 64);
>          ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 0, 64);
>          memcpy(                    saved + 448, buf + 7*128 + 64,  64 * sizeof(float));
> +        memcpy(                    saved_ltp,   saved,            512 * sizeof(float));
> +        ac->dsp.vector_fmul_reverse(saved_ltp+448, buf + 960,     swindow,     128);
> +        memset(                    saved_ltp + 576, 0,            448 * sizeof(float));
>      } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
>          memcpy(                    saved,       buf + 512,        448 * sizeof(float));
>          memcpy(                    saved + 448, buf + 7*128 + 64,  64 * sizeof(float));
> +        memcpy(                    saved_ltp,   buf + 512,        448 * sizeof(float));
> +        ac->dsp.vector_fmul_reverse(saved_ltp+448, buf + 960,     swindow,     128);
> +        memset(                    saved_ltp + 576, 0,            448 * sizeof(float));
>      } else { // LONG_STOP or ONLY_LONG
>          memcpy(                    saved,       buf + 512,        512 * sizeof(float));
> +        ac->dsp.vector_fmul_reverse(saved_ltp,  buf + 512,        lwindow,     1024);
>      }
>  }
>
> @@ -1855,6 +1974,14 @@ static void spectral_to_sample(AACContext *ac)
>              if (che) {
>                  if (type <= TYPE_CPE)
>                      apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
> +                if (che->ch[0].ics.predictor_present) {
> +                    if (ac->m4ac.object_type == AOT_AAC_LTP) {
> +                        if (che->ch[0].ics.ltp.present)
> +                            apply_ltp(ac, &che->ch[0]);
> +                        if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
> +                            apply_ltp(ac, &che->ch[1]);
> +                    }
> +                }
>                  if (che->ch[0].tns.present)
>                      apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
>                  if (che->ch[1].tns.present)
> @@ -1872,6 +1999,11 @@ static void spectral_to_sample(AACContext *ac)
>                  }
>                  if (type <= TYPE_CCE)
>                      apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
> +                if (ac->m4ac.object_type == AOT_AAC_LTP) {
> +                    update_ltp(ac, &che->ch[0]);
> +                    if (type == TYPE_CPE)
> +                        update_ltp(ac, &che->ch[1]);
> +                }
>              }
>          }
>      }
> @@ -2078,6 +2210,7 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
>
>      ff_mdct_end(&ac->mdct);
>      ff_mdct_end(&ac->mdct_small);
> +    ff_mdct_end(&ac->mdct_ltp);
>      return 0;
>  }
>
> diff --git a/libavcodec/aacdectab.h b/libavcodec/aacdectab.h
> index b4307f1..500e8f2 100644
> --- a/libavcodec/aacdectab.h
> +++ b/libavcodec/aacdectab.h
OK

[...]

Regards,
Alex Converse


