[FFmpeg-devel] [PATCH 1/3] mips: optimization for float aac decoder (core module)

Nedeljko Babic nbabic at mips.com
Wed Jan 9 10:45:32 CET 2013


From: Mirjana Vulin <mirjana.vulin at rt-rk.com>

Change-Id: I20673f7528108d4b607a25194dc4c7cf490ff1e0
Signed-off-by: Mirjana Vulin <mvulin at mips.com>
---
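The non-MIPS side of the change is mechanical: the hot routines in aacdec.c (imdct_and_windowing, apply_ltp, apply_tns, windowing_and_mdct_ltp, update_ltp) become function pointers in AACContext, ff_aacdec_init() installs the C implementations, and ff_aacdec_init_mips() overrides a subset of them when the optimized versions can be built. For reference, a minimal standalone sketch of that dispatch pattern is below; the DecContext/decdsp_* names are simplified stand-ins for illustration only, not the real AACContext API.

#include <stdio.h>

#ifndef ARCH_MIPS
#define ARCH_MIPS 0                    /* the build system defines this to 0 or 1 */
#endif

typedef struct DecContext DecContext;

struct DecContext {
    /* hot routine, selected once at decoder init time */
    void (*imdct_and_windowing)(DecContext *ctx);
};

/* portable C implementation, always available */
static void imdct_and_windowing_c(DecContext *ctx)
{
    (void)ctx;
    puts("generic C imdct_and_windowing");
}

#if ARCH_MIPS
/* arch-specific implementation, compiled only for MIPS builds */
static void imdct_and_windowing_mips(DecContext *ctx)
{
    (void)ctx;
    puts("MIPS-optimized imdct_and_windowing");
}

static void decdsp_init_mips(DecContext *ctx)
{
    ctx->imdct_and_windowing = imdct_and_windowing_mips;
}
#endif

static void decdsp_init(DecContext *ctx)
{
    ctx->imdct_and_windowing = imdct_and_windowing_c;    /* C defaults first */
#if ARCH_MIPS
    decdsp_init_mips(ctx);                               /* arch override second */
#endif
}

int main(void)
{
    DecContext ctx;
    decdsp_init(&ctx);
    ctx.imdct_and_windowing(&ctx);     /* every call site goes through the pointer */
    return 0;
}

In the patch the override call is written as plain "if (ARCH_MIPS) ff_aacdec_init_mips(c);", so non-MIPS builds simply keep the generic entry points.
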
 libavcodec/aac.h                |   18 +-
 libavcodec/aacdec.c             |   39 ++-
 libavcodec/mips/Makefile        |    1 +
 libavcodec/mips/aacdec_mips.c   |  830 +++++++++++++++++++++++++++++++++++++++
 libavcodec/mips/aacdec_mips.h   |  251 ++++++++++++
 libavcodec/mips/dsputil_mips.c  |   84 ++++-
 libavutil/mips/float_dsp_mips.c |   41 ++-
 7 files changed, 1248 insertions(+), 16 deletions(-)
 create mode 100644 libavcodec/mips/aacdec_mips.c
 create mode 100644 libavcodec/mips/aacdec_mips.h
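
Besides the imdct/ltp/tns entry points, the new aacdec_mips.h also overrides the VMUL2/VMUL4/VMUL2S/VMUL4S helpers used by the spectral decoding loops, through the same arch-header mechanism as the existing arm/aac.h: the header defines the names as macros (VMUL2 becomes VMUL2_mips and so on), and the generic static inline versions in aacdec.c are only compiled when those macros are not already defined. A reduced single-file sketch of that compile-time override follows; dot2, dot2_arch and USE_ARCH_VERSION are made-up illustration names, not part of the patch.

#include <stdio.h>

/* what an arch header such as mips/aacdec_mips.h contributes */
#if defined(USE_ARCH_VERSION)
static inline float dot2_arch(const float *a, const float *b)
{
    /* the real helpers use MIPS inline asm (lwxc1/mul.s) here */
    return a[0] * b[0] + a[1] * b[1];
}
#define dot2 dot2_arch
#endif

/* what the generic decoder contributes */
#ifndef dot2
static inline float dot2(const float *a, const float *b)
{
    return a[0] * b[0] + a[1] * b[1];
}
#endif

int main(void)
{
    const float a[2] = { 1.0f, 2.0f }, b[2] = { 3.0f, 4.0f };
    /* call sites always use the name dot2; which body it binds to is
       decided at compile time by whether the macro was defined above */
    printf("%f\n", dot2(a, b));
    return 0;
}

Compiling with -DUSE_ARCH_VERSION selects the arch body, otherwise the generic one is used; either way the call sites in the decoder are unchanged.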

diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index 9e9fa0e..7b96e3b 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -257,10 +257,12 @@ typedef struct ChannelElement {
     SpectralBandReplication sbr;
 } ChannelElement;
 
+typedef struct AACContext AACContext;
+
 /**
  * main AAC context
  */
-typedef struct AACContext {
+struct AACContext {
     AVClass        *class;
     AVCodecContext *avctx;
     AVFrame frame;
@@ -318,6 +320,18 @@ typedef struct AACContext {
 
     OutputConfiguration oc[2];
     int warned_num_aac_frames;
-} AACContext;
+
+    /* aacdec function pointers */
+    void (*imdct_and_windowing)(AACContext *ac, SingleChannelElement *sce);
+    void (*apply_ltp)(AACContext *ac, SingleChannelElement *sce);
+    void (*apply_tns)(float coef[1024], TemporalNoiseShaping *tns,
+                      IndividualChannelStream *ics, int decode);
+    void (*windowing_and_mdct_ltp)(AACContext *ac, float *out,
+                                   float *in, IndividualChannelStream *ics);
+    void (*update_ltp)(AACContext *ac, SingleChannelElement *sce);
+
+};
+
+extern VLC vlc_spectral[11];
 
 #endif /* AVCODEC_AAC_H */
diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c
index 239153a..4beacc9 100644
--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -108,10 +108,12 @@
 
 #if ARCH_ARM
 #   include "arm/aac.h"
+#elif ARCH_MIPS
+#   include "mips/aacdec_mips.h"
 #endif
 
 static VLC vlc_scalefactors;
-static VLC vlc_spectral[11];
+VLC vlc_spectral[11];
 
 #define overread_err "Input buffer exhausted before END element found\n"
 
@@ -870,6 +872,8 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
         ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
         size);
 
+static void ff_aacdec_init(AACContext *ac);
+
 static av_cold int aac_decode_init(AVCodecContext *avctx)
 {
     AACContext *ac = avctx->priv_data;
@@ -877,6 +881,8 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     ac->avctx = avctx;
     ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
 
+    ff_aacdec_init(ac);
+
     avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
 
     if (avctx->extradata_size > 0) {
@@ -2159,10 +2165,10 @@ static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
             predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
         memset(&predTime[i], 0, (2048 - i) * sizeof(float));
 
-        windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
+        ac->windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
 
         if (sce->tns.present)
-            apply_tns(predFreq, &sce->tns, &sce->ics, 0);
+            ac->apply_tns(predFreq, &sce->tns, &sce->ics, 0);
 
         for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
             if (ltp->used[sfb])
@@ -2374,25 +2380,25 @@ static void spectral_to_sample(AACContext *ac)
                 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
                     if (che->ch[0].ics.predictor_present) {
                         if (che->ch[0].ics.ltp.present)
-                            apply_ltp(ac, &che->ch[0]);
+                            ac->apply_ltp(ac, &che->ch[0]);
                         if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
-                            apply_ltp(ac, &che->ch[1]);
+                            ac->apply_ltp(ac, &che->ch[1]);
                     }
                 }
                 if (che->ch[0].tns.present)
-                    apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
+                    ac->apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
                 if (che->ch[1].tns.present)
-                    apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
+                    ac->apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
                 if (type <= TYPE_CPE)
                     apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
                 if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
-                    imdct_and_windowing(ac, &che->ch[0]);
+                    ac->imdct_and_windowing(ac, &che->ch[0]);
                     if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
-                        update_ltp(ac, &che->ch[0]);
+                        ac->update_ltp(ac, &che->ch[0]);
                     if (type == TYPE_CPE) {
-                        imdct_and_windowing(ac, &che->ch[1]);
+                        ac->imdct_and_windowing(ac, &che->ch[1]);
                         if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
-                            update_ltp(ac, &che->ch[1]);
+                            ac->update_ltp(ac, &che->ch[1]);
                     }
                     if (ac->oc[1].m4ac.sbr > 0) {
                         ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
@@ -2969,6 +2975,17 @@ static av_cold int latm_decode_init(AVCodecContext *avctx)
     return ret;
 }
 
+static void ff_aacdec_init(AACContext *c)
+{
+    c->imdct_and_windowing                      = imdct_and_windowing;
+    c->apply_ltp                                = apply_ltp;
+    c->apply_tns                                = apply_tns;
+    c->windowing_and_mdct_ltp                   = windowing_and_mdct_ltp;
+    c->update_ltp                               = update_ltp;
+
+    if (ARCH_MIPS)
+        ff_aacdec_init_mips(c);
+}
 /**
  * AVOptions for Japanese DTV specific extensions (ADTS only)
  */
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index feeec99..ded74db 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -14,3 +14,4 @@ MIPSFPU-OBJS-$(CONFIG_FFT)                += mips/fft_mips.o
 MIPSFPU-OBJS                              += mips/dsputil_mips.o           \
                                              mips/fmtconvert_mips.o
 OBJS-$(CONFIG_AC3DSP)                     += mips/ac3dsp_mips.o
+OBJS-$(CONFIG_AAC_DECODER)                += mips/aacdec_mips.o
diff --git a/libavcodec/mips/aacdec_mips.c b/libavcodec/mips/aacdec_mips.c
new file mode 100644
index 0000000..1b348f4
--- /dev/null
+++ b/libavcodec/mips/aacdec_mips.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors:  Darko Laus      (darko at mips.com)
+ *           Djordje Pesut   (djordje at mips.com)
+ *           Mirjana Vulin   (mvulin at mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacdec.c
+ */
+
+#include "aacdec_mips.h"
+#include "libavcodec/aactab.h"
+#include "libavcodec/sinewin.h"
+
+#if HAVE_INLINE_ASM
+static av_always_inline int lcg_random(unsigned previous_val)
+{
+    union { unsigned u; int s; } v = { previous_val * 1664525u + 1013904223 };
+    return v.s;
+}
+
+static void imdct_and_windowing_mips(AACContext *ac, SingleChannelElement *sce)
+{
+    IndividualChannelStream *ics = &sce->ics;
+    float *in    = sce->coeffs;
+    float *out   = sce->ret;
+    float *saved = sce->saved;
+    const float *swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+    const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
+    float *buf  = ac->buf_mdct;
+    int i;
+
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        for (i = 0; i < 1024; i += 128)
+            ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
+    } else
+        ac->mdct.imdct_half(&ac->mdct, buf, in);
+
+    /* window overlapping
+     * NOTE: To simplify the overlapping code, all 'meaningless' short to long
+     * and long to short transitions are considered to be short to short
+     * transitions. This leaves just two cases (long to long and short to short)
+     * with a little special sauce for EIGHT_SHORT_SEQUENCE.
+     */
+    if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
+            (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
+        ac->dsp.vector_fmul_window(    out,               saved,            buf,         lwindow_prev, 512);
+    } else {
+        {
+            float *buf1 = saved;
+            float *buf2 = out;
+            int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+            int loop_end;
+
+            /* loop unrolled 8 times */
+            __asm__ volatile (
+                ".set push                               \n\t"
+                ".set noreorder                          \n\t"
+                "addiu   %[loop_end], %[src],      1792  \n\t"
+            "1:                                          \n\t"
+                "lw      %[temp0],    0(%[src])          \n\t"
+                "lw      %[temp1],    4(%[src])          \n\t"
+                "lw      %[temp2],    8(%[src])          \n\t"
+                "lw      %[temp3],    12(%[src])         \n\t"
+                "lw      %[temp4],    16(%[src])         \n\t"
+                "lw      %[temp5],    20(%[src])         \n\t"
+                "lw      %[temp6],    24(%[src])         \n\t"
+                "lw      %[temp7],    28(%[src])         \n\t"
+                "addiu   %[src],      %[src],      32    \n\t"
+                "sw      %[temp0],    0(%[dst])          \n\t"
+                "sw      %[temp1],    4(%[dst])          \n\t"
+                "sw      %[temp2],    8(%[dst])          \n\t"
+                "sw      %[temp3],    12(%[dst])         \n\t"
+                "sw      %[temp4],    16(%[dst])         \n\t"
+                "sw      %[temp5],    20(%[dst])         \n\t"
+                "sw      %[temp6],    24(%[dst])         \n\t"
+                "sw      %[temp7],    28(%[dst])         \n\t"
+                "bne     %[src],      %[loop_end], 1b    \n\t"
+                " addiu  %[dst],      %[dst],      32    \n\t"
+                ".set pop                                \n\t"
+
+                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                  [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+                  [dst]"+r"(buf2)
+                :
+                : "memory"
+            );
+        }
+
+        if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+            {
+                float wi;
+                float wj;
+                int i;
+                float temp0, temp1, temp2, temp3;
+                float *dst0 = out + 448 + 0*128;
+                float *dst1 = dst0 + 64 + 63;
+                float *dst2 = saved + 63;
+                float *win0 = (float*)swindow;
+                float *win1 = win0 + 64 + 63;
+                float *win0_prev = (float*)swindow_prev;
+                float *win1_prev = win0_prev + 64 + 63;
+                float *src0_prev = saved + 448;
+                float *src1_prev = buf + 0*128 + 63;
+                float *src0 = buf + 0*128 + 64;
+                float *src1 = buf + 1*128 + 63;
+
+                for (i = 0; i < 64; i++)
+                {
+                    temp0 = src0_prev[0];
+                    temp1 = src1_prev[0];
+                    wi = *win0_prev;
+                    wj = *win1_prev;
+                    temp2 = src0[0];
+                    temp3 = src1[0];
+                    dst0[0] = temp0 * wj - temp1 * wi;
+                    dst1[0] = temp0 * wi + temp1 * wj;
+
+                    wi = *win0;
+                    wj = *win1;
+
+                    temp0 = src0[128];
+                    temp1 = src1[128];
+                    dst0[128] = temp2 * wj - temp3 * wi;
+                    dst1[128] = temp2 * wi + temp3 * wj;
+
+                    temp2 = src0[256];
+                    temp3 = src1[256];
+                    dst0[256] = temp0 * wj - temp1 * wi;
+                    dst1[256] = temp0 * wi + temp1 * wj;
+                    dst0[384] = temp2 * wj - temp3 * wi;
+                    dst1[384] = temp2 * wi + temp3 * wj;
+
+                    temp0 = src0[384];
+                    temp1 = src1[384];
+                    dst0[512] = temp0 * wj - temp1 * wi;
+                    dst2[0] = temp0 * wi + temp1 * wj;
+
+                    src0++;
+                    src1--;
+                    src0_prev++;
+                    src1_prev--;
+                    win0++;
+                    win1--;
+                    win0_prev++;
+                    win1_prev--;
+                    dst0++;
+                    dst1--;
+                    dst2--;
+                }
+            }
+        } else {
+            ac->dsp.vector_fmul_window(out + 448,         saved + 448,      buf,         swindow_prev, 64);
+            {
+                float *buf1 = buf + 64;
+                float *buf2 = out + 576;
+                int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+                int loop_end;
+
+                /* loop unrolled 8 times */
+                __asm__ volatile (
+                    ".set push                               \n\t"
+                    ".set noreorder                          \n\t"
+                    "addiu   %[loop_end], %[src],      1792  \n\t"
+                "1:                                          \n\t"
+                    "lw      %[temp0],    0(%[src])          \n\t"
+                    "lw      %[temp1],    4(%[src])          \n\t"
+                    "lw      %[temp2],    8(%[src])          \n\t"
+                    "lw      %[temp3],    12(%[src])         \n\t"
+                    "lw      %[temp4],    16(%[src])         \n\t"
+                    "lw      %[temp5],    20(%[src])         \n\t"
+                    "lw      %[temp6],    24(%[src])         \n\t"
+                    "lw      %[temp7],    28(%[src])         \n\t"
+                    "addiu   %[src],      %[src],      32    \n\t"
+                    "sw      %[temp0],    0(%[dst])          \n\t"
+                    "sw      %[temp1],    4(%[dst])          \n\t"
+                    "sw      %[temp2],    8(%[dst])          \n\t"
+                    "sw      %[temp3],    12(%[dst])         \n\t"
+                    "sw      %[temp4],    16(%[dst])         \n\t"
+                    "sw      %[temp5],    20(%[dst])         \n\t"
+                    "sw      %[temp6],    24(%[dst])         \n\t"
+                    "sw      %[temp7],    28(%[dst])         \n\t"
+                    "bne     %[src],      %[loop_end], 1b    \n\t"
+                    " addiu  %[dst],      %[dst],      32    \n\t"
+                    ".set pop                                \n\t"
+
+                    : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                      [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                      [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                      [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                      [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+                      [dst]"+r"(buf2)
+                    :
+                    : "memory"
+                );
+            }
+        }
+    }
+
+    // buffer update
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        ac->dsp.vector_fmul_window(saved + 64,  buf + 4*128 + 64, buf + 5*128, swindow, 64);
+        ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+        ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+        {
+            float *buf1 = buf + 7*128 + 64;
+            float *buf2 = saved + 448;
+            int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+            int loop_end;
+
+            /* loop unrolled 8 times */
+            __asm__ volatile (
+                ".set push                                \n\t"
+                ".set noreorder                           \n\t"
+                "addiu   %[loop_end], %[src],       256   \n\t"
+            "1:                                           \n\t"
+                "lw      %[temp0],    0(%[src])           \n\t"
+                "lw      %[temp1],    4(%[src])           \n\t"
+                "lw      %[temp2],    8(%[src])           \n\t"
+                "lw      %[temp3],    12(%[src])          \n\t"
+                "lw      %[temp4],    16(%[src])          \n\t"
+                "lw      %[temp5],    20(%[src])          \n\t"
+                "lw      %[temp6],    24(%[src])          \n\t"
+                "lw      %[temp7],    28(%[src])          \n\t"
+                "addiu   %[src],      %[src],       32    \n\t"
+                "sw      %[temp0],    0(%[dst])           \n\t"
+                "sw      %[temp1],    4(%[dst])           \n\t"
+                "sw      %[temp2],    8(%[dst])           \n\t"
+                "sw      %[temp3],    12(%[dst])          \n\t"
+                "sw      %[temp4],    16(%[dst])          \n\t"
+                "sw      %[temp5],    20(%[dst])          \n\t"
+                "sw      %[temp6],    24(%[dst])          \n\t"
+                "sw      %[temp7],    28(%[dst])          \n\t"
+                "bne     %[src],      %[loop_end],  1b    \n\t"
+                " addiu  %[dst],      %[dst],       32    \n\t"
+                ".set pop                                 \n\t"
+
+                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                  [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+                  [dst]"+r"(buf2)
+                :
+                : "memory"
+            );
+        }
+    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+        float *buf1 = buf + 512;
+        float *buf2 = saved;
+        int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+        int loop_end;
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+            ".set push                                \n\t"
+            ".set noreorder                           \n\t"
+            "addiu   %[loop_end], %[src],       1792  \n\t"
+        "1:                                           \n\t"
+            "lw      %[temp0],    0(%[src])           \n\t"
+            "lw      %[temp1],    4(%[src])           \n\t"
+            "lw      %[temp2],    8(%[src])           \n\t"
+            "lw      %[temp3],    12(%[src])          \n\t"
+            "lw      %[temp4],    16(%[src])          \n\t"
+            "lw      %[temp5],    20(%[src])          \n\t"
+            "lw      %[temp6],    24(%[src])          \n\t"
+            "lw      %[temp7],    28(%[src])          \n\t"
+            "addiu   %[src],      %[src],       32    \n\t"
+            "sw      %[temp0],    0(%[dst])           \n\t"
+            "sw      %[temp1],    4(%[dst])           \n\t"
+            "sw      %[temp2],    8(%[dst])           \n\t"
+            "sw      %[temp3],    12(%[dst])          \n\t"
+            "sw      %[temp4],    16(%[dst])          \n\t"
+            "sw      %[temp5],    20(%[dst])          \n\t"
+            "sw      %[temp6],    24(%[dst])          \n\t"
+            "sw      %[temp7],    28(%[dst])          \n\t"
+            "bne     %[src],      %[loop_end],  1b    \n\t"
+            " addiu  %[dst],      %[dst],       32    \n\t"
+            ".set pop                                 \n\t"
+
+            : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+              [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+              [dst]"+r"(buf2)
+            :
+            : "memory"
+        );
+        {
+            float *buf1 = buf + 7*128 + 64;
+            float *buf2 = saved + 448;
+            int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+            int loop_end;
+
+            /* loop unrolled 8 times */
+            __asm__ volatile (
+                ".set push                                 \n\t"
+                ".set noreorder                            \n\t"
+                "addiu   %[loop_end], %[src],        256   \n\t"
+            "1:                                            \n\t"
+                "lw      %[temp0],    0(%[src])            \n\t"
+                "lw      %[temp1],    4(%[src])            \n\t"
+                "lw      %[temp2],    8(%[src])            \n\t"
+                "lw      %[temp3],    12(%[src])           \n\t"
+                "lw      %[temp4],    16(%[src])           \n\t"
+                "lw      %[temp5],    20(%[src])           \n\t"
+                "lw      %[temp6],    24(%[src])           \n\t"
+                "lw      %[temp7],    28(%[src])           \n\t"
+                "addiu   %[src],      %[src],        32    \n\t"
+                "sw      %[temp0],    0(%[dst])            \n\t"
+                "sw      %[temp1],    4(%[dst])            \n\t"
+                "sw      %[temp2],    8(%[dst])            \n\t"
+                "sw      %[temp3],    12(%[dst])           \n\t"
+                "sw      %[temp4],    16(%[dst])           \n\t"
+                "sw      %[temp5],    20(%[dst])           \n\t"
+                "sw      %[temp6],    24(%[dst])           \n\t"
+                "sw      %[temp7],    28(%[dst])           \n\t"
+                "bne     %[src],      %[loop_end],   1b    \n\t"
+                " addiu  %[dst],      %[dst],        32    \n\t"
+                ".set pop                                  \n\t"
+
+                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                  [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+                  [dst]"+r"(buf2)
+                :
+                : "memory"
+            );
+        }
+    } else { // LONG_STOP or ONLY_LONG
+        float *buf1 = buf + 512;
+        float *buf2 = saved;
+        int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+        int loop_end;
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+            ".set push                                 \n\t"
+            ".set noreorder                            \n\t"
+            "addiu   %[loop_end], %[src],        2048  \n\t"
+        "1:                                            \n\t"
+            "lw      %[temp0],    0(%[src])            \n\t"
+            "lw      %[temp1],    4(%[src])            \n\t"
+            "lw      %[temp2],    8(%[src])            \n\t"
+            "lw      %[temp3],    12(%[src])           \n\t"
+            "lw      %[temp4],    16(%[src])           \n\t"
+            "lw      %[temp5],    20(%[src])           \n\t"
+            "lw      %[temp6],    24(%[src])           \n\t"
+            "lw      %[temp7],    28(%[src])           \n\t"
+            "addiu   %[src],      %[src],        32    \n\t"
+            "sw      %[temp0],    0(%[dst])            \n\t"
+            "sw      %[temp1],    4(%[dst])            \n\t"
+            "sw      %[temp2],    8(%[dst])            \n\t"
+            "sw      %[temp3],    12(%[dst])           \n\t"
+            "sw      %[temp4],    16(%[dst])           \n\t"
+            "sw      %[temp5],    20(%[dst])           \n\t"
+            "sw      %[temp6],    24(%[dst])           \n\t"
+            "sw      %[temp7],    28(%[dst])           \n\t"
+            "bne     %[src],      %[loop_end],   1b    \n\t"
+            " addiu  %[dst],      %[dst],        32    \n\t"
+            ".set pop                                  \n\t"
+
+            : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+              [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+              [dst]"+r"(buf2)
+            :
+            : "memory"
+        );
+    }
+}
+
+static void apply_ltp_mips(AACContext *ac, SingleChannelElement *sce)
+{
+    const LongTermPrediction *ltp = &sce->ics.ltp;
+    const uint16_t *offsets = sce->ics.swb_offset;
+    int i, sfb;
+    int j, k;
+
+    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+        float *predTime = sce->ret;
+        float *predFreq = ac->buf_mdct;
+        float *p_predTime;
+        int16_t num_samples = 2048;
+
+        if (ltp->lag < 1024)
+            num_samples = ltp->lag + 1024;
+        j = (2048 - num_samples) >> 2;
+        k = (2048 - num_samples) & 3;
+        p_predTime = &predTime[num_samples];
+
+        for (i = 0; i < num_samples; i++)
+            predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
+        for (i = 0; i < j; i++) {
+
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "sw      $0,              0(%[p_predTime])        \n\t"
+                "sw      $0,              4(%[p_predTime])        \n\t"
+                "sw      $0,              8(%[p_predTime])        \n\t"
+                "sw      $0,              12(%[p_predTime])       \n\t"
+                "addiu   %[p_predTime],   %[p_predTime],     16   \n\t"
+
+                : [p_predTime]"+r"(p_predTime)
+                :
+                : "memory"
+            );
+        }
+        for (i = 0; i < k; i++) {
+
+            __asm__ volatile (
+                "sw      $0,              0(%[p_predTime])        \n\t"
+                "addiu   %[p_predTime],   %[p_predTime],     4    \n\t"
+
+                : [p_predTime]"+r"(p_predTime)
+                :
+                : "memory"
+            );
+        }
+
+        ac->windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
+
+        if (sce->tns.present)
+            ac->apply_tns(predFreq, &sce->tns, &sce->ics, 0);
+
+        for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
+            if (ltp->used[sfb])
+                for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
+                    sce->coeffs[i] += predFreq[i];
+    }
+}
+
+#if HAVE_MIPSFPU
+static void update_ltp_mips(AACContext *ac, SingleChannelElement *sce)
+{
+    IndividualChannelStream *ics = &sce->ics;
+    float *saved     = sce->saved;
+    float *saved_ltp = sce->coeffs;
+    const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+    int i;
+    int loop_end, loop_end1, loop_end2;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10, temp11;
+
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        float *buf = saved;
+        float *buf0 = saved_ltp;
+        float *p_saved_ltp = saved_ltp + 576;
+        float *ptr1 = &saved_ltp[512];
+        float *ptr2 = &ac->buf_mdct[1023];
+        float *ptr3 = (float*)&swindow[63];
+        loop_end1 = (int)(p_saved_ltp + 448);
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+            ".set push                                     \n\t"
+            ".set noreorder                                \n\t"
+            "addiu   %[loop_end],   %[src],         2048   \n\t"
+        "1:                                                \n\t"
+            "lw      %[temp0],      0(%[src])              \n\t"
+            "lw      %[temp1],      4(%[src])              \n\t"
+            "lw      %[temp2],      8(%[src])              \n\t"
+            "lw      %[temp3],      12(%[src])             \n\t"
+            "lw      %[temp4],      16(%[src])             \n\t"
+            "lw      %[temp5],      20(%[src])             \n\t"
+            "lw      %[temp6],      24(%[src])             \n\t"
+            "lw      %[temp7],      28(%[src])             \n\t"
+            "addiu   %[src],        %[src],         32     \n\t"
+            "sw      %[temp0],      0(%[dst])              \n\t"
+            "sw      %[temp1],      4(%[dst])              \n\t"
+            "sw      %[temp2],      8(%[dst])              \n\t"
+            "sw      %[temp3],      12(%[dst])             \n\t"
+            "sw      %[temp4],      16(%[dst])             \n\t"
+            "sw      %[temp5],      20(%[dst])             \n\t"
+            "sw      %[temp6],      24(%[dst])             \n\t"
+            "sw      %[temp7],      28(%[dst])             \n\t"
+            "bne     %[src],        %[loop_end],    1b     \n\t"
+            " addiu  %[dst],        %[dst],         32     \n\t"
+            ".set pop                                      \n\t"
+
+            : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+              [loop_end]"=&r"(loop_end), [src]"+r"(buf),
+              [dst]"+r"(buf0)
+            :
+            : "memory"
+        );
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+        "1:                                                   \n\t"
+            "sw     $0,              0(%[p_saved_ltp])        \n\t"
+            "sw     $0,              4(%[p_saved_ltp])        \n\t"
+            "sw     $0,              8(%[p_saved_ltp])        \n\t"
+            "sw     $0,              12(%[p_saved_ltp])       \n\t"
+            "sw     $0,              16(%[p_saved_ltp])       \n\t"
+            "sw     $0,              20(%[p_saved_ltp])       \n\t"
+            "sw     $0,              24(%[p_saved_ltp])       \n\t"
+            "sw     $0,              28(%[p_saved_ltp])       \n\t"
+            "addiu  %[p_saved_ltp],  %[p_saved_ltp],     32   \n\t"
+            "bne    %[p_saved_ltp],  %[loop_end1],       1b   \n\t"
+
+            : [p_saved_ltp]"+r"(p_saved_ltp)
+            : [loop_end1]"r"(loop_end1)
+            : "memory"
+        );
+
+        ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960,     &swindow[64],      64);
+        for (i = 0; i < 16; i++) {
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "lwc1    %[temp0],    0(%[ptr2])                \n\t"
+                "lwc1    %[temp1],    -4(%[ptr2])               \n\t"
+                "lwc1    %[temp2],    -8(%[ptr2])               \n\t"
+                "lwc1    %[temp3],    -12(%[ptr2])              \n\t"
+                "lwc1    %[temp4],    0(%[ptr3])                \n\t"
+                "lwc1    %[temp5],    -4(%[ptr3])               \n\t"
+                "lwc1    %[temp6],    -8(%[ptr3])               \n\t"
+                "lwc1    %[temp7],    -12(%[ptr3])              \n\t"
+                "mul.s   %[temp8],    %[temp0],     %[temp4]    \n\t"
+                "mul.s   %[temp9],    %[temp1],     %[temp5]    \n\t"
+                "mul.s   %[temp10],   %[temp2],     %[temp6]    \n\t"
+                "mul.s   %[temp11],   %[temp3],     %[temp7]    \n\t"
+                "swc1    %[temp8],    0(%[ptr1])                \n\t"
+                "swc1    %[temp9],    4(%[ptr1])                \n\t"
+                "swc1    %[temp10],   8(%[ptr1])                \n\t"
+                "swc1    %[temp11],   12(%[ptr1])               \n\t"
+                "addiu   %[ptr1],     %[ptr1],      16          \n\t"
+                "addiu   %[ptr2],     %[ptr2],      -16         \n\t"
+                "addiu   %[ptr3],     %[ptr3],      -16         \n\t"
+
+                : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+                  [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+                  [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+                  [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+                  [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+                  [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+                  [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2), [ptr3]"+r"(ptr3)
+                :
+                : "memory"
+            );
+        }
+    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+        float *buff0 = saved;
+        float *buff1 = saved_ltp;
+        float *ptr1 = &saved_ltp[512];
+        float *ptr2 = &ac->buf_mdct[1023];
+        float *ptr3 = (float*)&swindow[63];
+        loop_end = (int)(saved + 448);
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+            ".set push                                  \n\t"
+            ".set noreorder                             \n\t"
+        "1:                                             \n\t"
+            "lw      %[temp0],    0(%[src])             \n\t"
+            "lw      %[temp1],    4(%[src])             \n\t"
+            "lw      %[temp2],    8(%[src])             \n\t"
+            "lw      %[temp3],    12(%[src])            \n\t"
+            "lw      %[temp4],    16(%[src])            \n\t"
+            "lw      %[temp5],    20(%[src])            \n\t"
+            "lw      %[temp6],    24(%[src])            \n\t"
+            "lw      %[temp7],    28(%[src])            \n\t"
+            "addiu   %[src],      %[src],         32    \n\t"
+            "sw      %[temp0],    0(%[dst])             \n\t"
+            "sw      %[temp1],    4(%[dst])             \n\t"
+            "sw      %[temp2],    8(%[dst])             \n\t"
+            "sw      %[temp3],    12(%[dst])            \n\t"
+            "sw      %[temp4],    16(%[dst])            \n\t"
+            "sw      %[temp5],    20(%[dst])            \n\t"
+            "sw      %[temp6],    24(%[dst])            \n\t"
+            "sw      %[temp7],    28(%[dst])            \n\t"
+            "sw      $0,          2304(%[dst])          \n\t"
+            "sw      $0,          2308(%[dst])          \n\t"
+            "sw      $0,          2312(%[dst])          \n\t"
+            "sw      $0,          2316(%[dst])          \n\t"
+            "sw      $0,          2320(%[dst])          \n\t"
+            "sw      $0,          2324(%[dst])          \n\t"
+            "sw      $0,          2328(%[dst])          \n\t"
+            "sw      $0,          2332(%[dst])          \n\t"
+            "bne     %[src],      %[loop_end],    1b    \n\t"
+            " addiu  %[dst],      %[dst],         32    \n\t"
+            ".set pop                                   \n\t"
+
+            : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+              [src]"+r"(buff0), [dst]"+r"(buff1)
+            : [loop_end]"r"(loop_end)
+            : "memory"
+        );
+        ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960,     &swindow[64],      64);
+        for (i = 0; i < 16; i++) {
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "lwc1    %[temp0],    0(%[ptr2])                \n\t"
+                "lwc1    %[temp1],    -4(%[ptr2])               \n\t"
+                "lwc1    %[temp2],    -8(%[ptr2])               \n\t"
+                "lwc1    %[temp3],    -12(%[ptr2])              \n\t"
+                "lwc1    %[temp4],    0(%[ptr3])                \n\t"
+                "lwc1    %[temp5],    -4(%[ptr3])               \n\t"
+                "lwc1    %[temp6],    -8(%[ptr3])               \n\t"
+                "lwc1    %[temp7],    -12(%[ptr3])              \n\t"
+                "mul.s   %[temp8],    %[temp0],     %[temp4]    \n\t"
+                "mul.s   %[temp9],    %[temp1],     %[temp5]    \n\t"
+                "mul.s   %[temp10],   %[temp2],     %[temp6]    \n\t"
+                "mul.s   %[temp11],   %[temp3],     %[temp7]    \n\t"
+                "swc1    %[temp8],    0(%[ptr1])                \n\t"
+                "swc1    %[temp9],    4(%[ptr1])                \n\t"
+                "swc1    %[temp10],   8(%[ptr1])                \n\t"
+                "swc1    %[temp11],   12(%[ptr1])               \n\t"
+                "addiu   %[ptr1],     %[ptr1],      16          \n\t"
+                "addiu   %[ptr2],     %[ptr2],      -16         \n\t"
+                "addiu   %[ptr3],     %[ptr3],      -16         \n\t"
+
+                : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+                  [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+                  [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+                  [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+                  [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+                  [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+                  [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2), [ptr3]"+r"(ptr3)
+                :
+                : "memory"
+            );
+        }
+    } else { // LONG_STOP or ONLY_LONG
+        float *ptr1, *ptr2, *ptr3;
+        ac->dsp.vector_fmul_reverse(saved_ltp,       ac->buf_mdct + 512,     &lwindow[512],     512);
+
+        ptr1 = &saved_ltp[512];
+        ptr2 = &ac->buf_mdct[1023];
+        ptr3 = (float*)&lwindow[511];
+
+        for (i = 0; i < 512; i += 4) {
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "lwc1    %[temp0],    0(%[ptr2])                \n\t"
+                "lwc1    %[temp1],    -4(%[ptr2])               \n\t"
+                "lwc1    %[temp2],    -8(%[ptr2])               \n\t"
+                "lwc1    %[temp3],    -12(%[ptr2])              \n\t"
+                "lwc1    %[temp4],    0(%[ptr3])                \n\t"
+                "lwc1    %[temp5],    -4(%[ptr3])               \n\t"
+                "lwc1    %[temp6],    -8(%[ptr3])               \n\t"
+                "lwc1    %[temp7],    -12(%[ptr3])              \n\t"
+                "mul.s   %[temp8],    %[temp0],     %[temp4]    \n\t"
+                "mul.s   %[temp9],    %[temp1],     %[temp5]    \n\t"
+                "mul.s   %[temp10],   %[temp2],     %[temp6]    \n\t"
+                "mul.s   %[temp11],   %[temp3],     %[temp7]    \n\t"
+                "swc1    %[temp8],    0(%[ptr1])                \n\t"
+                "swc1    %[temp9],    4(%[ptr1])                \n\t"
+                "swc1    %[temp10],   8(%[ptr1])                \n\t"
+                "swc1    %[temp11],   12(%[ptr1])               \n\t"
+                "addiu   %[ptr1],     %[ptr1],      16          \n\t"
+                "addiu   %[ptr2],     %[ptr2],      -16         \n\t"
+                "addiu   %[ptr3],     %[ptr3],      -16         \n\t"
+
+                : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+                  [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+                  [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+                  [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+                  [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+                  [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+                  [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2),
+                  [ptr3]"+r"(ptr3)
+                :
+                : "memory"
+            );
+        }
+    }
+
+    {
+        float *buf1 = sce->ltp_state+1024;
+        float *buf2 = sce->ltp_state;
+        float *buf3 = sce->ret;
+        float *buf4 = sce->ltp_state+1024;
+        float *buf5 = saved_ltp;
+        float *buf6 = sce->ltp_state+2048;
+
+        /* loops unrolled 8 times */
+        __asm__ volatile (
+            ".set push                                    \n\t"
+            ".set noreorder                               \n\t"
+            "addiu   %[loop_end],   %[src],         4096  \n\t"
+            "addiu   %[loop_end1],  %[src1],        4096  \n\t"
+            "addiu   %[loop_end2],  %[src2],        4096  \n\t"
+        "1:                                               \n\t"
+            "lw      %[temp0],      0(%[src])             \n\t"
+            "lw      %[temp1],      4(%[src])             \n\t"
+            "lw      %[temp2],      8(%[src])             \n\t"
+            "lw      %[temp3],      12(%[src])            \n\t"
+            "lw      %[temp4],      16(%[src])            \n\t"
+            "lw      %[temp5],      20(%[src])            \n\t"
+            "lw      %[temp6],      24(%[src])            \n\t"
+            "lw      %[temp7],      28(%[src])            \n\t"
+            "addiu   %[src],        %[src],         32    \n\t"
+            "sw      %[temp0],      0(%[dst])             \n\t"
+            "sw      %[temp1],      4(%[dst])             \n\t"
+            "sw      %[temp2],      8(%[dst])             \n\t"
+            "sw      %[temp3],      12(%[dst])            \n\t"
+            "sw      %[temp4],      16(%[dst])            \n\t"
+            "sw      %[temp5],      20(%[dst])            \n\t"
+            "sw      %[temp6],      24(%[dst])            \n\t"
+            "sw      %[temp7],      28(%[dst])            \n\t"
+            "bne     %[src],        %[loop_end],    1b    \n\t"
+            " addiu  %[dst],        %[dst],         32    \n\t"
+        "2:                                               \n\t"
+            "lw      %[temp0],      0(%[src1])            \n\t"
+            "lw      %[temp1],      4(%[src1])            \n\t"
+            "lw      %[temp2],      8(%[src1])            \n\t"
+            "lw      %[temp3],      12(%[src1])           \n\t"
+            "lw      %[temp4],      16(%[src1])           \n\t"
+            "lw      %[temp5],      20(%[src1])           \n\t"
+            "lw      %[temp6],      24(%[src1])           \n\t"
+            "lw      %[temp7],      28(%[src1])           \n\t"
+            "addiu   %[src1],       %[src1],        32    \n\t"
+            "sw      %[temp0],      0(%[dst1])            \n\t"
+            "sw      %[temp1],      4(%[dst1])            \n\t"
+            "sw      %[temp2],      8(%[dst1])            \n\t"
+            "sw      %[temp3],      12(%[dst1])           \n\t"
+            "sw      %[temp4],      16(%[dst1])           \n\t"
+            "sw      %[temp5],      20(%[dst1])           \n\t"
+            "sw      %[temp6],      24(%[dst1])           \n\t"
+            "sw      %[temp7],      28(%[dst1])           \n\t"
+            "bne     %[src1],       %[loop_end1],   2b    \n\t"
+            " addiu  %[dst1],       %[dst1],        32    \n\t"
+        "3:                                               \n\t"
+            "lw      %[temp0],      0(%[src2])            \n\t"
+            "lw      %[temp1],      4(%[src2])            \n\t"
+            "lw      %[temp2],      8(%[src2])            \n\t"
+            "lw      %[temp3],      12(%[src2])           \n\t"
+            "lw      %[temp4],      16(%[src2])           \n\t"
+            "lw      %[temp5],      20(%[src2])           \n\t"
+            "lw      %[temp6],      24(%[src2])           \n\t"
+            "lw      %[temp7],      28(%[src2])           \n\t"
+            "addiu   %[src2],       %[src2],        32    \n\t"
+            "sw      %[temp0],      0(%[dst2])            \n\t"
+            "sw      %[temp1],      4(%[dst2])            \n\t"
+            "sw      %[temp2],      8(%[dst2])            \n\t"
+            "sw      %[temp3],      12(%[dst2])           \n\t"
+            "sw      %[temp4],      16(%[dst2])           \n\t"
+            "sw      %[temp5],      20(%[dst2])           \n\t"
+            "sw      %[temp6],      24(%[dst2])           \n\t"
+            "sw      %[temp7],      28(%[dst2])           \n\t"
+            "bne     %[src2],       %[loop_end2],   3b    \n\t"
+            " addiu  %[dst2],       %[dst2],        32    \n\t"
+            ".set pop                                     \n\t"
+
+            : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+              [loop_end]"=&r"(loop_end), [loop_end1]"=&r"(loop_end1),
+              [loop_end2]"=&r"(loop_end2), [src]"+r"(buf1),
+              [dst]"+r"(buf2), [src1]"+r"(buf3), [dst1]"+r"(buf4),
+              [src2]"+r"(buf5), [dst2]"+r"(buf6)
+            :
+            : "memory"
+        );
+    }
+}
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_aacdec_init_mips(AACContext *c)
+{
+#if HAVE_INLINE_ASM
+    c->imdct_and_windowing         = imdct_and_windowing_mips;
+    c->apply_ltp                   = apply_ltp_mips;
+#if HAVE_MIPSFPU
+    c->update_ltp                  = update_ltp_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacdec_mips.h b/libavcodec/mips/aacdec_mips.h
new file mode 100644
index 0000000..05a5b6c
--- /dev/null
+++ b/libavcodec/mips/aacdec_mips.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors:  Darko Laus      (darko at mips.com)
+ *           Djordje Pesut   (djordje at mips.com)
+ *           Mirjana Vulin   (mvulin at mips.com)
+ *
+ * AAC decoder functions optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacdec.c
+ */
+
+#ifndef AVCODEC_MIPS_AACDEC_FLOAT_H
+#define AVCODEC_MIPS_AACDEC_FLOAT_H
+
+#include "libavcodec/aac.h"
+
+#if HAVE_INLINE_ASM && HAVE_MIPSFPU
+static inline float *VMUL2_mips(float *dst, const float *v, unsigned idx,
+                           const float *scale)
+{
+    float temp0, temp1, temp2;
+    int temp3, temp4;
+    float *ret;
+
+    __asm__ volatile(
+        "andi    %[temp3],  %[idx],       15           \n\t"
+        "ext     %[temp4],  %[idx],       4,      4    \n\t"
+        "sll     %[temp3],  %[temp3],     2            \n\t"
+        "sll     %[temp4],  %[temp4],     2            \n\t"
+        "lwc1    %[temp2],  0(%[scale])                \n\t"
+        "lwxc1   %[temp0],  %[temp3](%[v])             \n\t"
+        "lwxc1   %[temp1],  %[temp4](%[v])             \n\t"
+        "mul.s   %[temp0],  %[temp0],     %[temp2]     \n\t"
+        "mul.s   %[temp1],  %[temp1],     %[temp2]     \n\t"
+        "addiu   %[ret],    %[dst],       8            \n\t"
+        "swc1    %[temp0],  0(%[dst])                  \n\t"
+        "swc1    %[temp1],  4(%[dst])                  \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+          [temp2]"=&f"(temp2), [temp3]"=&r"(temp3),
+          [temp4]"=&r"(temp4), [ret]"=&r"(ret)
+        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+          [dst]"r"(dst)
+        : "memory"
+    );
+    return ret;
+}
+
+static inline float *VMUL4_mips(float *dst, const float *v, unsigned idx,
+                           const float *scale)
+{
+    int temp0, temp1, temp2, temp3;
+    float temp4, temp5, temp6, temp7, temp8;
+    float *ret;
+
+    __asm__ volatile(
+        "andi    %[temp0],  %[idx],       3           \n\t"
+        "ext     %[temp1],  %[idx],       2,      2   \n\t"
+        "ext     %[temp2],  %[idx],       4,      2   \n\t"
+        "ext     %[temp3],  %[idx],       6,      2   \n\t"
+        "sll     %[temp0],  %[temp0],     2           \n\t"
+        "sll     %[temp1],  %[temp1],     2           \n\t"
+        "sll     %[temp2],  %[temp2],     2           \n\t"
+        "sll     %[temp3],  %[temp3],     2           \n\t"
+        "lwc1    %[temp4],  0(%[scale])               \n\t"
+        "lwxc1   %[temp5],  %[temp0](%[v])            \n\t"
+        "lwxc1   %[temp6],  %[temp1](%[v])            \n\t"
+        "lwxc1   %[temp7],  %[temp2](%[v])            \n\t"
+        "lwxc1   %[temp8],  %[temp3](%[v])            \n\t"
+        "mul.s   %[temp5],  %[temp5],     %[temp4]    \n\t"
+        "mul.s   %[temp6],  %[temp6],     %[temp4]    \n\t"
+        "mul.s   %[temp7],  %[temp7],     %[temp4]    \n\t"
+        "mul.s   %[temp8],  %[temp8],     %[temp4]    \n\t"
+        "addiu   %[ret],    %[dst],       16          \n\t"
+        "swc1    %[temp5],  0(%[dst])                 \n\t"
+        "swc1    %[temp6],  4(%[dst])                 \n\t"
+        "swc1    %[temp7],  8(%[dst])                 \n\t"
+        "swc1    %[temp8],  12(%[dst])                \n\t"
+
+        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+          [temp8]"=&f"(temp8), [ret]"=&r"(ret)
+        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+          [dst]"r"(dst)
+        : "memory"
+    );
+    return ret;
+}
+
+static inline float *VMUL2S_mips(float *dst, const float *v, unsigned idx,
+                            unsigned sign, const float *scale)
+{
+    int temp0, temp1, temp2, temp3, temp4, temp5;
+    float temp6, temp7, temp8, temp9;
+    float *ret;
+
+    __asm__ volatile(
+        "andi    %[temp0],  %[idx],       15         \n\t"
+        "ext     %[temp1],  %[idx],       4,     4   \n\t"
+        "lw      %[temp4],  0(%[scale])              \n\t"
+        "srl     %[temp2],  %[sign],      1          \n\t"
+        "sll     %[temp3],  %[sign],      31         \n\t"
+        "sll     %[temp2],  %[temp2],     31         \n\t"
+        "sll     %[temp0],  %[temp0],     2          \n\t"
+        "sll     %[temp1],  %[temp1],     2          \n\t"
+        "lwxc1   %[temp8],  %[temp0](%[v])           \n\t"
+        "lwxc1   %[temp9],  %[temp1](%[v])           \n\t"
+        "xor     %[temp5],  %[temp4],     %[temp2]   \n\t"
+        "xor     %[temp4],  %[temp4],     %[temp3]   \n\t"
+        "mtc1    %[temp5],  %[temp6]                 \n\t"
+        "mtc1    %[temp4],  %[temp7]                 \n\t"
+        "mul.s   %[temp8],  %[temp8],     %[temp6]   \n\t"
+        "mul.s   %[temp9],  %[temp9],     %[temp7]   \n\t"
+        "addiu   %[ret],    %[dst],       8          \n\t"
+        "swc1    %[temp8],  0(%[dst])                \n\t"
+        "swc1    %[temp9],  4(%[dst])                \n\t"
+
+        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+          [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+          [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+          [ret]"=&r"(ret)
+        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+          [dst]"r"(dst), [sign]"r"(sign)
+        : "memory"
+    );
+    return ret;
+}
+
+static inline float *VMUL4S_mips(float *dst, const float *v, unsigned idx,
+                            unsigned sign, const float *scale)
+{
+    int temp0, temp1, temp2, temp3, temp4;
+    float temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17;
+    float *ret;
+    unsigned int mask = 1U << 31;
+
+    __asm__ volatile(
+        "lw      %[temp0],   0(%[scale])               \n\t"
+        "and     %[temp1],   %[idx],       3           \n\t"
+        "ext     %[temp2],   %[idx],       2,      2   \n\t"
+        "ext     %[temp3],   %[idx],       4,      2   \n\t"
+        "ext     %[temp4],   %[idx],       6,      2   \n\t"
+        "sll     %[temp1],   %[temp1],     2           \n\t"
+        "sll     %[temp2],   %[temp2],     2           \n\t"
+        "sll     %[temp3],   %[temp3],     2           \n\t"
+        "sll     %[temp4],   %[temp4],     2           \n\t"
+        "lwxc1   %[temp10],  %[temp1](%[v])            \n\t"
+        "lwxc1   %[temp11],  %[temp2](%[v])            \n\t"
+        "lwxc1   %[temp12],  %[temp3](%[v])            \n\t"
+        "lwxc1   %[temp13],  %[temp4](%[v])            \n\t"
+        "and     %[temp1],   %[sign],      %[mask]     \n\t"
+        "ext     %[temp2],   %[idx],       12,     1   \n\t"
+        "ext     %[temp3],   %[idx],       13,     1   \n\t"
+        "ext     %[temp4],   %[idx],       14,     1   \n\t"
+        "sllv    %[sign],    %[sign],      %[temp2]    \n\t"
+        "xor     %[temp1],   %[temp0],     %[temp1]    \n\t"
+        "and     %[temp2],   %[sign],      %[mask]     \n\t"
+        "mtc1    %[temp1],   %[temp14]                 \n\t"
+        "xor     %[temp2],   %[temp0],     %[temp2]    \n\t"
+        "sllv    %[sign],    %[sign],      %[temp3]    \n\t"
+        "mtc1    %[temp2],   %[temp15]                 \n\t"
+        "and     %[temp3],   %[sign],      %[mask]     \n\t"
+        "sllv    %[sign],    %[sign],      %[temp4]    \n\t"
+        "xor     %[temp3],   %[temp0],     %[temp3]    \n\t"
+        "and     %[temp4],   %[sign],      %[mask]     \n\t"
+        "mtc1    %[temp3],   %[temp16]                 \n\t"
+        "xor     %[temp4],   %[temp0],     %[temp4]    \n\t"
+        "mtc1    %[temp4],   %[temp17]                 \n\t"
+        "mul.s   %[temp10],  %[temp10],    %[temp14]   \n\t"
+        "mul.s   %[temp11],  %[temp11],    %[temp15]   \n\t"
+        "mul.s   %[temp12],  %[temp12],    %[temp16]   \n\t"
+        "mul.s   %[temp13],  %[temp13],    %[temp17]   \n\t"
+        "addiu   %[ret],     %[dst],       16          \n\t"
+        "swc1    %[temp10],  0(%[dst])                 \n\t"
+        "swc1    %[temp11],  4(%[dst])                 \n\t"
+        "swc1    %[temp12],  8(%[dst])                 \n\t"
+        "swc1    %[temp13],  12(%[dst])                \n\t"
+
+        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+          [temp4]"=&r"(temp4), [temp10]"=&f"(temp10),
+          [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
+          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
+          [temp15]"=&f"(temp15), [temp16]"=&f"(temp16),
+          [temp17]"=&f"(temp17), [ret]"=&r"(ret),
+          [sign]"+r"(sign)
+        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+          [dst]"r"(dst), [mask]"r"(mask)
+        : "memory"
+    );
+    return ret;
+}
+
+#define VMUL2 VMUL2_mips
+#define VMUL4 VMUL4_mips
+#define VMUL2S VMUL2S_mips
+#define VMUL4S VMUL4S_mips
+#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
+
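+/* Sets the MIPS-optimized aacdec function pointers on the AACContext
+ * (implemented in mips/aacdec_mips.c). */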
+void ff_aacdec_init_mips(AACContext *c);
+
+#endif /* AVCODEC_MIPS_AACDEC_FLOAT_H */
diff --git a/libavcodec/mips/dsputil_mips.c b/libavcodec/mips/dsputil_mips.c
index 76dc664..172cbfa 100644
--- a/libavcodec/mips/dsputil_mips.c
+++ b/libavcodec/mips/dsputil_mips.c
@@ -26,7 +26,8 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * Author:  Zoran Lukic (zoranl at mips.com)
+ * Authors:  Zoran Lukic (zoranl at mips.com)
+ *           Darko Laus  (darko at mips.com)
  *
  * This file is part of FFmpeg.
  *
@@ -158,11 +159,90 @@ static void vector_fmul_window_mips(float *dst, const float *src0,
         );
     }
 }
+
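+/*
+ * In-place butterflies: for every i, v1[i] becomes v1[i] + v2[i] and v2[i]
+ * becomes the old v1[i] - v2[i].  The loop is unrolled four times and has no
+ * scalar tail, so len is expected to be a positive multiple of four.
+ */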
+static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
+                                   int len)
+{
+    float temp0, temp1, temp2, temp3, temp4;
+    float temp5, temp6, temp7, temp8, temp9;
+    float temp10, temp11, temp12, temp13, temp14, temp15;
+    int pom = (len >> 2) - 1;  /* unrolled-loop iterations before the tail */
+
+    /* loop unrolled 4 times */
+    __asm__ volatile (
+        "lwc1     %[temp0],    0(%[v1])                 \n\t"
+        "lwc1     %[temp1],    4(%[v1])                 \n\t"
+        "lwc1     %[temp2],    8(%[v1])                 \n\t"
+        "lwc1     %[temp3],    12(%[v1])                \n\t"
+        "lwc1     %[temp4],    0(%[v2])                 \n\t"
+        "lwc1     %[temp5],    4(%[v2])                 \n\t"
+        "lwc1     %[temp6],    8(%[v2])                 \n\t"
+        "lwc1     %[temp7],    12(%[v2])                \n\t"
+        "beq      %[pom],      $zero,       2f          \n\t"
+    "1:                                                 \n\t"
+        "sub.s    %[temp8],    %[temp0],    %[temp4]    \n\t"
+        "add.s    %[temp9],    %[temp0],    %[temp4]    \n\t"
+        "sub.s    %[temp10],   %[temp1],    %[temp5]    \n\t"
+        "add.s    %[temp11],   %[temp1],    %[temp5]    \n\t"
+        "sub.s    %[temp12],   %[temp2],    %[temp6]    \n\t"
+        "add.s    %[temp13],   %[temp2],    %[temp6]    \n\t"
+        "sub.s    %[temp14],   %[temp3],    %[temp7]    \n\t"
+        "add.s    %[temp15],   %[temp3],    %[temp7]    \n\t"
+        "addiu    %[v1],       %[v1],       16          \n\t"
+        "addiu    %[v2],       %[v2],       16          \n\t"
+        "addiu    %[pom],      %[pom],      -1          \n\t"
+        "lwc1     %[temp0],    0(%[v1])                 \n\t"
+        "lwc1     %[temp1],    4(%[v1])                 \n\t"
+        "lwc1     %[temp2],    8(%[v1])                 \n\t"
+        "lwc1     %[temp3],    12(%[v1])                \n\t"
+        "lwc1     %[temp4],    0(%[v2])                 \n\t"
+        "lwc1     %[temp5],    4(%[v2])                 \n\t"
+        "lwc1     %[temp6],    8(%[v2])                 \n\t"
+        "lwc1     %[temp7],    12(%[v2])                \n\t"
+        "swc1     %[temp9],    -16(%[v1])               \n\t"
+        "swc1     %[temp8],    -16(%[v2])               \n\t"
+        "swc1     %[temp11],   -12(%[v1])               \n\t"
+        "swc1     %[temp10],   -12(%[v2])               \n\t"
+        "swc1     %[temp13],   -8(%[v1])                \n\t"
+        "swc1     %[temp12],   -8(%[v2])                \n\t"
+        "swc1     %[temp15],   -4(%[v1])                \n\t"
+        "swc1     %[temp14],   -4(%[v2])                \n\t"
+        "bgtz     %[pom],      1b                       \n\t"
+    "2:                                                 \n\t"
+        "sub.s    %[temp8],    %[temp0],    %[temp4]    \n\t"
+        "add.s    %[temp9],    %[temp0],    %[temp4]    \n\t"
+        "sub.s    %[temp10],   %[temp1],    %[temp5]    \n\t"
+        "add.s    %[temp11],   %[temp1],    %[temp5]    \n\t"
+        "sub.s    %[temp12],   %[temp2],    %[temp6]    \n\t"
+        "add.s    %[temp13],   %[temp2],    %[temp6]    \n\t"
+        "sub.s    %[temp14],   %[temp3],    %[temp7]    \n\t"
+        "add.s    %[temp15],   %[temp3],    %[temp7]    \n\t"
+        "swc1     %[temp9],    0(%[v1])                 \n\t"
+        "swc1     %[temp8],    0(%[v2])                 \n\t"
+        "swc1     %[temp11],   4(%[v1])                 \n\t"
+        "swc1     %[temp10],   4(%[v2])                 \n\t"
+        "swc1     %[temp13],   8(%[v1])                 \n\t"
+        "swc1     %[temp12],   8(%[v2])                 \n\t"
+        "swc1     %[temp15],   12(%[v1])                \n\t"
+        "swc1     %[temp14],   12(%[v2])                \n\t"
+
+        : [v1]"+r"(v1), [v2]"+r"(v2), [pom]"+r"(pom), [temp0]"=&f"(temp0),
+          [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
+          [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+          [temp10]"=&f"(temp10), [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
+          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14), [temp15]"=&f"(temp15)
+        :
+        : "memory"
+    );
+}
 #endif /* HAVE_INLINE_ASM */
 
 av_cold void ff_dsputil_init_mips( DSPContext* c, AVCodecContext *avctx )
 {
 #if HAVE_INLINE_ASM
     c->vector_fmul_window = vector_fmul_window_mips;
-#endif
+    c->butterflies_float  = butterflies_float_mips;
+#endif /* HAVE_INLINE_ASM */
 }
diff --git a/libavutil/mips/float_dsp_mips.c b/libavutil/mips/float_dsp_mips.c
index 8c3611d..22aa3c3 100644
--- a/libavutil/mips/float_dsp_mips.c
+++ b/libavutil/mips/float_dsp_mips.c
@@ -26,7 +26,8 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * Author:  Branimir Vasic (bvasic at mips.com)
+ * Authors:  Branimir Vasic (bvasic at mips.com)
+ *           Mirjana Vulin  (mvulin at mips.com)
  *
  * This file is part of FFmpeg.
  *
@@ -104,10 +105,48 @@ static void vector_fmul_mips(float *dst, const float *src0, const float *src1,
         );
     }
 }
+
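+/*
+ * dst[i] = src[i] * mul for i = 0..len-1.  The loop is unrolled four times
+ * and its body runs at least once, so len must be a positive multiple of
+ * four (the usual constraint for AVFloatDSPContext.vector_fmul_scalar).
+ */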
+static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
+                                    int len)
+{
+    float temp0, temp1, temp2, temp3;
+    float *local_src = (float *)src; /* advanced by the asm loop */
+    float *end = local_src + len;
+
+    /* loop unrolled 4 times */
+    __asm__ volatile(
+        ".set    push                             \n\t"
+        ".set    noreorder                        \n\t"
+    "1:                                           \n\t"
+        "lwc1    %[temp0],   0(%[src])            \n\t"
+        "lwc1    %[temp1],   4(%[src])            \n\t"
+        "lwc1    %[temp2],   8(%[src])            \n\t"
+        "lwc1    %[temp3],   12(%[src])           \n\t"
+        "addiu   %[dst],     %[dst],     16       \n\t"
+        "mul.s   %[temp0],   %[temp0],   %[mul]   \n\t"
+        "mul.s   %[temp1],   %[temp1],   %[mul]   \n\t"
+        "mul.s   %[temp2],   %[temp2],   %[mul]   \n\t"
+        "mul.s   %[temp3],   %[temp3],   %[mul]   \n\t"
+        "addiu   %[src],     %[src],     16       \n\t"
+        "swc1    %[temp0],   -16(%[dst])          \n\t"
+        "swc1    %[temp1],   -12(%[dst])          \n\t"
+        "swc1    %[temp2],   -8(%[dst])           \n\t"
+        "bne     %[src],     %[end],     1b       \n\t"
+        " swc1   %[temp3],   -4(%[dst])           \n\t"
+        ".set    pop                              \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+          [dst]"+r"(dst), [src]"+r"(local_src)
+        : [end]"r"(end), [mul]"f"(mul)
+        : "memory"
+    );
+}
 #endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
 
 void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp) {
 #if HAVE_INLINE_ASM && HAVE_MIPSFPU
     fdsp->vector_fmul = vector_fmul_mips;
+    fdsp->vector_fmul_scalar = vector_fmul_scalar_mips;
 #endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
 }
-- 
1.7.3.4


