[FFmpeg-devel] [PATCH 2/2] avcodec: optimize mpegvideo decoder for Loongson-3 v1

周晓勇 (Zhou Xiaoyong) zhouxiaoyong at loongson.cn
Mon May 11 06:41:51 CEST 2015


From 3d5a9d0d38c96d6b6ec51b082102f3a231b5b881 Mon Sep 17 00:00:00 2001
From: ZhouXiaoyong <zhouxiaoyong at loongson.cn>
Date: Mon, 11 May 2015 09:27:00 +0800
Subject: [PATCH 2/2] avcodec: optimize mpegvideo decoder for Loongson-3 v1

Signed-off-by: ZhouXiaoyong <zhouxiaoyong at loongson.cn>
---
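Note for reviewers: the tail of ff_dct_unquantize_mpeg2_inter_mmi, after
the main loop, is meant to implement the MPEG-2 mismatch control that the
generic C path expresses roughly as follows (simplified sketch, with
"scantable" standing in for s->intra_scantable.permutated; parity is
order-independent, so the raster traversal used by the asm gives the same
result):

    /* inputs: int16_t block[64], int qscale, const uint16_t quant_matrix[64] */
    int i, sum = -1;
    for (i = 0; i <= nCoeffs; i++) {
        int j     = scantable[i];
        int level = block[j];
        if (level) {
            int neg = level < 0;
            if (neg)
                level = -level;
            level = (((level << 1) + 1) * qscale * quant_matrix[j]) >> 4;
            block[j] = neg ? -level : level;
        }
        sum += block[j];
    }
    block[63] ^= sum & 1;    /* keep the coefficient sum odd */

The intra dequantizers skip this step, as noted in the code, since intra
errors cannot accumulate.
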
 libavcodec/mips/Makefile             |    4 +
 libavcodec/mips/mpegvideo_init.c     |   44 +++
 libavcodec/mips/mpegvideo_loongson.c |  563 ++++++++++++++++++++++++++++++++++
 libavcodec/mips/mpegvideo_loongson.h |   40 +++
 libavcodec/mpegvideo.c               |    2 +
 libavcodec/mpegvideo.h               |    1 +
 6 files changed, 654 insertions(+), 0 deletions(-)

diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index eaedd7f..c9b3294 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -22,3 +22,7 @@ OBJS-$(CONFIG_HEVC_DECODER)               += mips/hevcdsp_init_mips.o
 OBJS-$(CONFIG_H264DSP)                    += mips/h264dsp_init_mips.o
 MSA-OBJS-$(CONFIG_HEVC_DECODER)           += mips/hevcdsp_msa.o
 MSA-OBJS-$(CONFIG_H264DSP)                += mips/h264dsp_msa.o
+
+#Loongson-3 SIMD Optimization
+LOONGSON3-OBJS-$(CONFIG_MPEGVIDEO)        += mips/mpegvideo_init.o
+LOONGSON3-OBJS-$(CONFIG_MPEGVIDEO)        += mips/mpegvideo_loongson.o
diff --git a/libavcodec/mips/mpegvideo_init.c b/libavcodec/mips/mpegvideo_init.c
new file mode 100644
index 0000000..ba8c801
--- /dev/null
+++ b/libavcodec/mips/mpegvideo_init.c
@@ -0,0 +1,44 @@
+/*
+ * Loongson optimized mpegvideo
+ *
+ * Copyright (c) 2015 Loongson Technology Corporation Limited
+ * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong at loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/mpegvideo.h"
+#include "mpegvideo_loongson.h"
+
+av_cold void ff_mpv_common_init_loongson(MpegEncContext *s)
+{
+    s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_mmi;
+    s->dct_unquantize_h263_inter = ff_dct_unquantize_h263_inter_mmi;
+    s->dct_unquantize_mpeg1_intra = ff_dct_unquantize_mpeg1_intra_mmi;
+    s->dct_unquantize_mpeg1_inter = ff_dct_unquantize_mpeg1_inter_mmi;
+
+    if (s->flags & CODEC_FLAG_BITEXACT) {
+        s->dct_unquantize_mpeg2_intra = ff_dct_unquantize_mpeg2_intra_bitexact_mmi;
+    }
+
+    s->dct_unquantize_mpeg2_inter = ff_dct_unquantize_mpeg2_inter_mmi;
+    s->denoise_dct = ff_denoise_dct_mmi;
+}
diff --git a/libavcodec/mips/mpegvideo_loongson.c b/libavcodec/mips/mpegvideo_loongson.c
new file mode 100644
index 0000000..50a97f3
--- /dev/null
+++ b/libavcodec/mips/mpegvideo_loongson.c
@@ -0,0 +1,563 @@
+/*
+ * Loongson optimized mpegvideo
+ *
+ * Copyright (c) 2015 Loongson Technology Corporation Limited
+ * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong at loongson.cn>
+ *                    Zhang Shuangshuang <zhangshuangshuang at ict.ac.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "mpegvideo_loongson.h"
+
+void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale)
+{
+    int64_t level, qmul, qadd, nCoeffs;
+
+    qmul = qscale << 1;
+    assert(s->block_last_index[n]>=0 || s->h263_aic);
+
+    if (!s->h263_aic) {
+        if (n<4)
+            level = block[0] * s->y_dc_scale;
+        else
+            level = block[0] * s->c_dc_scale;
+        qadd = (qscale-1) | 1;
+    } else {
+        qadd = 0;
+        level = block[0];
+    }
+
+    if(s->ac_pred)
+        nCoeffs = 63;
+    else
+        nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %1                  \n\r" //qmul
+        "xor $f10, $f10, $f10           \r\n"
+        "lwc1 $f10, %2                  \r\n" //qadd
+        "xor $f14, $f14, $f14           \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f10, $f10, $f10      \r\n"
+        "packsswh $f10, $f10, $f10      \r\n"
+        "psubh $f14, $f14, $f10         \r\n"
+        "xor $f8, $f8, $f8              \r\n"
+        ".p2align 4                     \r\n"
+        "1:                             \r\n"
+        "daddu $8, %0, %3               \r\n"
+        "gsldlc1 $f0, 7($8)             \r\n"
+        "gsldrc1 $f0, 0($8)             \r\n"
+        "gsldlc1 $f2, 15($8)            \r\n"
+        "gsldrc1 $f2, 8($8)             \r\n"
+        "mov.d $f4, $f0                 \r\n"
+        "mov.d $f6, $f2                 \r\n"
+        "pmullh $f0, $f0, $f12          \r\n"
+        "pmullh $f2, $f2, $f12          \r\n"
+        "pcmpgth $f4, $f4, $f8          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f8          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "paddh $f0, $f0, $f14           \r\n"
+        "paddh $f2, $f2, $f14           \r\n"
+        "xor $f4, $f4, $f0              \r\n"
+        "xor $f6, $f6, $f2              \r\n"
+        "pcmpeqh $f0, $f0, $f14         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f2, $f2, $f14         \r\n" // block[i] == 0 ? -1 : 0
+        "pandn $f0, $f0, $f4            \r\n"
+        "pandn $f2, $f2, $f6            \r\n"
+        "gssdlc1 $f0, 7($8)             \r\n"
+        "gssdrc1 $f0, 0($8)             \r\n"
+        "gssdlc1 $f2, 15($8)            \r\n"
+        "gssdrc1 $f2, 8($8)             \r\n"
+        "addi %3, %3, 16                \r\n"
+        "blez %3, 1b                    \r\n"
+        ::"r"(block+nCoeffs),"m"(qmul),"m"(qadd),"r"(2*(-nCoeffs))
+        :"$8","memory"
+    );
+
+    block[0] = level;
+}
+
+void ff_dct_unquantize_h263_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale)
+{
+    int64_t qmul, qadd, nCoeffs;
+
+    qmul = qscale << 1;
+    qadd = (qscale - 1) | 1;
+    assert(s->block_last_index[n]>=0 || s->h263_aic);
+    nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %1                  \r\n" //qmul
+        "xor $f10, $f10, $f10           \r\n"
+        "lwc1 $f10, %2                  \r\n" //qadd
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "xor $f14, $f14, $f14           \r\n"
+        "packsswh $f10, $f10, $f10      \r\n"
+        "packsswh $f10, $f10, $f10      \r\n"
+        "psubh $f14, $f14, $f10         \r\n"
+        "xor $f8, $f8, $f8              \r\n"
+        ".p2align 4                     \r\n"
+        "1:                             \r\n"
+        "daddu $8, %0, %3               \r\n"
+        "gsldlc1 $f0, 7($8)             \r\n"
+        "gsldrc1 $f0, 0($8)             \r\n"
+        "gsldlc1 $f2, 15($8)            \r\n"
+        "gsldrc1 $f2, 8($8)             \r\n"
+        "mov.d $f4, $f0                 \r\n"
+        "mov.d $f6, $f2                 \r\n"
+        "pmullh $f0, $f0, $f12          \r\n"
+        "pmullh $f2, $f2, $f12          \r\n"
+        "pcmpgth $f4, $f4, $f8          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f8          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "paddh $f0, $f0, $f14           \r\n"
+        "paddh $f2, $f2, $f14           \r\n"
+        "xor $f4, $f4, $f0              \r\n"
+        "xor $f6, $f6, $f2              \r\n"
+        "pcmpeqh $f0, $f0, $f14         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f2, $f2, $f14         \r\n" // block[i] == 0 ? -1 : 0
+        "pandn $f0, $f0, $f4            \r\n"
+        "pandn $f2, $f2, $f6            \r\n"
+        "gssdlc1 $f0, 7($8)             \r\n"
+        "gssdrc1 $f0, 0($8)             \r\n"
+        "gssdlc1 $f2, 15($8)            \r\n"
+        "gssdrc1 $f2, 8($8)             \r\n"
+        "addi %3, %3, 16                \r\n"
+        "blez %3, 1b                    \r\n"
+        ::"r"(block+nCoeffs),"m"(qmul),"m"(qadd),"r"(2*(-nCoeffs))
+        : "$8","memory"
+    );
+}
+
+void ff_dct_unquantize_mpeg1_intra_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale)
+{
+    int64_t nCoeffs;
+    const uint16_t *quant_matrix;
+    int block0;
+
+    assert(s->block_last_index[n]>=0);
+    nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]] + 1;
+
+    if (n<4)
+        block0 = block[0] * s->y_dc_scale;
+    else
+        block0 = block[0] * s->c_dc_scale;
+
+    /* XXX: only mpeg1 */
+    quant_matrix = s->intra_matrix;
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "pcmpeqh $f14, $f14, $f14       \r\n"
+        "dli $10, 15                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %2                  \r\n"
+        "psrlh $f14, $f14, $f16         \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "or $8, %3, $0                  \r\n"
+        ".p2align 4                     \r\n"
+        "1:                             \r\n"
+        "gsldxc1 $f0, 0($8, %0)         \r\n"
+        "gsldxc1 $f2, 8($8, %0)         \r\n"
+        "mov.d $f16, $f0                \r\n"
+        "mov.d $f18, $f2                \r\n"
+        "gsldxc1 $f8, 0($8, %1)         \r\n"
+        "gsldxc1 $f10, 8($8, %1)        \r\n"
+        "pmullh $f8, $f8, $f12          \r\n" // q=qscale*quant_matrix[i]
+        "pmullh $f10, $f10, $f12        \r\n" // q=qscale*quant_matrix[i]
+        "xor $f4, $f4, $f4              \r\n"
+        "xor $f6, $f6, $f6              \r\n"
+        "pcmpgth $f4, $f4, $f0          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f2          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n" // abs(block[i])
+        "psubh $f2, $f2, $f6            \r\n" // abs(block[i])
+        "pmullh $f0, $f0, $f8           \r\n" // abs(block[i])*q
+        "pmullh $f2, $f2, $f10          \r\n" // abs(block[i])*q
+        "xor $f8, $f8, $f8              \r\n"
+        "xor $f10, $f10, $f10           \r\n" // FIXME slow
+        "pcmpeqh $f8, $f8, $f16         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f10, $f10, $f18       \r\n" // block[i] == 0 ? -1 : 0
+        "dli $10, 3                     \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrah $f0, $f0, $f16           \r\n"
+        "psrah $f2, $f2, $f16           \r\n"
+        "psubh $f0, $f0, $f14           \r\n"
+        "psubh $f2, $f2, $f14           \r\n"
+        "or $f0, $f0, $f14              \r\n"
+        "or $f2, $f2, $f14              \r\n"
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n"
+        "psubh $f2, $f2, $f6            \r\n"
+        "pandn $f8, $f8, $f0            \r\n"
+        "pandn $f10, $f10, $f2          \r\n"
+        "gssdxc1 $f8, 0($8, %0)         \r\n"
+        "gssdxc1 $f10, 8($8, %0)        \r\n"
+        "addi $8, $8, 16                \r\n"
+        "bltz $8, 1b                    \r\n"
+        ::"r"(block+nCoeffs),"r"(quant_matrix+nCoeffs),"m"(qscale),
+          "g"(-2*nCoeffs)
+        : "$8","$10","memory"
+    );
+
+    block[0] = block0;
+}
+
+void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale)
+{
+    int64_t nCoeffs;
+    const uint16_t *quant_matrix;
+
+    assert(s->block_last_index[n] >= 0);
+    nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]] + 1;
+    quant_matrix = s->inter_matrix;
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "pcmpeqh $f14, $f14, $f14       \r\n"
+        "dli $10, 15                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %2                  \r\n"
+        "psrlh $f14, $f14, $f16         \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "or $8, %3, $0                  \r\n"
+        ".p2align 4                     \r\n"
+        "1:                             \r\n"
+        "gsldxc1 $f0, 0($8, %0)         \r\n"
+        "gsldxc1 $f2, 8($8, %0)         \r\n"
+        "mov.d $f16, $f0                \r\n"
+        "mov.d $f18, $f2                \r\n"
+        "gsldxc1 $f8, 0($8, %1)         \r\n"
+        "gsldxc1 $f10, 8($8, %1)        \r\n"
+        "pmullh $f8, $f8, $f12          \r\n" // q=qscale*quant_matrix[i]
+        "pmullh $f10, $f10, $f12        \r\n" // q=qscale*quant_matrix[i]
+        "xor $f4, $f4, $f4              \r\n"
+        "xor $f6, $f6, $f6              \r\n"
+        "pcmpgth $f4, $f4, $f0          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f2          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n" // abs(block[i])
+        "psubh $f2, $f2, $f6            \r\n" // abs(block[i])
+        "paddh $f0, $f0, $f0            \r\n" // abs(block[i])*2
+        "paddh $f2, $f2, $f2            \r\n" // abs(block[i])*2
+        "paddh $f0, $f0, $f14           \r\n" // abs(block[i])*2 + 1
+        "paddh $f2, $f2, $f14           \r\n" // abs(block[i])*2 + 1
+        "pmullh $f0, $f0, $f8           \r\n" // (abs(block[i])*2 + 1)*q
+        "pmullh $f2, $f2, $f10          \r\n" // (abs(block[i])*2 + 1)*q
+        "xor $f8, $f8, $f8              \r\n"
+        "xor $f10, $f10, $f10           \r\n" // FIXME slow
+        "pcmpeqh $f8, $f8, $f16         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f10, $f10, $f18       \r\n" // block[i] == 0 ? -1 : 0
+        "dli $10, 4                     \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrah $f0, $f0, $f16           \r\n"
+        "psrah $f2, $f2, $f16           \r\n"
+        "psubh $f0, $f0, $f14           \r\n"
+        "psubh $f2, $f2, $f14           \r\n"
+        "or $f0, $f0, $f14              \r\n"
+        "or $f2, $f2, $f14              \r\n"
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n"
+        "psubh $f2, $f2, $f6            \r\n"
+        "pandn $f8, $f8, $f0            \r\n"
+        "pandn $f10, $f10, $f2          \r\n"
+        "gssdxc1 $f8, 0($8, %0)         \r\n"
+        "gssdxc1 $f10, 8($8, %0)        \r\n"
+        "addi $8, $8, 16                \r\n"
+        "bltz $8, 1b                    \r\n"
+        ::"r"(block+nCoeffs),"r"(quant_matrix+nCoeffs),"m"(qscale),
+          "g"(-2*nCoeffs)
+        :"$8","$10","memory"
+    );
+}
+
+void ff_dct_unquantize_mpeg2_intra_bitexact_mmi(MpegEncContext *s,
+        int16_t *block, int n, int qscale)
+{
+    int64_t nCoeffs;
+    const uint16_t *quant_matrix;
+    int block0;
+
+    assert(s->block_last_index[n] >= 0);
+
+    if (s->alternate_scan)
+        nCoeffs = 63; //FIXME
+    else
+        nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]];
+
+    if (n<4)
+        block0 = block[0] * s->y_dc_scale;
+    else
+        block0 = block[0] * s->c_dc_scale;
+
+    quant_matrix = s->intra_matrix;
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "pcmpeqh $f14, $f14, $f14       \r\n"
+        "dli $10, 15                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %2                  \r\n"
+        "psrlh $f14, $f14, $f16         \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "or $8, %3, $0                  \r\n"
+        "1:                             \r\n"
+        "addu $2, $8, %0                \r\n"
+        "gsldlc1 $f0, 7($2)             \r\n"
+        "gsldrc1 $f0, 0($2)             \r\n"
+        "gsldlc1 $f2, 15($2)            \r\n"
+        "gsldrc1 $f2, 8($2)             \r\n"
+        "mov.d $f16, $f0                \r\n"
+        "mov.d $f18, $f2                \r\n"
+        "addu $2, $8, %1                \r\n"
+        "gsldlc1 $f8, 7($2)             \r\n"
+        "gsldrc1 $f8, 0($2)             \r\n"
+        "gsldlc1 $f10, 15($2)           \r\n"
+        "gsldrc1 $f10, 8($2)            \r\n"
+        "pmullh $f8, $f8, $f12          \r\n" // q=qscale*quant_matrix[i]
+        "pmullh $f10, $f10, $f12        \r\n" // q=qscale*quant_matrix[i]
+        "xor $f4, $f4, $f4              \r\n"
+        "xor $f6, $f6, $f6              \r\n"
+        "pcmpgth $f4, $f4, $f0          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f2          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n" // abs(block[i])
+        "psubh $f2, $f2, $f6            \r\n" // abs(block[i])
+        "pmullh $f0, $f0, $f8           \r\n" // abs(block[i])*q
+        "pmullh $f2, $f2, $f10          \r\n" // abs(block[i])*q
+        "xor $f8, $f8, $f8              \r\n"
+        "xor $f10, $f10, $f10           \r\n"
+        "pcmpeqh $f8, $f8, $f16         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f10 ,$f10, $f18       \r\n" // block[i] == 0 ? -1 : 0
+        "dli $10, 3                     \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrah $f0, $f0, $f16           \r\n"
+        "psrah $f2, $f2, $f16           \r\n"
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n"
+        "psubh $f2, $f2, $f6            \r\n"
+        "pandn $f8, $f8, $f0            \r\n"
+        "pandn $f10, $f10, $f2          \r\n"
+        "addu $2, $8, %0                \r\n"
+        "gssdlc1 $f8, 7($2)             \r\n"
+        "gssdrc1 $f8, 0($2)             \r\n"
+        "gssdlc1 $f10, 15($2)           \r\n"
+        "gssdrc1 $f10, 8($2)            \r\n"
+        "addi $8, $8, 16                \r\n"
+        "blez $8, 1b                    \r\n"
+        ::"r"(block+nCoeffs),"r"(quant_matrix+nCoeffs),"m"(qscale),
+          "g"(-2*nCoeffs)
+        : "$2","$8","$10","memory"
+    );
+
+    block[0] = block0;
+    /* Note, we do not do mismatch control for intra as errors cannot
+     * accumulate
+     */
+}
+
+void ff_dct_unquantize_mpeg2_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale)
+{
+    int64_t nCoeffs;
+    const uint16_t *quant_matrix;
+
+    assert(s->block_last_index[n] >= 0);
+
+    if (s->alternate_scan)
+        nCoeffs = 63; //FIXME
+    else
+        nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]];
+
+    quant_matrix = s->inter_matrix;
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "pcmpeqh $f14, $f14, $f14       \r\n"
+        "dli $10, 48                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "xor $f12, $f12, $f12           \r\n"
+        "lwc1 $f12, %2                  \r\n"
+        "psrlw $f14, $f14, $f16         \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "packsswh $f12, $f12, $f12      \r\n"
+        "or $8, %3, $0                  \r\n"
+        "1:                             \r\n"
+        "addu $2, $8, %0                \r\n"
+        "gsldlc1 $f0, 7($2)             \r\n"
+        "gsldrc1 $f0, 0($2)             \r\n"
+        "gsldlc1 $f2, 15($2)            \r\n"
+        "gsldrc1 $f2, 8($2)             \r\n"
+        "mov.d $f16, $f0                \r\n"
+        "mov.d $f18, $f2                \r\n"
+        "addu $2, $8, %1                \r\n"
+        "gsldlc1 $f8, 7($2)             \r\n"
+        "gsldrc1 $f8, 0($2)             \r\n"
+        "gsldlc1 $f10, 15($2)           \r\n"
+        "gsldrc1 $f10, 8($2)            \r\n"
+        "pmullh $f8, $f8, $f12          \r\n" // q=qscale*quant_matrix[i]
+        "pmullh $f10, $f10, $f12        \r\n" // q=qscale*quant_matrix[i]
+        "xor $f4, $f4, $f4              \r\n"
+        "xor $f6, $f6, $f6              \r\n"
+        "pcmpgth $f4, $f4, $f0          \r\n" // block[i] < 0 ? -1 : 0
+        "pcmpgth $f6, $f6, $f2          \r\n" // block[i] < 0 ? -1 : 0
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n" // abs(block[i])
+        "psubh $f2, $f2, $f6            \r\n" // abs(block[i])
+        "pmullh $f0, $f0, $f8           \r\n" // abs(block[i])*q
+        "pmullh $f2, $f2, $f10          \r\n" // abs(block[i])*q
+        "xor $f8, $f8, $f8              \r\n"
+        "xor $f10, $f10, $f10           \r\n"
+        "pcmpeqh $f8, $f8, $f16         \r\n" // block[i] == 0 ? -1 : 0
+        "pcmpeqh $f10 ,$f10, $f18       \r\n" // block[i] == 0 ? -1 : 0
+        "dli $10, 4                     \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrah $f0, $f0, $f16           \r\n"
+        "psrah $f2, $f2, $f16           \r\n"
+        "xor $f0, $f0, $f4              \r\n"
+        "xor $f2, $f2, $f6              \r\n"
+        "psubh $f0, $f0, $f4            \r\n"
+        "psubh $f2, $f2, $f6            \r\n"
+        "pandn $f8, $f8, $f0            \r\n"
+        "pandn $f10, $f10, $f2          \r\n"
+        "addu $2, $8, %0                \r\n"
+        "gssdlc1 $f8, 7($2)             \r\n"
+        "gssdrc1 $f8, 0($2)             \r\n"
+        "gssdlc1 $f10, 15($2)           \r\n"
+        "gssdrc1 $f10, 8($2)            \r\n"
+        "addi $8, $8, 16                \r\n"
+        "blez $8, 1b                    \r\n"
+        "nop                            \r\n"
+        "addu $2, %3, %0                \r\n"
+        "gslwlc1 $f0, 3($2)             \r\n"
+        "gslwrc1 $f0, 0($2)             \r\n"
+        "mov.d $f14, $f12               \r\n"
+        "dli $10, 32                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrlw $f14, $f14, $f16         \r\n"
+        "xor $f14, $f14, $f12           \r\n"
+        "mov.d $f12, $f14               \r\n"
+        "dli $10, 16                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrlw $f14, $f14, $f16         \r\n"
+        "xor $f14, $f14, $f12           \r\n"
+        "dli $10, 31                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psllh $f14, $f14, $f16         \r\n"
+        "dli $10, 15                    \r\n"
+        "dmtc1 $10, $f16                \r\n"
+        "psrlw $f14, $f14, $f16         \r\n"
+        "xor $f0, $f0, $f14             \r\n"
+        "gsswlc1 $f0, 3($2)             \r\n"
+        "gsswrc1 $f0, 0($2)             \r\n"
+        ::"r"(block+nCoeffs),"r"(quant_matrix+nCoeffs),"m"(qscale),
+          "g"(-2*nCoeffs)
+        : "$2","$8","$10","memory"
+    );
+    /* Mismatch control for inter blocks is done by the tail of the asm
+     * block above; the intra functions skip it, as intra errors cannot
+     * accumulate. */
+}
+
+void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block)
+{
+    const int intra = s->mb_intra;
+    int *sum = s->dct_error_sum[intra];
+    uint16_t *offset = s->dct_offset[intra];
+
+    s->dct_count[intra]++;
+
+    __asm__ volatile (
+        ".set arch=loongson3a           \r\n"
+        "xor $f14, $f14, $f14           \r\n"
+        "1:                             \r\n"
+        "ldc1 $f4, 0(%0)                \r\n"
+        "xor $f0, $f0, $f0              \r\n"
+        "ldc1 $f6, 8(%0)                \r\n"
+        "xor $f2, $f2, $f2              \r\n"
+        "pcmpgth $f0, $f0, $f4          \r\n"
+        "pcmpgth $f2, $f2, $f6          \r\n"
+        "xor $f4, $f4, $f0              \r\n"
+        "xor $f6, $f6, $f2              \r\n"
+        "psubh $f4, $f4, $f0            \r\n"
+        "psubh $f6, $f6, $f2            \r\n"
+        "ldc1 $f12, 0(%2)               \r\n"
+        "mov.d $f8, $f4                 \r\n"
+        "psubush $f4, $f4, $f12         \r\n"
+        "ldc1 $f12, 8(%2)               \r\n"
+        "mov.d $f10, $f6                \r\n"
+        "psubush $f6, $f6, $f12         \r\n"
+        "xor $f4, $f4, $f0              \r\n"
+        "xor $f6, $f6, $f2              \r\n"
+        "psubh $f4, $f4, $f0            \r\n"
+        "psubh $f6, $f6, $f2            \r\n"
+        "sdc1 $f4, 0(%0)                \r\n"
+        "sdc1 $f6, 8(%0)                \r\n"
+        "mov.d $f4, $f8                 \r\n"
+        "mov.d $f6, $f10                \r\n"
+        "punpcklhw $f8, $f8, $f14       \r\n"
+        "punpckhhw $f4, $f4, $f14       \r\n"
+        "punpcklhw $f10, $f10, $f14     \r\n"
+        "punpckhhw $f6, $f6, $f14       \r\n"
+        "ldc1 $f0, 0(%1)                \r\n"
+        "paddw $f8, $f8, $f0            \r\n"
+        "ldc1 $f0, 8(%1)                \r\n"
+        "paddw $f4, $f4, $f0            \r\n"
+        "ldc1 $f0, 16(%1)               \r\n"
+        "paddw $f10, $f10, $f0          \r\n"
+        "ldc1 $f0, 24(%1)               \r\n"
+        "paddw $f6, $f6, $f0            \r\n"
+        "sdc1 $f8, 0(%1)                \r\n"
+        "sdc1 $f4, 8(%1)                \r\n"
+        "sdc1 $f10, 16(%1)              \r\n"
+        "sdc1 $f6, 24(%1)               \r\n"
+        "addi %0, %0, 16                \r\n"
+        "addi %1, %1, 32                \r\n"
+        "addi %2, %2, 16                \r\n"
+        "sub $8, %3, %0                 \r\n"
+        "bgtz $8, 1b                    \r\n"
+        : "+r"(block),"+r"(sum),"+r"(offset)
+        : "r"(block+64)
+        : "$8","memory"
+    );
+}
diff --git a/libavcodec/mips/mpegvideo_loongson.h b/libavcodec/mips/mpegvideo_loongson.h
new file mode 100644
index 0000000..ca95c7b
--- /dev/null
+++ b/libavcodec/mips/mpegvideo_loongson.h
@@ -0,0 +1,40 @@
+/*
+ * Loongson optimized mpegvideo
+ *
+ * Copyright (c) 2015 Loongson Technology Corporation Limited
+ * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong at loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/mpegvideo.h"
+
+//extern uint16_t inv_zigzag_direct16[64];
+
+void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale);
+void ff_dct_unquantize_h263_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale);
+void ff_dct_unquantize_mpeg1_intra_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale);
+void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale);
+void ff_dct_unquantize_mpeg2_intra_bitexact_mmi(MpegEncContext *s,
+        int16_t *block, int n, int qscale);
+void ff_dct_unquantize_mpeg2_inter_mmi(MpegEncContext *s, int16_t *block,
+        int n, int qscale);
+void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block);
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 11d6729..d2c1750 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -420,6 +420,8 @@ static av_cold int dct_init(MpegEncContext *s)
         ff_mpv_common_init_ppc(s);
     if (ARCH_X86)
         ff_mpv_common_init_x86(s);
+    if (HAVE_LOONGSON3)
+        ff_mpv_common_init_loongson(s);
 
     return 0;
 }
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 0be2024..2f0b2b2 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -750,6 +750,7 @@ void ff_mpv_common_init_axp(MpegEncContext *s);
 void ff_mpv_common_init_neon(MpegEncContext *s);
 void ff_mpv_common_init_ppc(MpegEncContext *s);
 void ff_mpv_common_init_x86(MpegEncContext *s);
+void ff_mpv_common_init_loongson(MpegEncContext *s);
 
 int ff_mpv_common_frame_size_change(MpegEncContext *s);
 void ff_mpv_common_end(MpegEncContext *s);
--
1.7.1


