[FFmpeg-devel] [PATCH] avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for HEVC copy and hv mc functions
shivraj.patil at imgtec.com
Sun Apr 19 12:37:26 CEST 2015
From: Shivraj Patil <shivraj.patil at imgtec.com>
Signed-off-by: Shivraj Patil <shivraj.patil at imgtec.com>
---
libavcodec/mips/hevcdsp_init_mips.c | 19 +
libavcodec/mips/hevcdsp_mips.h | 20 +
libavcodec/mips/hevcdsp_msa.c | 1098 +++++++++++++++++++++++++++++++++++
libavutil/mips/generic_macros_msa.h | 133 +++++
4 files changed, 1270 insertions(+)
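Review note (not part of the commit message): the new pel_pixels kernels fill
the unfiltered [0][0] slots of the put_hevc_qpel table, and the hv kernels the
combined horizontal+vertical 8-tap [1][1] slots. At 8-bit depth the copy path
is equivalent to roughly the following scalar loop (an illustrative sketch
only; pel_pixels_ref and its signature are not part of the patch):

    /* Scalar reference for the pel_pixels copy path: widen 8-bit source
     * pixels to 16 bits and scale by 64 (<< 6), the intermediate format
     * the HEVC mc code uses at 8-bit depth. */
    static void pel_pixels_ref(int16_t *dst, ptrdiff_t dst_stride,
                               const uint8_t *src, ptrdiff_t src_stride,
                               int width, int height)
    {
        int x, y;

        for (y = 0; y < height; y++) {
            for (x = 0; x < width; x++)
                dst[x] = (int16_t) (src[x] << 6);
            src += src_stride;
            dst += dst_stride;
        }
    }

The MSA kernels below vectorize this per block width, handling up to eight
rows per loop iteration.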
diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c
index 05ed81f..4fec336 100644
--- a/libavcodec/mips/hevcdsp_init_mips.c
+++ b/libavcodec/mips/hevcdsp_init_mips.c
@@ -25,6 +25,16 @@ static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
const int bit_depth)
{
if (8 == bit_depth) {
+ c->put_hevc_qpel[1][0][0] = ff_hevc_put_hevc_pel_pixels4_8_msa;
+ c->put_hevc_qpel[2][0][0] = ff_hevc_put_hevc_pel_pixels6_8_msa;
+ c->put_hevc_qpel[3][0][0] = ff_hevc_put_hevc_pel_pixels8_8_msa;
+ c->put_hevc_qpel[4][0][0] = ff_hevc_put_hevc_pel_pixels12_8_msa;
+ c->put_hevc_qpel[5][0][0] = ff_hevc_put_hevc_pel_pixels16_8_msa;
+ c->put_hevc_qpel[6][0][0] = ff_hevc_put_hevc_pel_pixels24_8_msa;
+ c->put_hevc_qpel[7][0][0] = ff_hevc_put_hevc_pel_pixels32_8_msa;
+ c->put_hevc_qpel[8][0][0] = ff_hevc_put_hevc_pel_pixels48_8_msa;
+ c->put_hevc_qpel[9][0][0] = ff_hevc_put_hevc_pel_pixels64_8_msa;
+
c->put_hevc_qpel[1][0][1] = ff_hevc_put_hevc_qpel_h4_8_msa;
c->put_hevc_qpel[3][0][1] = ff_hevc_put_hevc_qpel_h8_8_msa;
c->put_hevc_qpel[4][0][1] = ff_hevc_put_hevc_qpel_h12_8_msa;
@@ -42,6 +52,15 @@ static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
c->put_hevc_qpel[7][1][0] = ff_hevc_put_hevc_qpel_v32_8_msa;
c->put_hevc_qpel[8][1][0] = ff_hevc_put_hevc_qpel_v48_8_msa;
c->put_hevc_qpel[9][1][0] = ff_hevc_put_hevc_qpel_v64_8_msa;
+
+ c->put_hevc_qpel[1][1][1] = ff_hevc_put_hevc_qpel_hv4_8_msa;
+ c->put_hevc_qpel[3][1][1] = ff_hevc_put_hevc_qpel_hv8_8_msa;
+ c->put_hevc_qpel[4][1][1] = ff_hevc_put_hevc_qpel_hv12_8_msa;
+ c->put_hevc_qpel[5][1][1] = ff_hevc_put_hevc_qpel_hv16_8_msa;
+ c->put_hevc_qpel[6][1][1] = ff_hevc_put_hevc_qpel_hv24_8_msa;
+ c->put_hevc_qpel[7][1][1] = ff_hevc_put_hevc_qpel_hv32_8_msa;
+ c->put_hevc_qpel[8][1][1] = ff_hevc_put_hevc_qpel_hv48_8_msa;
+ c->put_hevc_qpel[9][1][1] = ff_hevc_put_hevc_qpel_hv64_8_msa;
}
}
#endif // #if HAVE_MSA
diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h
index 13cdb5b..4f7f273 100644
--- a/libavcodec/mips/hevcdsp_mips.h
+++ b/libavcodec/mips/hevcdsp_mips.h
@@ -29,6 +29,16 @@ void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
intptr_t my, \
int width)
+MC(pel, pixels, 4);
+MC(pel, pixels, 6);
+MC(pel, pixels, 8);
+MC(pel, pixels, 12);
+MC(pel, pixels, 16);
+MC(pel, pixels, 24);
+MC(pel, pixels, 32);
+MC(pel, pixels, 48);
+MC(pel, pixels, 64);
+
MC(qpel, h, 4);
MC(qpel, h, 8);
MC(qpel, h, 12);
@@ -46,4 +56,14 @@ MC(qpel, v, 24);
MC(qpel, v, 32);
MC(qpel, v, 48);
MC(qpel, v, 64);
+
+MC(qpel, hv, 4);
+MC(qpel, hv, 8);
+MC(qpel, hv, 12);
+MC(qpel, hv, 16);
+MC(qpel, hv, 24);
+MC(qpel, hv, 32);
+MC(qpel, hv, 48);
+MC(qpel, hv, 64);
+
#undef MC
diff --git a/libavcodec/mips/hevcdsp_msa.c b/libavcodec/mips/hevcdsp_msa.c
index 88e97d6..fcc344b 100644
--- a/libavcodec/mips/hevcdsp_msa.c
+++ b/libavcodec/mips/hevcdsp_msa.c
@@ -21,6 +21,18 @@
#include "libavutil/mips/generic_macros_msa.h"
#include "libavcodec/mips/hevcdsp_mips.h"
+#define HEVC_FILT_8TAP_DPADD_W(vec0, vec1, vec2, vec3, \
+ filt0, filt1, filt2, filt3) \
+( { \
+ v4i32 out; \
+ \
+ out = __msa_dotp_s_w((v8i16) (vec0), (v8i16) (filt0)); \
+ out = __msa_dpadd_s_w(out, (v8i16) (vec1), (v8i16) (filt1)); \
+ out = __msa_dpadd_s_w(out, (v8i16) (vec2), (v8i16) (filt2)); \
+ out = __msa_dpadd_s_w(out, (v8i16) (vec3), (v8i16) (filt3)); \
+ out; \
+} )
+
#define HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3, \
filt0, filt1, filt2, filt3, \
var_in) \
@@ -34,6 +46,603 @@
out; \
} )
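+/* Copy (pel_pixels) kernels: widen 8-bit pixels to 16 bits and scale by
+ * 64 (<< 6), producing the HEVC mc intermediate format for 8-bit depth. */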
+static void hevc_copy_4w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ v16i8 zero = { 0 };
+
+ if (2 == height) {
+ uint64_t out0, out1;
+ v16i8 src0, src1;
+ v8i16 input0;
+
+ LOAD_2VECS_SB(src, src_stride, src0, src1);
+
+ src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+
+ input0 <<= 6;
+
+ out0 = __msa_copy_u_d((v2i64) input0, 0);
+ out1 = __msa_copy_u_d((v2i64) input0, 1);
+
+ STORE_DWORD(dst, out0);
+ dst += dst_stride;
+ STORE_DWORD(dst, out1);
+ } else if (4 == height) {
+ uint64_t out0, out1, out2, out3;
+ v16i8 src0, src1, src2, src3;
+ v8i16 input0, input1;
+
+ LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+ src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
+ src1 = (v16i8) __msa_ilvr_w((v4i32) src3, (v4i32) src2);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+
+ input0 <<= 6;
+ input1 <<= 6;
+
+ out0 = __msa_copy_u_d((v2i64) input0, 0);
+ out1 = __msa_copy_u_d((v2i64) input0, 1);
+ out2 = __msa_copy_u_d((v2i64) input1, 0);
+ out3 = __msa_copy_u_d((v2i64) input1, 1);
+
+ STORE_DWORD(dst, out0);
+ dst += dst_stride;
+ STORE_DWORD(dst, out1);
+ dst += dst_stride;
+ STORE_DWORD(dst, out2);
+ dst += dst_stride;
+ STORE_DWORD(dst, out3);
+ } else if (0 == height % 8) {
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+ v8i16 input0, input1, input2, input3;
+ uint32_t loop_cnt;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LOAD_8VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
+ src1 = (v16i8) __msa_ilvr_w((v4i32) src3, (v4i32) src2);
+ src2 = (v16i8) __msa_ilvr_w((v4i32) src5, (v4i32) src4);
+ src3 = (v16i8) __msa_ilvr_w((v4i32) src7, (v4i32) src6);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+ input2 = (v8i16) __msa_ilvr_b(zero, src2);
+ input3 = (v8i16) __msa_ilvr_b(zero, src3);
+
+ input0 <<= 6;
+ input1 <<= 6;
+ input2 <<= 6;
+ input3 <<= 6;
+
+ out0 = __msa_copy_u_d((v2i64) input0, 0);
+ out1 = __msa_copy_u_d((v2i64) input0, 1);
+ out2 = __msa_copy_u_d((v2i64) input1, 0);
+ out3 = __msa_copy_u_d((v2i64) input1, 1);
+ out4 = __msa_copy_u_d((v2i64) input2, 0);
+ out5 = __msa_copy_u_d((v2i64) input2, 1);
+ out6 = __msa_copy_u_d((v2i64) input3, 0);
+ out7 = __msa_copy_u_d((v2i64) input3, 1);
+
+ STORE_DWORD(dst, out0);
+ dst += dst_stride;
+ STORE_DWORD(dst, out1);
+ dst += dst_stride;
+ STORE_DWORD(dst, out2);
+ dst += dst_stride;
+ STORE_DWORD(dst, out3);
+ dst += dst_stride;
+ STORE_DWORD(dst, out4);
+ dst += dst_stride;
+ STORE_DWORD(dst, out5);
+ dst += dst_stride;
+ STORE_DWORD(dst, out6);
+ dst += dst_stride;
+ STORE_DWORD(dst, out7);
+ dst += dst_stride;
+ }
+ }
+}
+
+static void hevc_copy_6w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ uint32_t loop_cnt;
+ uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+ uint32_t out8, out9, out10, out11, out12, out13, out14, out15;
+ v16i8 zero = { 0 };
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v8i16 input0, input1, input2, input3, input4, input5, input6, input7;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LOAD_8VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+ input2 = (v8i16) __msa_ilvr_b(zero, src2);
+ input3 = (v8i16) __msa_ilvr_b(zero, src3);
+ input4 = (v8i16) __msa_ilvr_b(zero, src4);
+ input5 = (v8i16) __msa_ilvr_b(zero, src5);
+ input6 = (v8i16) __msa_ilvr_b(zero, src6);
+ input7 = (v8i16) __msa_ilvr_b(zero, src7);
+
+ input0 <<= 6;
+ input1 <<= 6;
+ input2 <<= 6;
+ input3 <<= 6;
+ input4 <<= 6;
+ input5 <<= 6;
+ input6 <<= 6;
+ input7 <<= 6;
+
+ out0 = __msa_copy_u_d((v2i64) input0, 0);
+ out1 = __msa_copy_u_d((v2i64) input1, 0);
+ out2 = __msa_copy_u_d((v2i64) input2, 0);
+ out3 = __msa_copy_u_d((v2i64) input3, 0);
+ out4 = __msa_copy_u_d((v2i64) input4, 0);
+ out5 = __msa_copy_u_d((v2i64) input5, 0);
+ out6 = __msa_copy_u_d((v2i64) input6, 0);
+ out7 = __msa_copy_u_d((v2i64) input7, 0);
+
+ out8 = __msa_copy_u_w((v4i32) input0, 2);
+ out9 = __msa_copy_u_w((v4i32) input1, 2);
+ out10 = __msa_copy_u_w((v4i32) input2, 2);
+ out11 = __msa_copy_u_w((v4i32) input3, 2);
+ out12 = __msa_copy_u_w((v4i32) input4, 2);
+ out13 = __msa_copy_u_w((v4i32) input5, 2);
+ out14 = __msa_copy_u_w((v4i32) input6, 2);
+ out15 = __msa_copy_u_w((v4i32) input7, 2);
+
+ STORE_DWORD(dst, out0);
+ STORE_WORD(dst + 4, out8);
+ dst += dst_stride;
+ STORE_DWORD(dst, out1);
+ STORE_WORD(dst + 4, out9);
+ dst += dst_stride;
+ STORE_DWORD(dst, out2);
+ STORE_WORD(dst + 4, out10);
+ dst += dst_stride;
+ STORE_DWORD(dst, out3);
+ STORE_WORD(dst + 4, out11);
+ dst += dst_stride;
+ STORE_DWORD(dst, out4);
+ STORE_WORD(dst + 4, out12);
+ dst += dst_stride;
+ STORE_DWORD(dst, out5);
+ STORE_WORD(dst + 4, out13);
+ dst += dst_stride;
+ STORE_DWORD(dst, out6);
+ STORE_WORD(dst + 4, out14);
+ dst += dst_stride;
+ STORE_DWORD(dst, out7);
+ STORE_WORD(dst + 4, out15);
+ dst += dst_stride;
+ }
+}
+
+static void hevc_copy_8w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ v16i8 zero = { 0 };
+
+ if (2 == height) {
+ v16i8 src0, src1;
+ v8i16 input0, input1;
+
+ LOAD_2VECS_SB(src, src_stride, src0, src1);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+
+ input0 <<= 6;
+ input1 <<= 6;
+
+ STORE_2VECS_SH(dst, dst_stride, input0, input1);
+ } else if (4 == height) {
+ v16i8 src0, src1, src2, src3;
+ v8i16 input0, input1, input2, input3;
+
+ LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+ input2 = (v8i16) __msa_ilvr_b(zero, src2);
+ input3 = (v8i16) __msa_ilvr_b(zero, src3);
+
+ input0 <<= 6;
+ input1 <<= 6;
+ input2 <<= 6;
+ input3 <<= 6;
+
+ STORE_4VECS_SH(dst, dst_stride, input0, input1, input2, input3);
+ } else if (6 == height) {
+ v16i8 src0, src1, src2, src3, src4, src5;
+ v8i16 input0, input1, input2, input3, input4, input5;
+
+ LOAD_6VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+ input2 = (v8i16) __msa_ilvr_b(zero, src2);
+ input3 = (v8i16) __msa_ilvr_b(zero, src3);
+ input4 = (v8i16) __msa_ilvr_b(zero, src4);
+ input5 = (v8i16) __msa_ilvr_b(zero, src5);
+
+ input0 <<= 6;
+ input1 <<= 6;
+ input2 <<= 6;
+ input3 <<= 6;
+ input4 <<= 6;
+ input5 <<= 6;
+
+ STORE_6VECS_SH(dst, dst_stride,
+ input0, input1, input2, input3, input4, input5);
+ } else if (0 == height % 8) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v8i16 input0, input1, input2, input3;
+ v8i16 input4, input5, input6, input7;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LOAD_8VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+ input2 = (v8i16) __msa_ilvr_b(zero, src2);
+ input3 = (v8i16) __msa_ilvr_b(zero, src3);
+ input4 = (v8i16) __msa_ilvr_b(zero, src4);
+ input5 = (v8i16) __msa_ilvr_b(zero, src5);
+ input6 = (v8i16) __msa_ilvr_b(zero, src6);
+ input7 = (v8i16) __msa_ilvr_b(zero, src7);
+
+ input0 <<= 6;
+ input1 <<= 6;
+ input2 <<= 6;
+ input3 <<= 6;
+ input4 <<= 6;
+ input5 <<= 6;
+ input6 <<= 6;
+ input7 <<= 6;
+
+ STORE_8VECS_SH(dst, dst_stride,
+ input0, input1, input2, input3,
+ input4, input5, input6, input7);
+ dst += (8 * dst_stride);
+ }
+ }
+}
+
+static void hevc_copy_12w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ uint32_t loop_cnt;
+ uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
+ v16i8 zero = { 0 };
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v8i16 input0, input1;
+ v8i16 input0_r, input1_r, input2_r, input3_r;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LOAD_8VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src0);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src1);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src2);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src3);
+
+ input0_r <<= 6;
+ input1_r <<= 6;
+ input2_r <<= 6;
+ input3_r <<= 6;
+
+ src0 = (v16i8) __msa_ilvl_w((v4i32) src1, (v4i32) src0);
+ src1 = (v16i8) __msa_ilvl_w((v4i32) src3, (v4i32) src2);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+
+ input0 <<= 6;
+ input1 <<= 6;
+
+ dst_val0 = __msa_copy_u_d((v2i64) input0, 0);
+ dst_val1 = __msa_copy_u_d((v2i64) input0, 1);
+ dst_val2 = __msa_copy_u_d((v2i64) input1, 0);
+ dst_val3 = __msa_copy_u_d((v2i64) input1, 1);
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+
+ STORE_DWORD(dst + 8, dst_val0);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val1);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val2);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val3);
+ dst += dst_stride;
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src4);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src5);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src6);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src7);
+
+ input0_r <<= 6;
+ input1_r <<= 6;
+ input2_r <<= 6;
+ input3_r <<= 6;
+
+ src0 = (v16i8) __msa_ilvl_w((v4i32) src5, (v4i32) src4);
+ src1 = (v16i8) __msa_ilvl_w((v4i32) src7, (v4i32) src6);
+
+ input0 = (v8i16) __msa_ilvr_b(zero, src0);
+ input1 = (v8i16) __msa_ilvr_b(zero, src1);
+
+ input0 <<= 6;
+ input1 <<= 6;
+
+ dst_val0 = __msa_copy_u_d((v2i64) input0, 0);
+ dst_val1 = __msa_copy_u_d((v2i64) input0, 1);
+ dst_val2 = __msa_copy_u_d((v2i64) input1, 0);
+ dst_val3 = __msa_copy_u_d((v2i64) input1, 1);
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+
+ STORE_DWORD(dst + 8, dst_val0);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val1);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val2);
+ dst += dst_stride;
+ STORE_DWORD(dst + 8, dst_val3);
+ dst += dst_stride;
+ }
+}
+
+static void hevc_copy_16multx8mult_msa(uint8_t * __restrict src,
+ int32_t src_stride,
+ int16_t * __restrict dst,
+ int32_t dst_stride,
+ int32_t height,
+ int32_t width)
+{
+ uint8_t *src_tmp;
+ int16_t *dst_tmp;
+ uint32_t loop_cnt, cnt;
+ v16i8 zero = { 0 };
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v8i16 input0_r, input1_r, input2_r, input3_r;
+ v8i16 input0_l, input1_l, input2_l, input3_l;
+
+ for (cnt = (width >> 4); cnt--;) {
+ src_tmp = src;
+ dst_tmp = dst;
+
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ LOAD_8VECS_SB(src_tmp, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src_tmp += (8 * src_stride);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src0);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src0);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src1);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src1);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src2);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src2);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src3);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src3);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst_tmp, dst_stride,
+ input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst_tmp + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ dst_tmp += (4 * dst_stride);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src4);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src4);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src5);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src5);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src6);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src6);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src7);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src7);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst_tmp, dst_stride,
+ input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst_tmp + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ dst_tmp += (4 * dst_stride);
+ }
+
+ src += 16;
+ dst += 16;
+ }
+}
+
+static void hevc_copy_16w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ v16i8 zero = { 0 };
+
+ if (4 == height) {
+ v16i8 src0, src1, src2, src3;
+ v8i16 input0_r, input1_r, input2_r, input3_r;
+ v8i16 input0_l, input1_l, input2_l, input3_l;
+
+ LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src0);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src0);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src1);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src1);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src2);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src2);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src3);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src3);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ } else if (12 == height) {
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+ v16i8 src8, src9, src10, src11;
+ v8i16 input0_r, input1_r, input2_r, input3_r;
+ v8i16 input0_l, input1_l, input2_l, input3_l;
+
+ LOAD_8VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6, src7);
+ src += (8 * src_stride);
+
+ LOAD_4VECS_SB(src, src_stride, src8, src9, src10, src11);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src0);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src0);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src1);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src1);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src2);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src2);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src3);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src3);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ dst += (4 * dst_stride);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src4);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src4);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src5);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src5);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src6);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src6);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src7);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src7);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ dst += (4 * dst_stride);
+
+ input0_r = (v8i16) __msa_ilvr_b(zero, src8);
+ input0_l = (v8i16) __msa_ilvl_b(zero, src8);
+ input1_r = (v8i16) __msa_ilvr_b(zero, src9);
+ input1_l = (v8i16) __msa_ilvl_b(zero, src9);
+ input2_r = (v8i16) __msa_ilvr_b(zero, src10);
+ input2_l = (v8i16) __msa_ilvl_b(zero, src10);
+ input3_r = (v8i16) __msa_ilvr_b(zero, src11);
+ input3_l = (v8i16) __msa_ilvl_b(zero, src11);
+
+ input0_r <<= 6;
+ input0_l <<= 6;
+ input1_r <<= 6;
+ input1_l <<= 6;
+ input2_r <<= 6;
+ input2_l <<= 6;
+ input3_r <<= 6;
+ input3_l <<= 6;
+
+ STORE_4VECS_SH(dst, dst_stride, input0_r, input1_r, input2_r, input3_r);
+ STORE_4VECS_SH((dst + 8), dst_stride,
+ input0_l, input1_l, input2_l, input3_l);
+ } else if (0 == (height % 8)) {
+ hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride,
+ height, 16);
+ }
+}
+
+static void hevc_copy_24w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
+
+ hevc_copy_8w_msa(src + 16, src_stride, dst + 16, dst_stride, height);
+}
+
+static void hevc_copy_32w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
+}
+
+static void hevc_copy_48w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 48);
+}
+
+static void hevc_copy_64w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ int32_t height)
+{
+ hevc_copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
+}
+
static void hevc_hz_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
int16_t * __restrict dst, int32_t dst_stride,
const int8_t * __restrict filter, int32_t height)
@@ -1223,6 +1832,468 @@ static void hevc_vt_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
filter, height, 64);
}
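+/* hv mc: a horizontal 8-tap pass into 16-bit intermediates, then a
+ * vertical 8-tap pass with 32-bit accumulation, scaled back with >> 6. */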
+static void hevc_hv_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ uint32_t loop_cnt;
+ uint64_t out0, out1;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v8i16 filt0, filt1, filt2, filt3, filter_vec;
+ v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
+ v16i8 mask1, mask2, mask3;
+ v8u16 const_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
+ v8i16 dst30, dst41, dst52, dst63, dst66, dst87;
+ v4i32 dst0_r, dst1_r;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
+ v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
+ v16i8 tmp;
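+ /* 4-wide columns are packed two rows per register: the low half of each
+  * shuffle mask gathers sample pairs from one row, the high half from the
+  * row three strides later, so each dot product filters two rows. */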
+ v16i8 mask0 = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
+ };
+ v8u16 mask4 = { 0, 4, 1, 5, 2, 6, 3, 7 };
+
+ src -= ((3 * src_stride) + 3);
+
+ filter_vec = LOAD_SH(filter_x);
+ filt0 = __msa_splati_h(filter_vec, 0);
+ filt1 = __msa_splati_h(filter_vec, 1);
+ filt2 = __msa_splati_h(filter_vec, 2);
+ filt3 = __msa_splati_h(filter_vec, 3);
+
+ filter_vec = LOAD_SH(filter_y);
+ tmp = __msa_clti_s_b((v16i8) filter_vec, 0);
+ filter_vec = (v8i16) __msa_ilvr_b(tmp, (v16i8) filter_vec);
+
+ filt_h0 = __msa_splati_w((v4i32) filter_vec, 0);
+ filt_h1 = __msa_splati_w((v4i32) filter_vec, 1);
+ filt_h2 = __msa_splati_w((v4i32) filter_vec, 2);
+ filt_h3 = __msa_splati_w((v4i32) filter_vec, 3);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+ mask3 = mask0 + 6;
+
+ const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec <<= 6;
+
+ LOAD_7VECS_SB(src, src_stride,
+ src0, src1, src2, src3, src4, src5, src6);
+ src += (7 * src_stride);
+
+ XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+ src0, src1, src2, src3, src4, src5, src6, 128);
+
+ /* Row 0 Row 1 Row 2 Row 3 */
+ vec0 = __msa_vshf_b(mask0, src3, src0);
+ vec1 = __msa_vshf_b(mask1, src3, src0);
+ vec2 = __msa_vshf_b(mask2, src3, src0);
+ vec3 = __msa_vshf_b(mask3, src3, src0);
+
+ vec4 = __msa_vshf_b(mask0, src4, src1);
+ vec5 = __msa_vshf_b(mask1, src4, src1);
+ vec6 = __msa_vshf_b(mask2, src4, src1);
+ vec7 = __msa_vshf_b(mask3, src4, src1);
+
+ vec8 = __msa_vshf_b(mask0, src5, src2);
+ vec9 = __msa_vshf_b(mask1, src5, src2);
+ vec10 = __msa_vshf_b(mask2, src5, src2);
+ vec11 = __msa_vshf_b(mask3, src5, src2);
+
+ vec12 = __msa_vshf_b(mask0, src6, src3);
+ vec13 = __msa_vshf_b(mask1, src6, src3);
+ vec14 = __msa_vshf_b(mask2, src6, src3);
+ vec15 = __msa_vshf_b(mask3, src6, src3);
+
+ dst30 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst41 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst52 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst63 = HEVC_FILT_8TAP_DPADD_H(vec12, vec13, vec14, vec15,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst10_r = __msa_ilvr_h(dst41, dst30);
+ dst21_r = __msa_ilvr_h(dst52, dst41);
+ dst32_r = __msa_ilvr_h(dst63, dst52);
+
+ dst43_r = __msa_ilvl_h(dst41, dst30);
+ dst54_r = __msa_ilvl_h(dst52, dst41);
+ dst65_r = __msa_ilvl_h(dst63, dst52);
+
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
+
+ for (loop_cnt = height >> 1; loop_cnt--;) {
+ LOAD_2VECS_SB(src, src_stride, src7, src8);
+ src += (2 * src_stride);
+
+ src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
+ src8 = (v16i8) __msa_xori_b((v16u8) src8, 128);
+
+ vec0 = __msa_vshf_b(mask0, src8, src7);
+ vec1 = __msa_vshf_b(mask1, src8, src7);
+ vec2 = __msa_vshf_b(mask2, src8, src7);
+ vec3 = __msa_vshf_b(mask3, src8, src7);
+
+ dst87 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst76_r = __msa_ilvr_h(dst87, dst66);
+
+ dst0_r = HEVC_FILT_8TAP_DPADD_W(dst10_r, dst32_r, dst54_r, dst76_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst87_r = __msa_vshf_h((v8i16) mask4, dst87, dst87);
+
+ dst1_r = HEVC_FILT_8TAP_DPADD_W(dst21_r, dst43_r, dst65_r, dst87_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst0_r >>= 6;
+ dst1_r >>= 6;
+
+ dst0_r = (v4i32) __msa_pckev_h((v8i16) dst1_r, (v8i16) dst0_r);
+
+ out0 = __msa_copy_u_d((v2i64) dst0_r, 0);
+ out1 = __msa_copy_u_d((v2i64) dst0_r, 1);
+
+ STORE_DWORD(dst, out0);
+ dst += dst_stride;
+ STORE_DWORD(dst, out1);
+ dst += dst_stride;
+
+ dst10_r = dst32_r;
+ dst32_r = dst54_r;
+ dst54_r = dst76_r;
+
+ dst21_r = dst43_r;
+ dst43_r = dst65_r;
+ dst65_r = dst87_r;
+
+ dst66 = (v8i16) __msa_splati_d((v2i64) dst87, 1);
+ }
+}
+
+static void hevc_hv_8t_8multx2mult_msa(uint8_t * __restrict src,
+ int32_t src_stride,
+ int16_t * __restrict dst,
+ int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height, int32_t width)
+{
+ uint32_t loop_cnt, cnt;
+ uint8_t *src_tmp;
+ int16_t *dst_tmp;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ v8i16 filt0, filt1, filt2, filt3, filter_vec;
+ v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
+ v16i8 mask1, mask2, mask3;
+ v8u16 const_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
+ v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
+ v4i32 dst0_r, dst0_l;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
+ v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
+ v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
+ v16i8 tmp;
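+ /* 8-wide rows fill a whole register, so mask0..mask3 simply slide a
+  * two-byte sampling window across a single source vector. */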
+ v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+ src -= ((3 * src_stride) + 3);
+
+ filter_vec = LOAD_SH(filter_x);
+ filt0 = __msa_splati_h(filter_vec, 0);
+ filt1 = __msa_splati_h(filter_vec, 1);
+ filt2 = __msa_splati_h(filter_vec, 2);
+ filt3 = __msa_splati_h(filter_vec, 3);
+
+ filter_vec = LOAD_SH(filter_y);
+ tmp = __msa_clti_s_b((v16i8) filter_vec, 0);
+ filter_vec = (v8i16) __msa_ilvr_b(tmp, (v16i8) filter_vec);
+
+ filt_h0 = __msa_splati_w((v4i32) filter_vec, 0);
+ filt_h1 = __msa_splati_w((v4i32) filter_vec, 1);
+ filt_h2 = __msa_splati_w((v4i32) filter_vec, 2);
+ filt_h3 = __msa_splati_w((v4i32) filter_vec, 3);
+
+ mask1 = mask0 + 2;
+ mask2 = mask0 + 4;
+ mask3 = mask0 + 6;
+
+ const_vec = (v8u16) __msa_ldi_h(128);
+ const_vec <<= 6;
+
+ for (cnt = width >> 3; cnt--;) {
+ src_tmp = src;
+ dst_tmp = dst;
+
+ LOAD_7VECS_SB(src_tmp, src_stride,
+ src0, src1, src2, src3, src4, src5, src6);
+ src_tmp += (7 * src_stride);
+
+ XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+ src0, src1, src2, src3, src4, src5, src6, 128);
+
+ /* Row 0 Row 1 Row 2 Row 3 */
+ vec0 = __msa_vshf_b(mask0, src0, src0);
+ vec1 = __msa_vshf_b(mask1, src0, src0);
+ vec2 = __msa_vshf_b(mask2, src0, src0);
+ vec3 = __msa_vshf_b(mask3, src0, src0);
+
+ vec4 = __msa_vshf_b(mask0, src1, src1);
+ vec5 = __msa_vshf_b(mask1, src1, src1);
+ vec6 = __msa_vshf_b(mask2, src1, src1);
+ vec7 = __msa_vshf_b(mask3, src1, src1);
+
+ vec8 = __msa_vshf_b(mask0, src2, src2);
+ vec9 = __msa_vshf_b(mask1, src2, src2);
+ vec10 = __msa_vshf_b(mask2, src2, src2);
+ vec11 = __msa_vshf_b(mask3, src2, src2);
+
+ vec12 = __msa_vshf_b(mask0, src3, src3);
+ vec13 = __msa_vshf_b(mask1, src3, src3);
+ vec14 = __msa_vshf_b(mask2, src3, src3);
+ vec15 = __msa_vshf_b(mask3, src3, src3);
+
+ dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst1 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst2 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst3 = HEVC_FILT_8TAP_DPADD_H(vec12, vec13, vec14, vec15,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ /* Row 4 Row 5 Row 6 */
+ vec0 = __msa_vshf_b(mask0, src4, src4);
+ vec1 = __msa_vshf_b(mask1, src4, src4);
+ vec2 = __msa_vshf_b(mask2, src4, src4);
+ vec3 = __msa_vshf_b(mask3, src4, src4);
+
+ vec4 = __msa_vshf_b(mask0, src5, src5);
+ vec5 = __msa_vshf_b(mask1, src5, src5);
+ vec6 = __msa_vshf_b(mask2, src5, src5);
+ vec7 = __msa_vshf_b(mask3, src5, src5);
+
+ vec8 = __msa_vshf_b(mask0, src6, src6);
+ vec9 = __msa_vshf_b(mask1, src6, src6);
+ vec10 = __msa_vshf_b(mask2, src6, src6);
+ vec11 = __msa_vshf_b(mask3, src6, src6);
+
+ dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst5 = HEVC_FILT_8TAP_DPADD_H(vec4, vec5, vec6, vec7,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ dst6 = HEVC_FILT_8TAP_DPADD_H(vec8, vec9, vec10, vec11,
+ filt0, filt1, filt2, filt3, const_vec);
+
+ ILVR_H_6VECS_SH(dst0, dst2, dst4, dst1, dst3, dst5,
+ dst1, dst3, dst5, dst2, dst4, dst6,
+ dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r);
+
+ ILVL_H_6VECS_SH(dst0, dst2, dst4, dst1, dst3, dst5,
+ dst1, dst3, dst5, dst2, dst4, dst6,
+ dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l);
+
+ for (loop_cnt = height >> 1; loop_cnt--;) {
+ src7 = LOAD_SB(src_tmp);
+ src_tmp += src_stride;
+
+ src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
+
+ vec0 = __msa_vshf_b(mask0, src7, src7);
+ vec1 = __msa_vshf_b(mask1, src7, src7);
+ vec2 = __msa_vshf_b(mask2, src7, src7);
+ vec3 = __msa_vshf_b(mask3, src7, src7);
+
+ dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3,
+ const_vec);
+
+ dst76_r = __msa_ilvr_h(dst7, dst6);
+ dst76_l = __msa_ilvl_h(dst7, dst6);
+
+ dst0_r = HEVC_FILT_8TAP_DPADD_W(dst10_r, dst32_r, dst54_r, dst76_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst0_l = HEVC_FILT_8TAP_DPADD_W(dst10_l, dst32_l, dst54_l, dst76_l,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst0_r >>= 6;
+ dst0_l >>= 6;
+
+ dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+
+ STORE_SW(dst0_r, dst_tmp);
+ dst_tmp += dst_stride;
+
+ /* Next row */
+ src8 = LOAD_SB(src_tmp);
+ src_tmp += src_stride;
+
+ src8 = (v16i8) __msa_xori_b((v16u8) src8, 128);
+
+ vec0 = __msa_vshf_b(mask0, src8, src8);
+ vec1 = __msa_vshf_b(mask1, src8, src8);
+ vec2 = __msa_vshf_b(mask2, src8, src8);
+ vec3 = __msa_vshf_b(mask3, src8, src8);
+
+ dst8 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+ filt0, filt1, filt2, filt3,
+ const_vec);
+
+ dst87_r = __msa_ilvr_h(dst8, dst7);
+ dst87_l = __msa_ilvl_h(dst8, dst7);
+
+ dst6 = dst8;
+
+ dst0_r = HEVC_FILT_8TAP_DPADD_W(dst21_r, dst43_r, dst65_r, dst87_r,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst0_l = HEVC_FILT_8TAP_DPADD_W(dst21_l, dst43_l, dst65_l, dst87_l,
+ filt_h0, filt_h1, filt_h2, filt_h3);
+
+ dst0_r >>= 6;
+ dst0_l >>= 6;
+
+ dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+
+ STORE_SW(dst0_r, dst_tmp);
+ dst_tmp += dst_stride;
+
+ dst10_r = dst32_r;
+ dst32_r = dst54_r;
+ dst54_r = dst76_r;
+
+ dst10_l = dst32_l;
+ dst32_l = dst54_l;
+ dst54_l = dst76_l;
+
+ dst21_r = dst43_r;
+ dst43_r = dst65_r;
+ dst65_r = dst87_r;
+
+ dst21_l = dst43_l;
+ dst43_l = dst65_l;
+ dst65_l = dst87_l;
+ }
+
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void hevc_hv_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 8);
+}
+
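+/* 12-wide hv is split into the 8-wide kernel plus the 4-wide column
+ * kernel for the remaining samples. */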
+static void hevc_hv_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 8);
+
+ hevc_hv_8t_4w_msa(src + 8, src_stride, dst + 8, dst_stride,
+ filter_x, filter_y, height);
+}
+
+static void hevc_hv_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 16);
+}
+
+static void hevc_hv_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 24);
+}
+
+static void hevc_hv_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 32);
+}
+
+static void hevc_hv_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 48);
+}
+
+static void hevc_hv_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
+ int16_t * __restrict dst, int32_t dst_stride,
+ const int8_t * __restrict filter_x,
+ const int8_t * __restrict filter_y,
+ int32_t height)
+{
+ hevc_hv_8t_8multx2mult_msa(src, src_stride, dst, dst_stride,
+ filter_x, filter_y, height, 64);
+}
+
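+/* Adapters from the HEVCDSPContext function-pointer signature to the
+ * width-specialized copy kernels above. */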
+#define MC_COPY(WIDTH) \
+void ff_hevc_put_hevc_pel_pixels##WIDTH##_8_msa(int16_t *dst, \
+ uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int height, \
+ intptr_t mx, \
+ intptr_t my, \
+ int width) \
+{ \
+ hevc_copy_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE, height); \
+}
+
+MC_COPY(4);
+MC_COPY(6);
+MC_COPY(8);
+MC_COPY(12);
+MC_COPY(16);
+MC_COPY(24);
+MC_COPY(32);
+MC_COPY(48);
+MC_COPY(64);
+
+#undef MC_COPY
+
#define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
uint8_t *src, \
@@ -1257,3 +2328,30 @@ MC(qpel, v, 48, 8, vt, my);
MC(qpel, v, 64, 8, vt, my);
#undef MC
+
+#define MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
+void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
+ uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int height, \
+ intptr_t mx, \
+ intptr_t my, \
+ int width) \
+{ \
+ const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1]; \
+ const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1]; \
+ \
+ hevc_##DIR1##_##TAP##t_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE, \
+ filter_x, filter_y, height); \
+}
+
+MC_HV(qpel, hv, 4, 8, hv);
+MC_HV(qpel, hv, 8, 8, hv);
+MC_HV(qpel, hv, 12, 8, hv);
+MC_HV(qpel, hv, 16, 8, hv);
+MC_HV(qpel, hv, 24, 8, hv);
+MC_HV(qpel, hv, 32, 8, hv);
+MC_HV(qpel, hv, 48, 8, hv);
+MC_HV(qpel, hv, 64, 8, hv);
+
+#undef MC_HV
diff --git a/libavutil/mips/generic_macros_msa.h b/libavutil/mips/generic_macros_msa.h
index d2fd87c..234528a 100644
--- a/libavutil/mips/generic_macros_msa.h
+++ b/libavutil/mips/generic_macros_msa.h
@@ -50,7 +50,25 @@
*((v8i16 *) (pdest)) = (vec); \
}
+#define STORE_SW(vec, pdest) \
+{ \
+ *((v4i32 *) (pdest)) = (vec); \
+}
+
#if (__mips_isa_rev >= 6)
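+ /* R6 drops swl/swr and permits plain sw on possibly unaligned addresses;
+  * the pre-R6 variants below fall back to the assembler's unaligned-store
+  * macros (usw and friends) instead. */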
+ #define STORE_WORD(pdst, val) \
+ { \
+ uint8_t *dst_ptr_m = (uint8_t *) (pdst); \
+ uint32_t val_m = (val); \
+ \
+ __asm__ __volatile__ ( \
+ "sw %[val_m], %[dst_ptr_m] \n\t" \
+ \
+ : [dst_ptr_m] "=m" (*dst_ptr_m) \
+ : [val_m] "r" (val_m) \
+ ); \
+ }
+
#define STORE_DWORD(pdst, val) \
{ \
uint8_t *dst_ptr_m = (uint8_t *) (pdst); \
@@ -64,6 +82,19 @@
); \
}
#else
+ #define STORE_WORD(pdst, val) \
+ { \
+ uint8_t *dst_ptr_m = (uint8_t *) (pdst); \
+ uint32_t val_m = (val); \
+ \
+ __asm__ __volatile__ ( \
+ "usw %[val_m], %[dst_ptr_m] \n\t" \
+ \
+ : [dst_ptr_m] "=m" (*dst_ptr_m) \
+ : [val_m] "r" (val_m) \
+ ); \
+ }
+
#define STORE_DWORD(pdst, val) \
{ \
uint8_t *dst1_m = (uint8_t *) (pdst); \
@@ -83,6 +114,13 @@
}
#endif
+#define LOAD_2VECS_SB(psrc, stride, \
+ val0, val1) \
+{ \
+ val0 = LOAD_SB(psrc + 0 * stride); \
+ val1 = LOAD_SB(psrc + 1 * stride); \
+}
+
#define LOAD_4VECS_SB(psrc, stride, \
val0, val1, val2, val3) \
{ \
@@ -92,6 +130,15 @@
val3 = LOAD_SB(psrc + 3 * stride); \
}
+#define LOAD_6VECS_SB(psrc, stride, \
+ out0, out1, out2, out3, out4, out5) \
+{ \
+ LOAD_4VECS_SB((psrc), (stride), \
+ (out0), (out1), (out2), (out3)); \
+ LOAD_2VECS_SB((psrc + 4 * stride), (stride), \
+ (out4), (out5)); \
+}
+
#define LOAD_7VECS_SB(psrc, stride, \
val0, val1, val2, val3, \
val4, val5, val6) \
@@ -115,6 +162,48 @@
(out4), (out5), (out6), (out7)); \
}
+#define STORE_2VECS_SH(ptr, stride, \
+ in0, in1) \
+{ \
+ STORE_SH(in0, ((ptr) + 0 * stride)); \
+ STORE_SH(in1, ((ptr) + 1 * stride)); \
+}
+
+#define STORE_4VECS_SH(ptr, stride, \
+ in0, in1, in2, in3) \
+{ \
+ STORE_SH(in0, ((ptr) + 0 * stride)); \
+ STORE_SH(in1, ((ptr) + 1 * stride)); \
+ STORE_SH(in2, ((ptr) + 2 * stride)); \
+ STORE_SH(in3, ((ptr) + 3 * stride)); \
+}
+
+#define STORE_6VECS_SH(ptr, stride, \
+ in0, in1, in2, in3, \
+ in4, in5) \
+{ \
+ STORE_SH(in0, ((ptr) + 0 * stride)); \
+ STORE_SH(in1, ((ptr) + 1 * stride)); \
+ STORE_SH(in2, ((ptr) + 2 * stride)); \
+ STORE_SH(in3, ((ptr) + 3 * stride)); \
+ STORE_SH(in4, ((ptr) + 4 * stride)); \
+ STORE_SH(in5, ((ptr) + 5 * stride)); \
+}
+
+#define STORE_8VECS_SH(ptr, stride, \
+ in0, in1, in2, in3, \
+ in4, in5, in6, in7) \
+{ \
+ STORE_SH(in0, ((ptr) + 0 * stride)); \
+ STORE_SH(in1, ((ptr) + 1 * stride)); \
+ STORE_SH(in2, ((ptr) + 2 * stride)); \
+ STORE_SH(in3, ((ptr) + 3 * stride)); \
+ STORE_SH(in4, ((ptr) + 4 * stride)); \
+ STORE_SH(in5, ((ptr) + 5 * stride)); \
+ STORE_SH(in6, ((ptr) + 6 * stride)); \
+ STORE_SH(in7, ((ptr) + 7 * stride)); \
+}
+
#define ILVR_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l, \
out0, out1) \
{ \
@@ -164,6 +253,28 @@
out6, out7); \
}
+#define ILVR_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l, \
+ out0, out1) \
+{ \
+ out0 = __msa_ilvr_h((v8i16) (in0_l), (v8i16) (in0_r)); \
+ out1 = __msa_ilvr_h((v8i16) (in1_l), (v8i16) (in1_r)); \
+}
+
+#define ILVR_H_6VECS_SH(in0_r, in1_r, in2_r, \
+ in3_r, in4_r, in5_r, \
+ in0_l, in1_l, in2_l, \
+ in3_l, in4_l, in5_l, \
+ out0, out1, out2, \
+ out3, out4, out5) \
+{ \
+ ILVR_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l, \
+ out0, out1); \
+ ILVR_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l, \
+ out2, out3); \
+ ILVR_H_2VECS_SH(in4_r, in5_r, in4_l, in5_l, \
+ out4, out5); \
+}
+
#define ILVL_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l, \
out0, out1) \
{ \
@@ -196,6 +307,28 @@
out4, out5); \
}
+#define ILVL_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l, \
+ out0, out1) \
+{ \
+ out0 = __msa_ilvl_h((v8i16) (in0_l), (v8i16) (in0_r)); \
+ out1 = __msa_ilvl_h((v8i16) (in1_l), (v8i16) (in1_r)); \
+}
+
+#define ILVL_H_6VECS_SH(in0_r, in1_r, in2_r, \
+ in3_r, in4_r, in5_r, \
+ in0_l, in1_l, in2_l, \
+ in3_l, in4_l, in5_l, \
+ out0, out1, out2, \
+ out3, out4, out5) \
+{ \
+ ILVL_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l, \
+ out0, out1); \
+ ILVL_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l, \
+ out2, out3); \
+ ILVL_H_2VECS_SH(in4_r, in5_r, in4_l, in5_l, \
+ out4, out5); \
+}
+
#define ILVR_D_2VECS_SB(out0, in0_l, in0_r, \
out1, in1_l, in1_r) \
{ \
--
2.3.2