[FFmpeg-cvslog] avcodec/mips: [loongson] optimize put_hevc_epel_bi_hv_8 with mmi.
Shiyou Yin
git at videolan.org
Sat Feb 2 21:18:05 EET 2019
ffmpeg | branch: master | Shiyou Yin <yinshiyou-hf at loongson.cn> | Fri Feb 1 14:05:00 2019 +0800| [c0942b7a2c5ccfd39907257b3b0a10f86d6b9164] | committer: Michael Niedermayer
avcodec/mips: [loongson] optimize put_hevc_epel_bi_hv_8 with mmi.
Optimize put_hevc_epel_bi_hv_8 with mmi in the case width=4/8/12/16/24/32.
This optimization improved HEVC decoding performance by 1.7% (2.30x to 2.34x, tested on Loongson 3A3000).
Signed-off-by: Michael Niedermayer <michael at niedermayer.cc>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=c0942b7a2c5ccfd39907257b3b0a10f86d6b9164
---
libavcodec/mips/hevcdsp_init_mips.c | 7 ++
libavcodec/mips/hevcdsp_mips.h | 6 ++
libavcodec/mips/hevcdsp_mmi.c | 186 ++++++++++++++++++++++++++++++++++++
3 files changed, 199 insertions(+)
diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c
index 9e08c0eeee..bf7d823b56 100644
--- a/libavcodec/mips/hevcdsp_init_mips.c
+++ b/libavcodec/mips/hevcdsp_init_mips.c
@@ -55,6 +55,13 @@ static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c,
c->put_hevc_epel_bi[6][0][0] = ff_hevc_put_hevc_pel_bi_pixels24_8_mmi;
c->put_hevc_epel_bi[7][0][0] = ff_hevc_put_hevc_pel_bi_pixels32_8_mmi;
+ c->put_hevc_epel_bi[1][1][1] = ff_hevc_put_hevc_epel_bi_hv4_8_mmi;
+ c->put_hevc_epel_bi[3][1][1] = ff_hevc_put_hevc_epel_bi_hv8_8_mmi;
+ c->put_hevc_epel_bi[4][1][1] = ff_hevc_put_hevc_epel_bi_hv12_8_mmi;
+ c->put_hevc_epel_bi[5][1][1] = ff_hevc_put_hevc_epel_bi_hv16_8_mmi;
+ c->put_hevc_epel_bi[6][1][1] = ff_hevc_put_hevc_epel_bi_hv24_8_mmi;
+ c->put_hevc_epel_bi[7][1][1] = ff_hevc_put_hevc_epel_bi_hv32_8_mmi;
+
c->put_hevc_qpel_uni[1][1][1] = ff_hevc_put_hevc_qpel_uni_hv4_8_mmi;
c->put_hevc_qpel_uni[3][1][1] = ff_hevc_put_hevc_qpel_uni_hv8_8_mmi;
c->put_hevc_qpel_uni[4][1][1] = ff_hevc_put_hevc_qpel_uni_hv12_8_mmi;
diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h
index ebd5f326b5..fe4faae41a 100644
--- a/libavcodec/mips/hevcdsp_mips.h
+++ b/libavcodec/mips/hevcdsp_mips.h
@@ -524,6 +524,12 @@ L_BI_MC(qpel, hv, 32, mmi);
L_BI_MC(qpel, hv, 48, mmi);
L_BI_MC(qpel, hv, 64, mmi);
+L_BI_MC(epel, hv, 4, mmi);
+L_BI_MC(epel, hv, 8, mmi);
+L_BI_MC(epel, hv, 12, mmi);
+L_BI_MC(epel, hv, 16, mmi);
+L_BI_MC(epel, hv, 24, mmi);
+L_BI_MC(epel, hv, 32, mmi);
#undef L_BI_MC
#define L_UNI_MC(PEL, DIR, WIDTH, TYPE) \
diff --git a/libavcodec/mips/hevcdsp_mmi.c b/libavcodec/mips/hevcdsp_mmi.c
index a8b3967943..42795e4d2c 100644
--- a/libavcodec/mips/hevcdsp_mmi.c
+++ b/libavcodec/mips/hevcdsp_mmi.c
@@ -446,6 +446,192 @@ PUT_HEVC_QPEL_BI_HV(32, 8, -32, -64, -32);
PUT_HEVC_QPEL_BI_HV(48, 12, -48, -96, -48);
PUT_HEVC_QPEL_BI_HV(64, 16, -64, -128, -64);
+#define PUT_HEVC_EPEL_BI_HV(w, x_step, src_step, src2_step, dst_step) \
+void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
+ ptrdiff_t _dststride, \
+ uint8_t *_src, \
+ ptrdiff_t _srcstride, \
+ int16_t *src2, int height, \
+ intptr_t mx, intptr_t my, \
+ int width) \
+{ \
+ int x, y; \
+ pixel *src = (pixel *)_src; \
+ ptrdiff_t srcstride = _srcstride / sizeof(pixel); \
+ pixel *dst = (pixel *)_dst; \
+ ptrdiff_t dststride = _dststride / sizeof(pixel); \
+ const int8_t *filter = ff_hevc_epel_filters[mx - 1]; \
+ int16_t tmp_array[(MAX_PB_SIZE + EPEL_EXTRA) * MAX_PB_SIZE]; \
+ int16_t *tmp = tmp_array; \
+ uint64_t ftmp[12]; \
+ uint64_t rtmp[1]; \
+ int shift = 7; \
+ int offset = 64; \
+ \
+ src -= (EPEL_EXTRA_BEFORE * srcstride + 1); \
+ x = width >> 2; \
+ y = height + EPEL_EXTRA; \
+ __asm__ volatile( \
+ MMI_LWC1(%[ftmp1], %[filter], 0x00) \
+ "li %[rtmp0], 0x08 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
+ "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
+ \
+ "1: \n\t" \
+ "2: \n\t" \
+ "gslwlc1 %[ftmp2], 0x03(%[src]) \n\t" \
+ "gslwrc1 %[ftmp2], 0x00(%[src]) \n\t" \
+ "gslwlc1 %[ftmp3], 0x04(%[src]) \n\t" \
+ "gslwrc1 %[ftmp3], 0x01(%[src]) \n\t" \
+ "gslwlc1 %[ftmp4], 0x05(%[src]) \n\t" \
+ "gslwrc1 %[ftmp4], 0x02(%[src]) \n\t" \
+ "gslwlc1 %[ftmp5], 0x06(%[src]) \n\t" \
+ "gslwrc1 %[ftmp5], 0x03(%[src]) \n\t" \
+ "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
+ "pmullh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" \
+ "punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
+ "pmullh %[ftmp3], %[ftmp3], %[ftmp1] \n\t" \
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t" \
+ "pmullh %[ftmp4], %[ftmp4], %[ftmp1] \n\t" \
+ "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
+ "pmullh %[ftmp5], %[ftmp5], %[ftmp1] \n\t" \
+ TRANSPOSE_4H(%[ftmp2], %[ftmp3], %[ftmp4], %[ftmp5], \
+ %[ftmp6], %[ftmp7], %[ftmp8], %[ftmp9]) \
+ "paddh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" \
+ "paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
+ "paddh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" \
+ "gssdlc1 %[ftmp2], 0x07(%[tmp]) \n\t" \
+ "gssdrc1 %[ftmp2], 0x00(%[tmp]) \n\t" \
+ \
+ "daddi %[x], %[x], -0x01 \n\t" \
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \
+ "bnez %[x], 2b \n\t" \
+ \
+ "daddi %[y], %[y], -0x01 \n\t" \
+ "li %[x], " #x_step " \n\t" \
+ PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], " #src2_step " \n\t" \
+ PTR_ADDU "%[src], %[src], %[stride] \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "bnez %[y], 1b \n\t" \
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
+ [rtmp0]"=&r"(rtmp[0]), \
+ [src]"+&r"(src), [tmp]"+&r"(tmp), [y]"+&r"(y), \
+ [x]"+&r"(x) \
+ : [filter]"r"(filter), [stride]"r"(srcstride) \
+ : "memory" \
+ ); \
+ \
+ tmp = tmp_array; \
+ filter = ff_hevc_epel_filters[my - 1]; \
+ x = width >> 2; \
+ y = height; \
+ __asm__ volatile( \
+ MMI_LWC1(%[ftmp1], %[filter], 0x00) \
+ "li %[rtmp0], 0x08 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
+ "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
+ "li %[rtmp0], 0x06 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpcklwd %[offset], %[offset], %[offset] \n\t" \
+ "xor %[ftmp2], %[ftmp2], %[ftmp2] \n\t" \
+ \
+ "1: \n\t" \
+ "li %[x], " #x_step " \n\t" \
+ "2: \n\t" \
+ "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], -0x180 \n\t" \
+ TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
+ %[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \
+ "pmaddhw %[ftmp7], %[ftmp3], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp8], %[ftmp4], %[ftmp1] \n\t" \
+ TRANSPOSE_2W(%[ftmp7], %[ftmp8], %[ftmp3], %[ftmp4]) \
+ "paddw %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
+ "psraw %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
+ "pmaddhw %[ftmp7], %[ftmp5], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp8], %[ftmp6], %[ftmp1] \n\t" \
+ TRANSPOSE_2W(%[ftmp7], %[ftmp8], %[ftmp5], %[ftmp6]) \
+ "paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
+ "psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
+ "packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
+ "gsldlc1 %[ftmp4], 0x07(%[src2]) \n\t" \
+ "gsldrc1 %[ftmp4], 0x00(%[src2]) \n\t" \
+ "li %[rtmp0], 0x10 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp8] \n\t" \
+ "punpcklhw %[ftmp5], %[ftmp2], %[ftmp3] \n\t" \
+ "punpckhhw %[ftmp6], %[ftmp2], %[ftmp3] \n\t" \
+ "punpckhhw %[ftmp3], %[ftmp2], %[ftmp4] \n\t" \
+ "punpcklhw %[ftmp4], %[ftmp2], %[ftmp4] \n\t" \
+ "psraw %[ftmp5], %[ftmp5], %[ftmp8] \n\t" \
+ "psraw %[ftmp6], %[ftmp6], %[ftmp8] \n\t" \
+ "psraw %[ftmp3], %[ftmp3], %[ftmp8] \n\t" \
+ "psraw %[ftmp4], %[ftmp4], %[ftmp8] \n\t" \
+ "paddw %[ftmp5], %[ftmp5], %[ftmp4] \n\t" \
+ "paddw %[ftmp6], %[ftmp6], %[ftmp3] \n\t" \
+ "paddw %[ftmp5], %[ftmp5], %[offset] \n\t" \
+ "paddw %[ftmp6], %[ftmp6], %[offset] \n\t" \
+ "psraw %[ftmp5], %[ftmp5], %[shift] \n\t" \
+ "psraw %[ftmp6], %[ftmp6], %[shift] \n\t" \
+ "packsswh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
+ "pcmpgth %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
+ "and %[ftmp3], %[ftmp5], %[ftmp7] \n\t" \
+ "packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
+ "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
+ "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ \
+ "daddi %[x], %[x], -0x01 \n\t" \
+ PTR_ADDIU "%[src2], %[src2], 0x08 \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t" \
+ "bnez %[x], 2b \n\t" \
+ \
+ "daddi %[y], %[y], -0x01 \n\t" \
+ PTR_ADDIU "%[src2], %[src2], " #src2_step " \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], " #src2_step " \n\t" \
+ PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \
+ PTR_ADDIU "%[src2], %[src2], 0x80 \n\t" \
+ PTR_ADDU "%[dst], %[dst], %[stride] \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "bnez %[y], 1b \n\t" \
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
+ [ftmp10]"=&f"(ftmp[10]), [src2]"+&r"(src2), \
+ [dst]"+&r"(dst), [tmp]"+&r"(tmp), [y]"+&r"(y), [x]"=&r"(x), \
+ [offset]"+&f"(offset), [rtmp0]"=&r"(rtmp[0]) \
+ : [filter]"r"(filter), [stride]"r"(dststride), \
+ [shift]"f"(shift) \
+ : "memory" \
+ ); \
+}
+
+PUT_HEVC_EPEL_BI_HV(4, 1, -4, -8, -4);
+PUT_HEVC_EPEL_BI_HV(8, 2, -8, -16, -8);
+PUT_HEVC_EPEL_BI_HV(12, 3, -12, -24, -12);
+PUT_HEVC_EPEL_BI_HV(16, 4, -16, -32, -16);
+PUT_HEVC_EPEL_BI_HV(24, 6, -24, -48, -24);
+PUT_HEVC_EPEL_BI_HV(32, 8, -32, -64, -32);
+
#define PUT_HEVC_PEL_BI_PIXELS(w, x_step, src_step, dst_step, src2_step) \
void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
ptrdiff_t _dststride, \
More information about the ffmpeg-cvslog
mailing list