[FFmpeg-cvslog] avcodec/mips: [loongson] optimize put_hevc_qpel_uni_hv_8 with mmi.
Shiyou Yin
git at videolan.org
Sat Feb 2 21:18:02 EET 2019
ffmpeg | branch: master | Shiyou Yin <yinshiyou-hf at loongson.cn> | Fri Feb 1 14:04:59 2019 +0800| [0c43429210d674276fe19a127a69368e0129ca00] | committer: Michael Niedermayer
avcodec/mips: [loongson] optimize put_hevc_qpel_uni_hv_8 with mmi.
Optimize put_hevc_qpel_uni_hv_8 with mmi for width = 4/8/12/16/24/32/48/64.
This optimization improves HEVC decoding performance by about 2.7% (2.24x to 2.30x, tested on a Loongson 3A3000).
Signed-off-by: Michael Niedermayer <michael at niedermayer.cc>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=0c43429210d674276fe19a127a69368e0129ca00
---
libavcodec/mips/hevcdsp_init_mips.c | 9 ++
libavcodec/mips/hevcdsp_mips.h | 21 ++++
libavcodec/mips/hevcdsp_mmi.c | 210 ++++++++++++++++++++++++++++++++++++
3 files changed, 240 insertions(+)
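
To make the two assembly passes below easier to follow, here is a minimal scalar sketch (8-bit depth only) of the interpolation they vectorize. It roughly mirrors the structure of FFmpeg's reference C in hevcdsp_template.c (horizontal 8-tap pass into a 16-bit intermediate buffer, then a vertical 8-tap pass with rounding, shift and clipping), but the constants, filter table and clip helper are spelled out locally for illustration and are not part of the commit.

    /* Illustrative scalar sketch (8-bit depth), not the committed code. */
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_PB_SIZE        64
    #define QPEL_EXTRA_BEFORE  3
    #define QPEL_EXTRA_AFTER   4
    #define QPEL_EXTRA         (QPEL_EXTRA_BEFORE + QPEL_EXTRA_AFTER)

    /* HEVC luma 8-tap fractional-sample filters, indexed by mx - 1 / my - 1. */
    static const int8_t qpel_filters[3][8] = {
        { -1, 4, -10, 58, 17,  -5, 1,  0 },
        { -1, 4, -11, 40, 40, -11, 4, -1 },
        {  0, 1,  -5, 17, 58, -10, 4, -1 },
    };

    static uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v);
    }

    static void put_hevc_qpel_uni_hv_8_c(uint8_t *dst, ptrdiff_t dststride,
                                         const uint8_t *src, ptrdiff_t srcstride,
                                         int height, intptr_t mx, intptr_t my,
                                         int width)
    {
        int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE];
        int16_t *tmp = tmp_array;
        const int8_t *f = qpel_filters[mx - 1];
        int x, y, k;

        /* Pass 1: horizontal 8-tap filter into the 16-bit intermediate buffer,
         * starting QPEL_EXTRA_BEFORE rows above and 3 samples left of src. */
        src -= QPEL_EXTRA_BEFORE * srcstride + 3;
        for (y = 0; y < height + QPEL_EXTRA; y++) {
            for (x = 0; x < width; x++) {
                int sum = 0;
                for (k = 0; k < 8; k++)
                    sum += f[k] * src[x + k];
                tmp[x] = sum;               /* no extra shift at 8-bit depth */
            }
            src += srcstride;
            tmp += MAX_PB_SIZE;
        }

        /* Pass 2: vertical 8-tap filter over the intermediate rows, then
         * add the rounding offset 32, shift by 6 and clip (uni output). */
        tmp = tmp_array;
        f   = qpel_filters[my - 1];
        for (y = 0; y < height; y++) {
            for (x = 0; x < width; x++) {
                int sum = 0;
                for (k = 0; k < 8; k++)
                    sum += f[k] * tmp[x + k * MAX_PB_SIZE];
                dst[x] = clip_u8(((sum >> 6) + 32) >> 6);
            }
            tmp += MAX_PB_SIZE;
            dst += dststride;
        }
    }
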
diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c
index 18d97d9e0f..9e08c0eeee 100644
--- a/libavcodec/mips/hevcdsp_init_mips.c
+++ b/libavcodec/mips/hevcdsp_init_mips.c
@@ -54,6 +54,15 @@ static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c,
c->put_hevc_epel_bi[5][0][0] = ff_hevc_put_hevc_pel_bi_pixels16_8_mmi;
c->put_hevc_epel_bi[6][0][0] = ff_hevc_put_hevc_pel_bi_pixels24_8_mmi;
c->put_hevc_epel_bi[7][0][0] = ff_hevc_put_hevc_pel_bi_pixels32_8_mmi;
+
+ c->put_hevc_qpel_uni[1][1][1] = ff_hevc_put_hevc_qpel_uni_hv4_8_mmi;
+ c->put_hevc_qpel_uni[3][1][1] = ff_hevc_put_hevc_qpel_uni_hv8_8_mmi;
+ c->put_hevc_qpel_uni[4][1][1] = ff_hevc_put_hevc_qpel_uni_hv12_8_mmi;
+ c->put_hevc_qpel_uni[5][1][1] = ff_hevc_put_hevc_qpel_uni_hv16_8_mmi;
+ c->put_hevc_qpel_uni[6][1][1] = ff_hevc_put_hevc_qpel_uni_hv24_8_mmi;
+ c->put_hevc_qpel_uni[7][1][1] = ff_hevc_put_hevc_qpel_uni_hv32_8_mmi;
+ c->put_hevc_qpel_uni[8][1][1] = ff_hevc_put_hevc_qpel_uni_hv48_8_mmi;
+ c->put_hevc_qpel_uni[9][1][1] = ff_hevc_put_hevc_qpel_uni_hv64_8_mmi;
}
}
#endif // #if HAVE_MMI
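
The first index of put_hevc_qpel_uni selects the block width (1->4, 3->8, 4->12, 5->16, 6->24, 7->32, 8->48, 9->64, matching the widths listed in the commit message), and the trailing [1][1] selects the case where both the horizontal and vertical fractional offsets are non-zero. As a hedged sketch of how a caller reaches these entries (the exact call site and the ff_hevc_pel_weight lookup are assumptions here, not part of this patch):

    /* Sketch only: width-to-index lookup and fractional-offset indexing assumed. */
    int idx = ff_hevc_pel_weight[block_w];   /* e.g. block_w == 8 -> idx == 3 */
    c->put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                          block_h, mx, my, block_w);
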
diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h
index 9f1e44734d..ebd5f326b5 100644
--- a/libavcodec/mips/hevcdsp_mips.h
+++ b/libavcodec/mips/hevcdsp_mips.h
@@ -525,4 +525,25 @@ L_BI_MC(qpel, hv, 48, mmi);
L_BI_MC(qpel, hv, 64, mmi);
#undef L_BI_MC
+
+#define L_UNI_MC(PEL, DIR, WIDTH, TYPE) \
+void ff_hevc_put_hevc_##PEL##_uni_##DIR##WIDTH##_8_##TYPE(uint8_t *dst, \
+ ptrdiff_t dst_stride, \
+ uint8_t *src, \
+ ptrdiff_t src_stride, \
+ int height, \
+ intptr_t mx, \
+ intptr_t my, \
+ int width)
+
+L_UNI_MC(qpel, hv, 4, mmi);
+L_UNI_MC(qpel, hv, 8, mmi);
+L_UNI_MC(qpel, hv, 12, mmi);
+L_UNI_MC(qpel, hv, 16, mmi);
+L_UNI_MC(qpel, hv, 24, mmi);
+L_UNI_MC(qpel, hv, 32, mmi);
+L_UNI_MC(qpel, hv, 48, mmi);
+L_UNI_MC(qpel, hv, 64, mmi);
+#undef L_UNI_MC
+
#endif // #ifndef AVCODEC_MIPS_HEVCDSP_MIPS_H
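
For reference, expanding L_UNI_MC(qpel, hv, 8, mmi) produces the following prototype (the other widths differ only in the pasted number):

    void ff_hevc_put_hevc_qpel_uni_hv8_8_mmi(uint8_t *dst, ptrdiff_t dst_stride,
                                             uint8_t *src, ptrdiff_t src_stride,
                                             int height, intptr_t mx,
                                             intptr_t my, int width);
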
diff --git a/libavcodec/mips/hevcdsp_mmi.c b/libavcodec/mips/hevcdsp_mmi.c
index 727a718857..a8b3967943 100644
--- a/libavcodec/mips/hevcdsp_mmi.c
+++ b/libavcodec/mips/hevcdsp_mmi.c
@@ -556,3 +556,213 @@ PUT_HEVC_PEL_BI_PIXELS(24, 3, -24, -24, -48);
PUT_HEVC_PEL_BI_PIXELS(32, 4, -32, -32, -64);
PUT_HEVC_PEL_BI_PIXELS(48, 6, -48, -48, -96);
PUT_HEVC_PEL_BI_PIXELS(64, 8, -64, -64, -128);
+
+#define PUT_HEVC_QPEL_UNI_HV(w, x_step, src_step, dst_step, tmp_step) \
+void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
+ ptrdiff_t _dststride, \
+ uint8_t *_src, \
+ ptrdiff_t _srcstride, \
+ int height, \
+ intptr_t mx, intptr_t my, \
+ int width) \
+{ \
+ int x, y; \
+ const int8_t *filter; \
+ pixel *src = (pixel*)_src; \
+ ptrdiff_t srcstride = _srcstride / sizeof(pixel); \
+ pixel *dst = (pixel *)_dst; \
+ ptrdiff_t dststride = _dststride / sizeof(pixel); \
+ int16_t tmp_array[(MAX_PB_SIZE + QPEL_EXTRA) * MAX_PB_SIZE]; \
+ int16_t *tmp = tmp_array; \
+ uint64_t ftmp[20]; \
+ uint64_t rtmp[1]; \
+ int shift = 6; \
+ int offset = 32; \
+ \
+ src -= (QPEL_EXTRA_BEFORE * srcstride + 3); \
+ filter = ff_hevc_qpel_filters[mx - 1]; \
+ x = width >> 2; \
+ y = height + QPEL_EXTRA; \
+ __asm__ volatile( \
+ MMI_LDC1(%[ftmp1], %[filter], 0x00) \
+ "li %[rtmp0], 0x08 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \
+ "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
+ "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
+ "psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
+ \
+ "1: \n\t" \
+ "2: \n\t" \
+ "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
+ "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
+ "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
+ "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
+ "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
+ "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
+ "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
+ "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ "punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
+ "paddh %[ftmp3], %[ftmp7], %[ftmp8] \n\t" \
+ "punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t" \
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
+ "paddh %[ftmp4], %[ftmp7], %[ftmp8] \n\t" \
+ "punpcklbh %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp8], %[ftmp5], %[ftmp0] \n\t" \
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
+ "paddh %[ftmp5], %[ftmp7], %[ftmp8] \n\t" \
+ "punpcklbh %[ftmp7], %[ftmp6], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp8], %[ftmp6], %[ftmp0] \n\t" \
+ "pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
+ "pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
+ "paddh %[ftmp6], %[ftmp7], %[ftmp8] \n\t" \
+ TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
+ %[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \
+ "paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
+ "paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
+ "paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
+ "gssdlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
+ "gssdrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ \
+ "daddi %[x], %[x], -0x01 \n\t" \
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \
+ "bnez %[x], 2b \n\t" \
+ \
+ "daddi %[y], %[y], -0x01 \n\t" \
+ "li %[x], " #x_step " \n\t" \
+ PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], " #tmp_step " \n\t" \
+ PTR_ADDU "%[src], %[src], %[stride] \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "bnez %[y], 1b \n\t" \
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
+ [ftmp10]"=&f"(ftmp[10]), [rtmp0]"=&r"(rtmp[0]), \
+ [src]"+&r"(src), [tmp]"+&r"(tmp), [y]"+&r"(y), \
+ [x]"+&r"(x) \
+ : [filter]"r"(filter), [stride]"r"(srcstride) \
+ : "memory" \
+ ); \
+ \
+ tmp = tmp_array; \
+ filter = ff_hevc_qpel_filters[my - 1]; \
+ x = width >> 2; \
+ y = height; \
+ __asm__ volatile( \
+ MMI_LDC1(%[ftmp1], %[filter], 0x00) \
+ "li %[rtmp0], 0x08 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \
+ "punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
+ "psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
+ "psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
+ "li %[rtmp0], 0x06 \n\t" \
+ "dmtc1 %[rtmp0], %[ftmp0] \n\t" \
+ "punpcklhw %[offset], %[offset], %[offset] \n\t" \
+ "punpcklwd %[offset], %[offset], %[offset] \n\t" \
+ \
+ "1: \n\t" \
+ "li %[x], " #x_step " \n\t" \
+ "2: \n\t" \
+ "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp7], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp7], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp8], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp8], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp9], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp9], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "gsldlc1 %[ftmp10], 0x07(%[tmp]) \n\t" \
+ "gsldrc1 %[ftmp10], 0x00(%[tmp]) \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], -0x380 \n\t" \
+ TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
+ %[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \
+ TRANSPOSE_4H(%[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10], \
+ %[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \
+ "pmaddhw %[ftmp11], %[ftmp3], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp7], %[ftmp2] \n\t" \
+ "pmaddhw %[ftmp13], %[ftmp4], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp14], %[ftmp8], %[ftmp2] \n\t" \
+ "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t" \
+ "paddw %[ftmp13], %[ftmp13], %[ftmp14] \n\t" \
+ TRANSPOSE_2W(%[ftmp11], %[ftmp13], %[ftmp3], %[ftmp4]) \
+ "paddw %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
+ "psraw %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
+ "pmaddhw %[ftmp11], %[ftmp5], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp9], %[ftmp2] \n\t" \
+ "pmaddhw %[ftmp13], %[ftmp6], %[ftmp1] \n\t" \
+ "pmaddhw %[ftmp14], %[ftmp10], %[ftmp2] \n\t" \
+ "paddw %[ftmp11], %[ftmp11], %[ftmp12] \n\t" \
+ "paddw %[ftmp13], %[ftmp13], %[ftmp14] \n\t" \
+ TRANSPOSE_2W(%[ftmp11], %[ftmp13], %[ftmp5], %[ftmp6]) \
+ "paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
+ "psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
+ "packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
+ "paddh %[ftmp3], %[ftmp3], %[offset] \n\t" \
+ "psrah %[ftmp3], %[ftmp3], %[shift] \n\t" \
+ "xor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" \
+ "pcmpgth %[ftmp7], %[ftmp3], %[ftmp7] \n\t" \
+ "and %[ftmp3], %[ftmp3], %[ftmp7] \n\t" \
+ "packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
+ "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
+ "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ \
+ "daddi %[x], %[x], -0x01 \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t" \
+ "bnez %[x], 2b \n\t" \
+ \
+ "daddi %[y], %[y], -0x01 \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], " #tmp_step " \n\t" \
+ PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \
+ PTR_ADDU "%[dst], %[dst], %[stride] \n\t" \
+ PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
+ "bnez %[y], 1b \n\t" \
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
+ [ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
+ [ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
+ [ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
+ [ftmp10]"=&f"(ftmp[10]), [ftmp11]"=&f"(ftmp[11]), \
+ [ftmp12]"=&f"(ftmp[12]), [ftmp13]"=&f"(ftmp[13]), \
+ [ftmp14]"=&f"(ftmp[14]), \
+ [dst]"+&r"(dst), [tmp]"+&r"(tmp), [y]"+&r"(y), [x]"=&r"(x), \
+ [offset]"+&f"(offset), [rtmp0]"=&r"(rtmp[0]) \
+ : [filter]"r"(filter), [stride]"r"(dststride), \
+ [shift]"f"(shift) \
+ : "memory" \
+ ); \
+}
+
+PUT_HEVC_QPEL_UNI_HV(4, 1, -4, -4, -8);
+PUT_HEVC_QPEL_UNI_HV(8, 2, -8, -8, -16);
+PUT_HEVC_QPEL_UNI_HV(12, 3, -12, -12, -24);
+PUT_HEVC_QPEL_UNI_HV(16, 4, -16, -16, -32);
+PUT_HEVC_QPEL_UNI_HV(24, 6, -24, -24, -48);
+PUT_HEVC_QPEL_UNI_HV(32, 8, -32, -32, -64);
+PUT_HEVC_QPEL_UNI_HV(48, 12, -48, -48, -96);
+PUT_HEVC_QPEL_UNI_HV(64, 16, -64, -64, -128);
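
In each instantiation above, x_step is w/4 (the number of 4-pixel columns the inner loop processes per row), src_step and dst_step are -w (they rewind the row pointer before the stride is added), and tmp_step is -2*w in bytes (it rewinds the int16_t intermediate row before the 0x80 = MAX_PB_SIZE * sizeof(int16_t) row advance). Below is a minimal, hypothetical call of the 8-wide variant on a locally padded buffer; the buffer size, stride and values are assumptions for illustration, and the decoder actually reaches this function through c->put_hevc_qpel_uni[3][1][1].

    /* Hypothetical usage sketch: 8x8 block, half-pel position in both
     * directions (mx == my == 2), source padded by QPEL_EXTRA samples. */
    uint8_t dst[8 * 8];
    uint8_t src_buf[(8 + 7) * 80] = {0};     /* 15 padded rows, stride 80 */
    uint8_t *src = src_buf + 3 * 80 + 3;     /* block origin inside the padding */

    ff_hevc_put_hevc_qpel_uni_hv8_8_mmi(dst, 8, src, 80, 8, 2, 2, 8);
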