/* Compute the boundary strengths for one filtering direction: per 4x4 edge,
 * bS is 2 if either adjacent block has non-zero coefficients, 1 if their
 * reference indices or motion vectors differ beyond the limit, 0 otherwise. */
#define H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(edges, step, mask_mv, dir, \
                                                 d_idx, mask_dir) \
{ \
    int b_idx = 0; \
    int step_x4 = step << 2; \
    int d_idx_12 = d_idx + 12; \
    int d_idx_52 = d_idx + 52; \
    int d_idx_x4 = d_idx << 2; \
    int d_idx_x4_48 = d_idx_x4 + 48; \
    int dir_x32 = dir * 32; \
    uint8_t *ref_t = (uint8_t*)ref; \
    uint8_t *mv_t = (uint8_t*)mv; \
    uint8_t *nnz_t = (uint8_t*)nnz; \
    uint8_t *bS_t = (uint8_t*)bS; \
    for (; b_idx < edges; b_idx += step) { \
        if (!(mask_mv & b_idx)) { \
            if (bidir) { \
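                /* compare the reference indices of the current blocks and \
                   their neighbours across both reference lists */ \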
                ref2 = __lasx_xvldx(ref_t, d_idx_12); \
                ref3 = __lasx_xvldx(ref_t, d_idx_52); \
                ref0 = __lasx_xvld(ref_t, 12); \
                ref1 = __lasx_xvld(ref_t, 52); \
                ref2 = __lasx_xvilvl_w(ref3, ref2); \
                ref0 = __lasx_xvilvl_w(ref0, ref0); \
                ref1 = __lasx_xvilvl_w(ref1, ref1); \
                ref3 = __lasx_xvshuf4i_w(ref2, 0xB1); \
                ref0 = __lasx_xvsub_b(ref0, ref2); \
                ref1 = __lasx_xvsub_b(ref1, ref3); \
                ref0 = __lasx_xvor_v(ref0, ref1); \
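                /* motion vector deltas against the neighbouring blocks, \
                   saturated to bytes and checked against the limits held \
                   in cnst_0/cnst_1 */ \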
                tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48); \
                tmp3 = __lasx_xvld(mv_t, 48); \
                tmp4 = __lasx_xvld(mv_t, 208); \
                tmp5 = __lasx_xvld(mv_t + d_idx_x4, 208); \
                DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp2, 0x20, tmp5, tmp5, \
                          0x20, tmp2, tmp5); \
                tmp3 = __lasx_xvpermi_q(tmp4, tmp3, 0x20); \
                tmp2 = __lasx_xvsub_h(tmp2, tmp3); \
                tmp5 = __lasx_xvsub_h(tmp5, tmp3); \
                DUP2_ARG2(__lasx_xvsat_h, tmp2, 7, tmp5, 7, tmp2, tmp5); \
                tmp0 = __lasx_xvpickev_b(tmp5, tmp2); \
                tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \
                tmp0 = __lasx_xvadd_b(tmp0, cnst_1); \
                tmp0 = __lasx_xvssub_bu(tmp0, cnst_0); \
                tmp0 = __lasx_xvsat_h(tmp0, 7); \
                tmp0 = __lasx_xvpickev_b(tmp0, tmp0); \
                tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \
                tmp1 = __lasx_xvpickod_d(tmp0, tmp0); \
                out = __lasx_xvor_v(ref0, tmp0); \
                tmp1 = __lasx_xvshuf4i_w(tmp1, 0xB1); \
                out = __lasx_xvor_v(out, tmp1); \
                tmp0 = __lasx_xvshuf4i_w(out, 0xB1); \
                out = __lasx_xvmin_bu(out, tmp0); \
            } else { \
                /* single reference list: one ref index / mv comparison */ \
                ref0 = __lasx_xvldx(ref_t, d_idx_12); \
                ref3 = __lasx_xvld(ref_t, 12); \
                tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48); \
                tmp3 = __lasx_xvld(mv_t, 48); \
                tmp4 = __lasx_xvsub_h(tmp3, tmp2); \
                tmp1 = __lasx_xvsat_h(tmp4, 7); \
                tmp1 = __lasx_xvpickev_b(tmp1, tmp1); \
                tmp1 = __lasx_xvadd_b(tmp1, cnst_1); \
                out = __lasx_xvssub_bu(tmp1, cnst_0); \
                out = __lasx_xvsat_h(out, 7); \
                out = __lasx_xvpickev_b(out, out); \
                ref0 = __lasx_xvsub_b(ref3, ref0); \
                out = __lasx_xvor_v(out, ref0); \
            } \
        } \
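        /* fold in the non-zero coefficient flags: bS becomes 2 where either \
           block has coefficients, else 1 where refs/mvs differ, else 0 */ \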
        tmp0 = __lasx_xvld(nnz_t, 12); \
        tmp1 = __lasx_xvldx(nnz_t, d_idx_12); \
        tmp0 = __lasx_xvor_v(tmp0, tmp1); \
        tmp0 = __lasx_xvmin_bu(tmp0, cnst_2); \
        out = __lasx_xvmin_bu(out, cnst_2); \
        tmp0 = __lasx_xvslli_h(tmp0, 1); \
        tmp0 = __lasx_xvmax_bu(out, tmp0); \
        tmp0 = __lasx_vext2xv_hu_bu(tmp0); \
        __lasx_xvstelm_d(tmp0, bS_t + dir_x32, 0, 0); \

void ff_h264_loop_filter_strength_lasx(int16_t bS[2][4][4], uint8_t nnz[40],
                                       int8_t ref[2][40], int16_t mv[2][40][2],
                                       int bidir, int edges, int step,
                                       int mask_mv0, int mask_mv1, int field)
{
    __m256i out;
    __m256i ref0, ref1, ref2, ref3;
    __m256i tmp0, tmp1;
    __m256i tmp2, tmp3, tmp4, tmp5;
    __m256i cnst_0, cnst_1, cnst_2;
    __m256i zero = __lasx_xvldi(0);
    __m256i one = __lasx_xvnor_v(zero, zero);
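    /* all-zero and all-ones (0xFF) vectors */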
    int64_t cnst3 = 0x0206020602060206, cnst4 = 0x0103010301030103;

    if (field) {
        /* field pairs: vertical mv components use a limit of 2 */
        cnst_0 = __lasx_xvreplgr2vr_d(cnst3);
        cnst_1 = __lasx_xvreplgr2vr_d(cnst4);
        cnst_2 = __lasx_xvldi(0x01);
    } else {
        /* frame: all mv components use a limit of 4 */
        DUP2_ARG1(__lasx_xvldi, 0x06, 0x03, cnst_0, cnst_1);
        cnst_2 = __lasx_xvldi(0x01);
    }
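    /* transpose the 4x4 matrix of dir-0 strengths (first 32 bytes of bS)
     * into the row order expected by the caller */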
    DUP2_ARG2(__lasx_xvld, (int8_t*)bS, 0, (int8_t*)bS, 16, tmp0, tmp1);
    DUP2_ARG2(__lasx_xvilvh_d, tmp0, tmp0, tmp1, tmp1, tmp2, tmp3);
    LASX_TRANSPOSE4x4_H(tmp0, tmp2, tmp1, tmp3, tmp2, tmp3, tmp4, tmp5);
    __lasx_xvstelm_d(tmp2, (int8_t*)bS, 0, 0);
    __lasx_xvstelm_d(tmp3, (int8_t*)bS + 8, 0, 0);
    __lasx_xvstelm_d(tmp4, (int8_t*)bS + 16, 0, 0);
    __lasx_xvstelm_d(tmp5, (int8_t*)bS + 24, 0, 0);
}