/* One pass of the deblocking-strength computation for a single edge
 * direction: bS values are derived from the reference-index and
 * motion-vector differences across the edge and from the
 * non-zero-coefficient flags, four 4x4 blocks at a time. */
#define h264_loop_filter_strength_iteration_msa(edges, step, mask_mv, dir,   \
                                                d_idx)                        \
{                                                                             \
    int b_idx = 0;                                                            \
    int step_x4 = step << 2;                                                  \
    int d_idx_12 = d_idx + 12;                                                \
    int d_idx_52 = d_idx + 52;                                                \
    int d_idx_x4 = d_idx << 2;                                                \
    int d_idx_x4_48 = d_idx_x4 + 48;                                          \
    int dir_x32 = dir * 32;                                                   \
    uint8_t *ref_t = (uint8_t*)ref;                                           \
    uint8_t *mv_t = (uint8_t*)mv;                                             \
    uint8_t *nnz_t = (uint8_t*)nnz;                                           \
    uint8_t *bS_t = (uint8_t*)bS;                                             \
                                                                              \
    for (; b_idx < edges; b_idx += step) {                                    \
        if (!(mask_mv & b_idx)) {                                             \
            if (bidir) {                                                      \
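                /* B slices: reference indices and MVs of both lists are     \
                 * compared across the edge */                               \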
                ref_2 = LD_SB(ref_t + d_idx_12);                              \
                ref_3 = LD_SB(ref_t + d_idx_52);                              \
                ref_0 = LD_SB(ref_t + 12);                                    \
                ref_1 = LD_SB(ref_t + 52);                                    \
                ref_2 = (v16i8)__msa_ilvr_w((v4i32)ref_3, (v4i32)ref_2);      \
                ref_0 = (v16i8)__msa_ilvr_w((v4i32)ref_0, (v4i32)ref_0);      \
                ref_1 = (v16i8)__msa_ilvr_w((v4i32)ref_1, (v4i32)ref_1);      \
                ref_3 = (v16i8)__msa_shf_h((v8i16)ref_2, 0x4e);               \
                ref_0 = (v16i8)__msa_or_v((v16u8)ref_0, (v16u8)ref_1);        \
                tmp_2 = LD_SH(mv_t + d_idx_x4_48);                            \
                tmp_3 = LD_SH(mv_t + 48);                                     \
                tmp_4 = LD_SH(mv_t + 208);                                    \
                tmp_5 = tmp_2 - tmp_3;                                        \
                tmp_6 = tmp_2 - tmp_4;                                        \
                SAT_SH2_SH(tmp_5, tmp_6, 7);                                  \
                tmp_0 = __msa_pckev_b((v16i8)tmp_6, (v16i8)tmp_5);            \
                tmp_0 = (v16i8)__msa_subs_u_b((v16u8)tmp_0, (v16u8)cnst_0);   \
                tmp_0 = (v16i8)__msa_sat_s_h((v8i16)tmp_0, 7);                \
                tmp_0 = __msa_pckev_b(tmp_0, tmp_0);                          \
                out = (v16i8)__msa_or_v((v16u8)ref_0, (v16u8)tmp_0);          \
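                /* repeat the MV check against the neighbour's list-1 MVs */ \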
                tmp_2 = LD_SH(mv_t + 208 + d_idx_x4);                         \
                tmp_5 = tmp_2 - tmp_3;                                        \
                tmp_6 = tmp_2 - tmp_4;                                        \
                SAT_SH2_SH(tmp_5, tmp_6, 7);                                  \
                tmp_1 = __msa_pckev_b((v16i8)tmp_6, (v16i8)tmp_5);            \
                tmp_1 = (v16i8)__msa_subs_u_b((v16u8)tmp_1, (v16u8)cnst_0);   \
                tmp_1 = (v16i8)__msa_sat_s_h((v8i16)tmp_1, 7);                \
                tmp_1 = __msa_pckev_b(tmp_1, tmp_1);                          \
                tmp_1 = (v16i8)__msa_shf_h((v8i16)tmp_1, 0x4e);               \
                out = (v16i8)__msa_or_v((v16u8)out, (v16u8)tmp_1);            \
                tmp_0 = (v16i8)__msa_shf_h((v8i16)out, 0x4e);                 \
                out = (v16i8)__msa_min_u_b((v16u8)out, (v16u8)tmp_0);         \
            } else {                                                          \
                /* single reference list (non-B slices) */                    \
                ref_0 = LD_SB(ref_t + d_idx_12);                              \
                ref_3 = LD_SB(ref_t + 12);                                    \
                tmp_2 = LD_SH(mv_t + d_idx_x4_48);                            \
                tmp_3 = LD_SH(mv_t + 48);                                     \
                tmp_4 = tmp_3 - tmp_2;                                        \
                tmp_1 = (v16i8)__msa_sat_s_h(tmp_4, 7);                       \
                tmp_1 = __msa_pckev_b(tmp_1, tmp_1);                          \
                out = (v16i8)__msa_subs_u_b((v16u8)tmp_1, (v16u8)cnst_0);     \
                out = (v16i8)__msa_sat_s_h((v8i16)out, 7);                    \
                out = __msa_pckev_b(out, out);                                \
                ref_0 = ref_3 - ref_0;                                        \
                out = (v16i8)__msa_or_v((v16u8)out, (v16u8)ref_0);            \
            }                                                                 \
        }                                                                     \
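        /* coded coefficients on either side of the edge force bS = 2;       \
         * otherwise the MV/ref based strength (at most 1) is kept */        \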
        tmp_0 = LD_SB(nnz_t + 12);                                            \
        tmp_1 = LD_SB(nnz_t + d_idx_12);                                      \
        tmp_0 = (v16i8)__msa_or_v((v16u8)tmp_0, (v16u8)tmp_1);                \
        tmp_0 = (v16i8)__msa_min_u_b((v16u8)tmp_0, (v16u8)cnst_2);            \
        out = (v16i8)__msa_min_u_b((v16u8)out, (v16u8)cnst_2);                \
        tmp_0 = (v16i8)((v8i16)tmp_0 << 1);                                   \
        tmp_0 = (v16i8)__msa_max_u_b((v16u8)out, (v16u8)tmp_0);               \
        tmp_0 = __msa_ilvr_b(zero, tmp_0);                                    \
        ST_D1(tmp_0, 0, bS_t + dir_x32);                                      \
        /* advance to the next edge group (mv entries are 4 bytes wide) */    \
        ref_t += step;                                                        \
        mv_t += step_x4;                                                      \
        nnz_t += step;                                                        \
        bS_t += step;                                                         \
    }                                                                         \
}
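
/* Compute the deblocking filter strengths bS[dir][edge][blk] for one
 * macroblock from the per-4x4-block reference indices, motion vectors and
 * coded-block flags (nnz). */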
void ff_h264_loop_filter_strength_msa(int16_t bS[2][4][4], uint8_t nnz[40],
                                      int8_t ref[2][40], int16_t mv[2][40][2],
                                      int bidir, int edges, int step,
                                      int mask_mv0, int mask_mv1, int field)
{
    v16i8 ref_0, ref_1, ref_2, ref_3;
    v16i8 tmp_0, tmp_1, out, zero = { 0 };
    v8i16 tmp_2, tmp_3, tmp_4, tmp_5, tmp_6;
    v16i8 cnst_0, cnst_1, cnst_2;
    v16i8 one = __msa_fill_b(0xff);
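
    /* MV-difference limits used by the strength iterations; field
     * macroblocks use a tighter limit for the vertical MV component. */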
    if (field) {
        cnst_0 = (v16i8)__msa_fill_h(0x206);
        cnst_1 = (v16i8)__msa_fill_h(0x103);
        cnst_2 = (v16i8)__msa_fill_h(0x101);
    } else {
        cnst_0 = __msa_fill_b(0x6);
        cnst_1 = __msa_fill_b(0x3);
        cnst_2 = __msa_fill_b(0x1);
    }
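
    /* the dir == 0 strengths are produced transposed; reorder the 4x4 block
     * of 16-bit values in bS[0] back to its normal layout */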
    LD_SB2((int8_t*)bS, 16, tmp_0, tmp_1);
    tmp_2 = (v8i16)__msa_ilvl_d((v2i64)tmp_0, (v2i64)tmp_0);
    tmp_3 = (v8i16)__msa_ilvl_d((v2i64)tmp_1, (v2i64)tmp_1);
    tmp_0 = (v16i8)__msa_ilvr_d((v2i64)tmp_3, (v2i64)tmp_2);
    tmp_1 = (v16i8)__msa_ilvr_d((v2i64)tmp_5, (v2i64)tmp_4);
    ST_SB2(tmp_0, tmp_1, (int8_t*)bS, 16);