    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
#define HEVC_UNIW_RND_CLIP2_MAX_SATU_H(in0_h, in1_h, wgt_w, offset_h, rnd_w, \
                                       out0_h, out1_h) \
{ \
    v4i32 in0_r_m, in0_l_m, in1_r_m, in1_l_m; \
 \
    ILVRL_H2_SW(in0_h, in0_h, in0_r_m, in0_l_m); \
    ILVRL_H2_SW(in1_h, in1_h, in1_r_m, in1_l_m); \
    DOTP_SH4_SW(in0_r_m, in1_r_m, in0_l_m, in1_l_m, wgt_w, wgt_w, wgt_w, \
                wgt_w, in0_r_m, in1_r_m, in0_l_m, in1_l_m); \
    SRAR_W4_SW(in0_r_m, in1_r_m, in0_l_m, in1_l_m, rnd_w); \
    PCKEV_H2_SH(in0_l_m, in0_r_m, in1_l_m, in1_r_m, out0_h, out1_h); \
    ADDS_SH2_SH(out0_h, offset_h, out1_h, offset_h, out0_h, out1_h); \
    CLIP_SH2_0_255(out0_h, out1_h); \
}
#define HEVC_UNIW_RND_CLIP4_MAX_SATU_H(in0_h, in1_h, in2_h, in3_h, wgt_w, \
                                       offset_h, rnd_w, out0_h, out1_h, \
                                       out2_h, out3_h) \
{ \
    HEVC_UNIW_RND_CLIP2_MAX_SATU_H(in0_h, in1_h, wgt_w, offset_h, rnd_w, \
                                   out0_h, out1_h); \
    HEVC_UNIW_RND_CLIP2_MAX_SATU_H(in2_h, in3_h, wgt_w, offset_h, rnd_w, \
                                   out2_h, out3_h); \
}
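
/* A rough scalar sketch of what the two macros above compute per sample
 * (my reading of the SIMD sequence; this comment is not part of the
 * original source):
 *
 *     tmp = (int32_t) in * weight;            // DOTP_SH4_SW, 32-bit lanes
 *     tmp = (tmp + (1 << (rnd - 1))) >> rnd;  // SRAR_W4_SW, rounded shift
 *     tmp = (int16_t) tmp;                    // PCKEV_H2_SH, pack to 16 bit
 *     tmp = sat_add16(tmp, offset);           // ADDS_SH2_SH, saturating add
 *     out = clip(tmp, 0, 255);                // CLIP_SH2_0_255
 *
 * i.e. HEVC uni-directional weighted prediction for 8-bit output: a widening
 * multiply by the weight, a rounded shift by the log2 denominator, then the
 * offset and a final clip to pixel range. */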
    uint32_t loop_cnt, tp0, tp1, tp2, tp3;
    v8i16 dst0, dst1, dst2, dst3, offset_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

        LW2(src, src_stride, tp0, tp1);
        dst0 = (v8i16) __msa_ilvr_b(zero, src0);
        DOTP_SH2_SW(dst0_r, dst0_l, weight_vec, weight_vec, dst0_r, dst0_l);
        dst0 = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
        out0 = (v16u8) __msa_pckev_b((v16i8) dst0, (v16i8) dst0);
        ST_W2(out0, 0, 1, dst, dst_stride);
        LW4(src, src_stride, tp0, tp1, tp2, tp3);
                                       rnd_vec, dst0, dst1);
        out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
        ST_W4(out0, 0, 1, 2, 3, dst, dst_stride);
    } else if (0 == (height % 8)) {
        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LW4(src, src_stride, tp0, tp1, tp2, tp3);
            src += 4 * src_stride;
            LW4(src, src_stride, tp0, tp1, tp2, tp3);
            src += 4 * src_stride;
            SLLI_4V(dst0, dst1, dst2, dst3, 6);
                                           offset_vec, rnd_vec, dst0, dst1,
            ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
            dst += 8 * dst_stride;
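
            /* Added note (not in the original source): the 4-wide copy path
             * batches rows by height - 2, 4 or a multiple of 8 - loading
             * four 32-bit words per LW4 and widening bytes to 16 bit with a
             * << 6 (SLLI_4V) so the shared rounding/clip macro sees the same
             * Q6 intermediate format as the filter paths. */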
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 out0, out1, out2, out3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);
    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD4(src, src_stride, tp0, tp1, tp2, tp3);
        src += (4 * src_stride);
        LD4(src, src_stride, tp0, tp1, tp2, tp3);
        src += (4 * src_stride);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        ST_W2(out0, 0, 2, dst, dst_stride);
        ST_H2(out0, 2, 6, dst + 4, dst_stride);
        ST_W2(out1, 0, 2, dst + 2 * dst_stride, dst_stride);
        ST_H2(out1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
        dst += (4 * dst_stride);
        ST_W2(out2, 0, 2, dst, dst_stride);
        ST_H2(out2, 2, 6, dst + 4, dst_stride);
        ST_W2(out3, 0, 2, dst + 2 * dst_stride, dst_stride);
        ST_H2(out3, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
        dst += (4 * dst_stride);
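
        /* Added note (not in the original source): a 6-pixel row has no
         * native store, so each row goes out as one 32-bit word (ST_W2,
         * pixels 0..3) plus one 16-bit halfword (ST_H2, pixels 4..5) at
         * dst + 4. */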
    uint64_t tp0, tp1, tp2, tp3;
    v16i8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
    v16u8 out0, out1, out2, out3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);
        LD2(src, src_stride, tp0, tp1);
                                       rnd_vec, dst0, dst1);
        out0 = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
        ST_D2(out0, 0, 1, dst, dst_stride);
        LD4(src, src_stride, tp0, tp1, tp2, tp3);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        LD4(src, src_stride, tp0, tp1, tp2, tp3);
        src += 4 * src_stride;
        LD2(src, src_stride, tp0, tp1);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
    } else if (0 == height % 8) {
        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD4(src, src_stride, tp0, tp1, tp2, tp3);
            src += 4 * src_stride;
            LD4(src, src_stride, tp0, tp1, tp2, tp3);
            src += 4 * src_stride;
            SLLI_4V(dst0, dst1, dst2, dst3, 6);
            SLLI_4V(dst4, dst5, dst6, dst7, 6);
                                           offset_vec, rnd_vec, dst0, dst1,
                                           offset_vec, rnd_vec, dst4, dst5,
            ST_D8(out0, out1, out2, out3, 0, 1, 0, 1, 0, 1, 0, 1,
                  dst, dst_stride);
            dst += (8 * dst_stride);
    v16u8 out0, out1, out2;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = 4; loop_cnt--;) {
        src += (4 * src_stride);
                   dst0, dst1, dst2, dst3);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
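
        /* Added note (not in the original source): a 12-wide block is split
         * into an 8-wide part stored with ST_D4 (64-bit doublewords) and the
         * remaining 4 columns stored with ST_W4 at dst + 8. */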
    v16u8 out0, out1, out2, out3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        src += (4 * src_stride);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        ST_UB4(out0, out1, out2, out3, dst, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2, out3, out4, out5;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v8i16 dst8, dst9, dst10, dst11;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src += (4 * src_stride);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
        SLLI_4V(dst8, dst9, dst10, dst11, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
                                       offset_vec, rnd_vec, dst8, dst9, dst10,
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        PCKEV_B3_UB(dst7, dst6, dst9, dst8, dst11, dst10, out3, out4, out5);
        ST_UB4(out0, out1, out3, out4, dst, dst_stride);
        ST_D4(out2, out5, 0, 1, 0, 1, dst + 16, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2, out3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        src += (2 * src_stride);
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        ST_UB2(out0, out1, dst, dst_stride);
        ST_UB2(out2, out3, dst + 16, dst_stride);
        dst += (2 * dst_stride);
    v16u8 out0, out1, out2, out3, out4, out5;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, offset_vec;
    v8i16 dst6, dst7, dst8, dst9, dst10, dst11;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
        SLLI_4V(dst8, dst9, dst10, dst11, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
                                       offset_vec, rnd_vec, dst8, dst9, dst10,
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        PCKEV_B3_UB(dst7, dst6, dst9, dst8, dst11, dst10, out3, out4, out5);
        ST_UB2(out0, out1, dst, 16);
        ST_UB(out2, dst + 32);
        ST_UB2(out3, out4, dst, 16);
        ST_UB(out5, dst + 32);
    v16u8 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, offset_vec;
    v8i16 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_h(offset);
    rnd_vec = __msa_fill_w(rnd_val);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        SLLI_4V(dst0, dst1, dst2, dst3, 6);
        SLLI_4V(dst4, dst5, dst6, dst7, 6);
        SLLI_4V(dst8, dst9, dst10, dst11, 6);
        SLLI_4V(dst12, dst13, dst14, dst15, 6);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
                                       offset_vec, rnd_vec, dst8, dst9, dst10,
                                       offset_vec, rnd_vec, dst12, dst13, dst14,
        PCKEV_B2_UB(dst13, dst12, dst15, dst14, out6, out7);
        ST_UB4(out0, out1, out2, out3, dst, 16);
        ST_UB4(out4, out5, out6, out7, dst, 16);
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;
    v16i8 mask0, mask1, mask2, mask3, vec11, vec12, vec13, vec14, vec15;
    v8i16 filter_vec, dst01, dst23, dst45, dst67;
    v8i16 dst0, dst1, dst2, dst3, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
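
    /* Added note (not in the original source): the scalar weight, offset
     * and shift are splatted once into vectors, and SPLATI_H4_SH broadcasts
     * the four 8-tap coefficient pairs from filter_vec into filt0..filt3.
     * The srar_h/adds_s_h pair folds a rounded (weight >> rnd_val) term
     * into offset_vec up front, so the per-pixel loop below only has to
     * multiply, round-shift and add the combined offset. */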
    for (loop_cnt = (height >> 3); loop_cnt--;) {
        src += (8 * src_stride);
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
        VSHF_B4_SB(src4, src5, mask0, mask1, mask2, mask3,
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src6, src7, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
        dst += (8 * dst_stride);
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src += (4 * src_stride);
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst01, dst23, dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src + 8, src_stride, src4, src5, src6, src7);
        src += (4 * src_stride);
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
        VSHF_B4_SB(src4, src5, mask4, mask5, mask6, mask7,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src6, src7, mask4, mask5, mask6, mask7,
                   vec4, vec5, vec6, vec7);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = (height >> 1); loop_cnt--;) {
        src += (2 * src_stride);
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_UB2(out0, out1, dst, dst_stride);
        dst += (2 * dst_stride);
    v16u8 out0, out1, out2;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = 16; loop_cnt--;) {
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
                   vec12, vec13, vec14, vec15);
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst4, dst3, dst5, dst2, out0, out1, out2);
        ST_UB2(out0, out1, dst, dst_stride);
        ST_D2(out2, 0, 1, dst + 16, dst_stride);
        dst += (2 * dst_stride);
    v16u8 out0, out1, out2, out3;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = height >> 1; loop_cnt--;) {
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
        VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        ST_UB2(out0, out1, dst, 16);
        ST_UB2(out2, out3, dst, 16);
    v16u8 out0, out1, out2;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = 64; loop_cnt--;) {
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
                   vec12, vec13, vec14, vec15);
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_UB2(out0, out1, dst, 16);
        ST_UB(out2, dst + 32);
    const uint8_t *src_tmp;
    uint32_t loop_cnt, cnt;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (loop_cnt = height; loop_cnt--;) {
        for (cnt = 2; cnt--;) {
                       vec0, vec1, vec2, vec3);
                       vec4, vec5, vec6, vec7);
                       vec8, vec9, vec10, vec11);
                       vec12, vec13, vec14, vec15);
                                           offset_vec, rnd_vec, dst0, dst1,
            ST_UB2(out0, out1, dst_tmp, 16);
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16i8 src9, src10, src11, src12, src13, src14;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v16i8 src1110_r, src1211_r, src1312_r, src1413_r;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v16i8 src12111110, src14131312;
    v8i16 filter_vec, dst01, dst23, dst45, dst67;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 dst0, dst1, dst2, dst3, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    src -= (3 * src_stride);

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    src += (7 * src_stride);
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
               src32_r, src65_r, src54_r, src2110, src4332, src6554);

    for (loop_cnt = (height >> 3); loop_cnt--;) {
               src7, src8, src9, src10, src11, src12, src13, src14);
        src += (8 * src_stride);
        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
        ILVR_B4_SB(src11, src10, src12, src11, src13, src12, src14, src13,
                   src1110_r, src1211_r, src1312_r, src1413_r);
        ILVR_D4_SB(src87_r, src76_r, src109_r, src98_r, src1211_r, src1110_r,
                   src1413_r, src1312_r,
                   src8776, src10998, src12111110, src14131312);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt0, filt1, filt2, filt3);
                          filt0, filt1, filt2, filt3);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
        dst += (8 * dst_stride);

        src4332 = src12111110;
        src6554 = src14131312;
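
        /* Added note (not in the original source): the vertical 8-tap
         * filter keeps a sliding window of byte-interleaved row pairs.
         * At the end of each 8-row iteration the newest interleaved
         * vectors are copied back into the src4332/src6554 slots, so the
         * next iteration only has to load and interleave the 8 new rows. */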
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 dst0, dst1, dst2, dst3, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    src -= (3 * src_stride);

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    src += (7 * src_stride);
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 weight_vec_h, offset_vec, denom_vec, filter_vec;
    v4i32 weight_vec, rnd_vec;

    src -= (3 * src_stride);

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    src += (7 * src_stride);
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
               src10_l, src32_l, src54_l, src21_l);
    ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
    ILVR_D3_SB(src21_l, src10_l, src43_l, src32_l, src65_l, src54_l,
               src2110, src4332, src6554);
    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
        ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_l, src87_l, src98_l, src109_l);
        ILVR_D2_SB(src87_l, src76_l, src109_l, src98_l, src8776, src10998);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                          filt1, filt2, filt3);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
    const uint8_t *src_tmp;
    v16u8 out0, out1, out2, out3;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r;
    v16i8 src21_r, src43_r, src65_r, src87_r;
    v16i8 src10_l, src32_l, src54_l, src76_l;
    v16i8 src21_l, src43_l, src65_l, src87_l;
    v16i8 src98_r, src109_r, src98_l, src109_l;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    src -= (3 * src_stride);

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
    for (cnt = weightmul16; cnt--;) {
        src_tmp += (7 * src_stride);

        for (loop_cnt = (height >> 2); loop_cnt--;) {
            LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
            src_tmp += (4 * src_stride);
                       src10_r, src32_r, src54_r, src21_r);
            ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
                       src10_l, src32_l, src54_l, src21_l);
            ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
            ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                       src76_r, src87_r, src98_r, src109_r);
            ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                       src76_l, src87_l, src98_l, src109_l);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                              filt1, filt2, filt3);
                                           offset_vec, rnd_vec, dst0, dst1,
                                           offset_vec, rnd_vec, dst4, dst5,
            ST_UB4(out0, out1, out2, out3, dst_tmp, dst_stride);
            dst_tmp += (4 * dst_stride);
                                   const int8_t *filter_x,
                                   const int8_t *filter_y,
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
    v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
    v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
    v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;

    src -= ((3 * src_stride) + 3);
    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_w(offset);
    rnd_vec = __msa_fill_w(rnd_val);
    denom_vec = rnd_vec - 6;

    const_128 = __msa_ldi_w(128);
    const_128 *= weight_vec;
    offset_vec += __msa_srar_w(const_128, denom_vec);
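
    /* Added note (not in the original source): in the hv paths the source
     * bytes are converted to signed by an XOR with 128 before filtering
     * (see the __msa_xori_b(..., 128) calls further down).  The
     * (128 * weight) term, rounded-shifted by (rnd_val - 6), is folded into
     * offset_vec here to compensate for that bias in the final weighted
     * result. */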
    src += (7 * src_stride);
    VSHF_B4_SB(src0, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src1, src4, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
               vec8, vec9, vec10, vec11);
    VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
               vec12, vec13, vec14, vec15);
    dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        dst76_r = __msa_ilvr_h(dst97, dst66);
        dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
        dst98_r = __msa_ilvr_h(dst66, dst108);

        dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);

        SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
        MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
        MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
        SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
        ADD2(dst0_r, offset_vec, dst1_r, offset_vec, dst0_r, dst1_r);
        ADD2(dst2_r, offset_vec, dst3_r, offset_vec, dst2_r, dst3_r);
        PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst1_r);
        out = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
        ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
        dst += (4 * dst_stride);

        dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
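
        /* Added note (not in the original source): dst66 carries the last
         * horizontal-filter output row across loop iterations; splati_d
         * duplicates the high half of dst97/dst108 so the vertical 8-tap
         * window can be rebuilt from just the four new rows loaded each
         * pass. */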
                                              const int8_t *filter_x,
                                              const int8_t *filter_y,
    uint32_t loop_cnt, cnt;
    const uint8_t *src_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v8i16 filt0, filt1, filt2, filt3;
    v4i32 filt_h0, filt_h1, filt_h2, filt_h3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
    v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
    v8i16 dst21_l, dst43_l, dst65_l, dst87_l;
    v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;

    src -= ((3 * src_stride) + 3);

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_w(offset);
    rnd_vec = __msa_fill_w(rnd_val);
    denom_vec = rnd_vec - 6;

    const_128 = __msa_ldi_w(128);
    const_128 *= weight_vec;
    offset_vec += __msa_srar_w(const_128, denom_vec);

    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    SPLATI_W4_SW(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
    for (cnt = width >> 3; cnt--;) {
        src_tmp += (7 * src_stride);
                   vec0, vec1, vec2, vec3);
                   vec4, vec5, vec6, vec7);
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
        VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
                   vec8, vec9, vec10, vec11);
        ILVR_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
                   dst10_r, dst32_r, dst54_r, dst21_r);
        ILVR_H2_SH(dst4, dst3, dst6, dst5, dst43_r, dst65_r);
        ILVL_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst2, dst1,
                   dst10_l, dst32_l, dst54_l, dst21_l);
        ILVL_H2_SH(dst4, dst3, dst6, dst5, dst43_l, dst65_l);
        for (loop_cnt = height >> 1; loop_cnt--;) {
            LD_SB2(src_tmp, src_stride, src7, src8);
            src_tmp += 2 * src_stride;
            VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
                       vec0, vec1, vec2, vec3);
                                     filt_h0, filt_h1, filt_h2, filt_h3);
                                     filt_h0, filt_h1, filt_h2, filt_h3);
            VSHF_B4_SB(src8, src8, mask0, mask1, mask2, mask3,
                       vec0, vec1, vec2, vec3);
                                     filt_h0, filt_h1, filt_h2, filt_h3);
                                     filt_h0, filt_h1, filt_h2, filt_h3);
            MUL2(dst0_r, weight_vec, dst0_l, weight_vec, dst0_r, dst0_l);
            MUL2(dst1_r, weight_vec, dst1_l, weight_vec, dst1_r, dst1_l);
            SRAR_W4_SW(dst0_r, dst1_r, dst0_l, dst1_l, rnd_vec);
            ADD2(dst0_r, offset_vec, dst0_l, offset_vec, dst0_r, dst0_l);
            ADD2(dst1_r, offset_vec, dst1_l, offset_vec, dst1_r, dst1_l);
            PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
            dst0_r = (v4i32) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
            ST_D2(dst0_r, 0, 1, dst_tmp, dst_stride);
            dst_tmp += (2 * dst_stride);
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
    const uint8_t *src_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst10_l, dst32_l, dst54_l;
    v8i16 dst98_r, dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
    v8i16 dst76_l, filter_vec;
    v4i32 dst0_r, dst0_l, dst1_r, dst2_r, dst3_r;
    v4i32 weight_vec, offset_vec, rnd_vec, const_128, denom_vec;

    src -= ((3 * src_stride) + 3);

    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);

    weight_vec = __msa_fill_w(weight);
    offset_vec = __msa_fill_w(offset);
    rnd_vec = __msa_fill_w(rnd_val);
    denom_vec = rnd_vec - 6;

    const_128 = __msa_ldi_w(128);
    const_128 *= weight_vec;
    offset_vec += __msa_srar_w(const_128, denom_vec);
    src_tmp += (7 * src_stride);
    VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
               vec15);
    VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
               vec11);

    for (loop_cnt = 16; loop_cnt--;) {
        src7 = LD_SB(src_tmp);
        src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
        src_tmp += src_stride;
        VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
                   vec3);
                                 filt_h0, filt_h1, filt_h2, filt_h3);
                                 filt_h0, filt_h1, filt_h2, filt_h3);
        MUL2(dst0_r, weight_vec, dst0_l, weight_vec, dst0_r, dst0_l);
        ADD2(dst0_r, offset_vec, dst0_l, offset_vec, dst0_r, dst0_l);
        dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
        out = (v16u8) __msa_pckev_b((v16i8) dst0_r, (v16i8) dst0_r);
        dst_tmp += dst_stride;
    src += (7 * src_stride);
    VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7, vec8, vec9, vec10,
               vec11);
    VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7, vec12, vec13, vec14,
               vec15);
    dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);

    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
                   vec3);
        VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
                   vec7);
        dst76_r = __msa_ilvr_h(dst97, dst66);
        dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
        dst98_r = __msa_ilvr_h(dst66, dst108);
        dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);

        SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
        MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
        MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
        SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
        ADD2(dst0_r, offset_vec, dst1_r, offset_vec, dst0_r, dst1_r);
        ADD2(dst2_r, offset_vec, dst3_r, offset_vec, dst2_r, dst3_r);
        PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst1_r);
        out = (v16u8) __msa_pckev_b((v16i8) dst1_r, (v16i8) dst0_r);
        ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
        dst += (4 * dst_stride);

        dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
    v4i32 dst0_r, dst0_l;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    DOTP_SH2_SW(dst0_r, dst0_l, weight_vec, weight_vec, dst0_r, dst0_l);
    dst0 = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
    dst0 = __msa_adds_s_h(dst0, offset_vec);
    out = (v16u8) __msa_pckev_b((v16i8) dst0, (v16i8) dst0);
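
    /* Added note (not in the original source): this is the same weighted
     * write-out as the macros at the top of the file, spelled out for a
     * single vector - widen and multiply by the weight (DOTP), pack back to
     * 16 bit, saturating-add the combined offset, then pack the two 4-pixel
     * rows into one byte vector for the store (the rounding shift and the
     * clip happen on lines elided from this excerpt). */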
    dst += (4 * dst_stride);
    v16i8 mask1, vec0, vec1, vec2, vec3;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    out = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
    ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
    dst += (4 * dst_stride);
    v16i8 mask1, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = (height >> 3); loop_cnt--;) {
        src += (8 * src_stride);
        VSHF_B2_SB(src4, src5, src4, src5, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src7, src6, src7, mask0, mask1, vec6, vec7);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
        ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
        dst += (8 * dst_stride);
    } else if (4 == height) {
    v16u8 out0, out1, out2, out3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec6, vec7);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst0, dst1, dst2, dst3);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst4, dst5, dst6, dst7);
    ST_W2(out0, 0, 2, dst, dst_stride);
    ST_H2(out0, 2, 6, dst + 4, dst_stride);
    ST_W2(out1, 0, 2, dst + 2 * dst_stride, dst_stride);
    ST_H2(out1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
    dst += (4 * dst_stride);
    ST_W2(out2, 0, 2, dst, dst_stride);
    ST_H2(out2, 2, 6, dst + 4, dst_stride);
    ST_W2(out3, 0, 2, dst + 2 * dst_stride, dst_stride);
    ST_H2(out3, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
    v8i16 filt0, filt1, dst0, dst1;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    out = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
    v16i8 mask0, mask1, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 filt0, filt1, dst0, dst1, dst2, dst3;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst0, dst1, dst2, dst3);
    ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
    v16u8 out0, out1, out2;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10,
          vec11;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst0, dst1, dst2, dst3);
    PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
    ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
    ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
    v16u8 out0, out1, out2, out3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = (height >> 3); loop_cnt--;) {
        src += (8 * src_stride);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec6, vec7);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst4, dst5, dst6, dst7);
        ST_D8(out0, out1, out2, out3, 0, 1, 0, 1, 0, 1, 0, 1, dst, dst_stride);
        dst += (8 * dst_stride);
    } else if (4 == height) {
    } else if (6 == height) {
    v16u8 out0, out1, out2;
    v16i8 mask2 = { 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27,
                    28 };
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = 4; loop_cnt--;) {
        src += (4 * src_stride);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
                                     rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2, out3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src += (4 * src_stride);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec6, vec7);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst4, dst5, dst6, dst7);
        PCKEV_B4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
                    out0, out1, out2, out3);
        ST_UB4(out0, out1, out2, out3, dst, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2;
    v16i8 mask0, mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = 16; loop_cnt--;) {
        src += (2 * src_stride);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec2, vec3);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
                                     rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_UB2(out0, out1, dst, dst_stride);
        ST_D2(out2, 0, 1, dst + 16, dst_stride);
        dst += (2 * dst_stride);
    v16u8 out0, out1, out2, out3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    for (loop_cnt = (height >> 1); loop_cnt--;) {
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src3, src4, src3, src4, mask2, mask3, vec2, vec3);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec6, vec7);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst4, dst5, dst6, dst7);
        ST_UB2(out0, out1, dst, 16);
        ST_UB2(out2, out3, dst, 16);
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src2110, src4332;
    v4i32 dst0_r, dst0_l;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
    DOTP_SH2_SW(dst0_r, dst0_l, weight_vec, weight_vec, dst0_r, dst0_l);
    dst0 = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
    dst0 = __msa_adds_s_h(dst0, offset_vec);
    out = (v16u8) __msa_pckev_b((v16i8) dst0, (v16i8) dst0);
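
    /* Added note (not in the original source): for 4-wide vertical
     * filtering, two adjacent interleaved row pairs (e.g. src21_r/src10_r)
     * are packed into one 16-byte vector with ILVR_D2_SB, so a single
     * filter pass produces two output rows and the vector lanes stay fully
     * occupied. */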
    v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
    v16i8 src2110, src4332, src6554;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

               src32_r, src43_r, src54_r, src65_r);
    ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
               src2110, src4332, src6554);
    out = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
    ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v8i16 dst0, dst1, dst2, dst3, filt0, filt1;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    src += (3 * src_stride);
    src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
    src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
    for (loop_cnt = (height >> 3); loop_cnt--;) {
               src3, src4, src5, src6, src7, src8, src9, src10);
        src += (8 * src_stride);
                   src32_r, src43_r, src54_r, src65_r);
        ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
        ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
        ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r,
                   src109_r, src98_r, src4332, src6554, src8776, src10998);
                                     weight_vec, offset_vec, rnd_vec,
                                     dst0, dst1, dst2, dst3);
        ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
        dst += (8 * dst_stride);
    } else if (4 == height) {
    } else if (0 == (height % 8)) {
    v16u8 out0, out1, out2, out3;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src54_r, src65_r, src76_r, src87_r, src98_r, src109_r;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    src += (3 * src_stride);
    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
    ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
    ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst0, dst1, dst2, dst3);
                                 weight_vec, offset_vec, rnd_vec,
                                 dst4, dst5, dst6, dst7);
    ST_W2(out0, 0, 2, dst, dst_stride);
    ST_H2(out0, 2, 6, dst + 4, dst_stride);
    ST_W2(out1, 0, 2, dst + 2 * dst_stride, dst_stride);
    ST_H2(out1, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
    dst += (4 * dst_stride);
    ST_W2(out2, 0, 2, dst, dst_stride);
    ST_H2(out2, 2, 6, dst + 4, dst_stride);
    ST_W2(out3, 0, 2, dst + 2 * dst_stride, dst_stride);
    ST_H2(out3, 2, 6, dst + 2 * dst_stride + 4, dst_stride);
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    out = (v16u8) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src5, src6, src54_r, src65_r;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);

    src += (3 * src_stride);
    ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
    ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
    v16u8 out0, out1, out2;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16i8 src10_r, src32_r, src54_r, src76_r;
    v16i8 src21_r, src43_r, src65_r, src87_r;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    src += (3 * src_stride);
    LD_SB6(src, src_stride, src3, src4, src5, src6, src7, src8);
    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r,
               src21_r, src32_r, src43_r);
    ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r,
               src65_r, src76_r, src87_r);
                                 offset_vec, rnd_vec, dst0, dst1, dst2, dst3);
    PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
    ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
    ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
    v16u8 out0, out1, out2, out3;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src54_r, src65_r, src76_r, src87_r, src98_r, src109_r;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;

    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);

    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);

    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
3745 src += (3 * src_stride);
3749 for (loop_cnt = (
height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride,
               src3, src4, src5, src6, src7, src8, src9, src10);
        src += (8 * src_stride);
        ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
        ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
        ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
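
        /* Each iteration emits eight 8-wide rows; out0..out3 each pack two
         * rows into doubleword lanes 0 and 1. */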
        ST_D8(out0, out1, out2, out3, 0, 1, 0, 1, 0, 1, 0, 1, dst, dst_stride);
        dst += (8 * dst_stride);
    } else if (4 == height) {
    } else if (6 == height) {
    v16u8 out0, out1, out2, out3, out4, out5;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src10_l, src32_l, src54_l, src21_l, src43_l, src65_l;
    v16i8 src2110, src4332;
    v16i8 src54_r, src76_r, src98_r, src65_r, src87_r, src109_r;
    v16i8 src76_l, src98_l, src87_l, src109_l, src6554, src8776, src10998;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    v8i16 dst9, dst10, dst11, filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;
    src -= (1 * src_stride);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);
    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    src += (3 * src_stride);
    src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_l, (v2i64) src10_l);
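
    /* 12-wide columns are split 8 + 4: the left 8 pixels use the _r
     * interleaves, while the rightmost 4 pixels of two consecutive row
     * pairs are packed into a single vector (src2110 and friends) via
     * __msa_ilvr_d() of the _l interleaves. */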
    for (loop_cnt = 2; loop_cnt--;) {
        LD_SB8(src, src_stride,
               src3, src4, src5, src6, src7, src8, src9, src10);
        src += (8 * src_stride);
        src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_l, (v2i64) src32_l);
        src6554 = (v16i8) __msa_ilvr_d((v2i64) src65_l, (v2i64) src54_l);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       rnd_vec, dst4, dst5);
        PCKEV_B3_UB(dst1, dst0, dst3, dst2, dst5, dst4, out0, out1, out2);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out2, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
        src8776 = (v16i8) __msa_ilvr_d((v2i64) src87_l, (v2i64) src76_l);
        src10998 = (v16i8) __msa_ilvr_d((v2i64) src109_l, (v2i64) src98_l);
                                       offset_vec, rnd_vec, dst6, dst7, dst8,
                                       rnd_vec, dst10, dst11);
        PCKEV_B3_UB(dst7, dst6, dst9, dst8, dst11, dst10, out3, out4, out5);
        ST_D4(out3, out4, 0, 1, 0, 1, dst, dst_stride);
        ST_W4(out5, 0, 1, 2, 3, dst + 8, dst_stride);
        dst += (4 * dst_stride);
    v16u8 out0, out1, out2, out3;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src10_l, src32_l, src21_l, src43_l;
    v16i8 src54_r, src54_l, src65_r, src65_l, src6;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);
    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    src += (3 * src_stride);
    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src3, src4, src5, src6);
        src += (4 * src_stride);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        PCKEV_B4_UB(dst4, dst0, dst5, dst1, dst6, dst2, dst7, dst3, out0, out1,
                    out2, out3);
        ST_UB4(out0, out1, out2, out3, dst, dst_stride);
        dst += (4 * dst_stride);
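
        /* 16-wide rows use both interleave halves: the _r vectors produce
         * dst0..dst3 (left 8 columns), the _l vectors dst4..dst7 (right 8
         * columns), recombined into full rows by PCKEV_B4_UB above. */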
    v16u8 out0, out1, out2, out3, out4, out5;
    v16i8 src6, src7, src8, src9, src10, src11, src12, src13;
    v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
    v16i8 src10_l, src32_l, src54_l, src21_l, src43_l, src65_l;
    v16i8 src87_r, src98_r, src109_r, src1110_r, src1211_r, src1312_r;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, dst9, dst10;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec, dst11;
    v4i32 weight_vec, rnd_vec;
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);
    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    LD_SB3(src + 16, src_stride, src7, src8, src9);
    src += (3 * src_stride);
    ILVR_B2_SB(src8, src7, src9, src8, src87_r, src98_r);
    for (loop_cnt = 8; loop_cnt--;) {
        LD_SB4(src, src_stride, src3, src4, src5, src6);
        LD_SB4(src + 16, src_stride, src10, src11, src12, src13);
        src += (4 * src_stride);
        ILVR_B2_SB(src10, src9, src11, src10, src109_r, src1110_r);
        ILVR_B2_SB(src12, src11, src13, src12, src1211_r, src1312_r);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
                                       offset_vec, rnd_vec, dst8, dst9, dst10,
        PCKEV_B4_UB(dst4, dst0, dst5, dst1, dst6, dst2, dst7, dst3, out0, out1,
                    out2, out3);
        PCKEV_B2_UB(dst9, dst8, dst11, dst10, out4, out5);
        ST_UB4(out0, out1, out2, out3, dst, dst_stride);
        ST_D4(out4, out5, 0, 1, 0, 1, dst + 16, dst_stride);
        dst += (4 * dst_stride);
        src87_r = src1211_r;
        src98_r = src1312_r;
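
        /* Carry the two newest interleaves of the right 8-wide column over
         * as the history pairs for the next 4-row iteration. */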
    v16u8 out0, out1, out2, out3;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
    v16i8 src10_r, src32_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16i8 src10_l, src32_l, src76_l, src98_l;
    v16i8 src21_l, src43_l, src65_l, src87_l;
    v8i16 filter_vec, weight_vec_h, offset_vec, denom_vec;
    v4i32 weight_vec, rnd_vec;
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    weight_vec_h = __msa_fill_h(weight);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val);
    weight_vec_h = __msa_srar_h(weight_vec_h, denom_vec);
    offset_vec = __msa_adds_s_h(offset_vec, weight_vec_h);
    LD_SB3(src + 16, src_stride, src5, src6, src7);
    src += (3 * src_stride);
    ILVR_B2_SB(src6, src5, src7, src6, src65_r, src76_r);
    ILVL_B2_SB(src6, src5, src7, src6, src65_l, src76_l);
    for (loop_cnt = (height >> 1); loop_cnt--;) {
        LD_SB2(src + 16, src_stride, src8, src9);
        src += (2 * src_stride);
                                       offset_vec, rnd_vec, dst0, dst1, dst2,
                                       offset_vec, rnd_vec, dst4, dst5, dst6,
        PCKEV_B4_UB(dst2, dst0, dst3, dst1, dst6, dst4, dst7, dst5, out0, out1,
                    out2, out3);
        ST_UB2(out0, out2, dst, 16);
        ST_UB2(out1, out3, dst, 16);
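
        /* Each 32-wide row goes out as two 16-byte stores; the trailing
         * '16' is the byte offset between the two stores (columns 0 and
         * 16). */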
                                const int8_t *filter_x,
                                const int8_t *filter_y,
    v8i16 filt_h0, filt_h1, filter_vec, tmp;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst0, dst1, weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
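
    /* The (128 * weight) bias compensates for filtering level-shifted
     * (x - 128) inputs: each 4-tap filter sums to 64, so the shift removes
     * (128 * 64 * weight) >> rnd_val from every weighted sample, and
     * (128 * weight) >> (rnd_val - 6) restores it.  A scalar sketch of the
     * epilogue this prologue sets up (illustrative only; 'val' is one
     * second-stage filter output already shifted right by 6):
     *
     *     int wval = (val * weight) >> rnd_val;         // rounded via SRAR
     *     int out  = av_clip_uint8(wval + offset_bias); // offset_vec above
     */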
    MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
    tmp = __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
    out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
                                const int8_t *filter_x,
                                const int8_t *filter_y,
    v8i16 filt_h0, filt_h1, filter_vec, tmp0, tmp1;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst30, dst41, dst52, dst63, dst10, dst32, dst54, dst21, dst43, dst65;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst0, dst1, dst2, dst3, weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);
    SRA_4V(dst0, dst1, dst2, dst3, 6);
    MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
    MUL2(dst2, weight_vec, dst3, weight_vec, dst2, dst3);
    ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
    out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
    ST_W4(out, 0, 1, 2, 3, dst, dst_stride);
                                          const int8_t *filter_x,
                                          const int8_t *filter_y,
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v8i16 filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2, tmp3;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
    v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
    v8i16 dst98_r, dst109_r, offset_vec, const_128, denom_vec;
    v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    src += (3 * src_stride);
    dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
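
    /* dst22 duplicates the upper doubleword of dst21, carrying the row-2
     * horizontal filter outputs forward as vertical-tap history for the
     * first interleave inside the loop. */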
    for (loop_cnt = height >> 3; loop_cnt--;) {
        LD_SB8(src, src_stride,
               src3, src4, src5, src6, src7, src8, src9, src10);
        src += (8 * src_stride);
        VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);
        dst32_r = __msa_ilvr_h(dst73, dst22);
        dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
        dst76_r = __msa_ilvr_h(dst22, dst106);
        SRA_4V(dst0, dst1, dst2, dst3, 6);
        SRA_4V(dst4, dst5, dst6, dst7, 6);
        MUL2(dst0, weight_vec, dst1, weight_vec, dst0, dst1);
        MUL2(dst2, weight_vec, dst3, weight_vec, dst2, dst3);
        MUL2(dst4, weight_vec, dst5, weight_vec, dst4, dst5);
        MUL2(dst6, weight_vec, dst7, weight_vec, dst6, dst7);
        PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
                    tmp2, tmp3);
        ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
        ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
        ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
        dst += (8 * dst_stride);
        dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
                             const int8_t *filter_x,
                             const int8_t *filter_y,
                                  filter_x, filter_y, weight,
    } else if (4 == height) {
                                  filter_x, filter_y, weight,
    } else if (0 == (height % 8)) {
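        /* any height that is a multiple of 8 is handled by the
         * eight-rows-per-iteration kernel */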
                                const int8_t *filter_x,
                                const int8_t *filter_y,
    v16u8 out0, out1, out2;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v8i16 filt_h0, filt_h1, filter_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
    v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
    v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
    v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
    v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
    v4i32 dst0_l, dst1_l, dst2_l, dst3_l, weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    src += (3 * src_stride);
    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);
    PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
    PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
    dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);
    dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
    SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
    SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
    SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
    MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
    MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
    MUL2(dst4_r, weight_vec, dst5_r, weight_vec, dst4_r, dst5_r);
    MUL2(dst6_r, weight_vec, dst7_r, weight_vec, dst6_r, dst7_r);
    MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
    MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
    SRAR_W4_SW(dst0_r, dst1_r, dst2_r, dst3_r, rnd_vec);
    SRAR_W4_SW(dst4_r, dst5_r, dst6_r, dst7_r, rnd_vec);
    SRAR_W4_SW(dst0_l, dst1_l, dst2_l, dst3_l, rnd_vec);
    PCKEV_H2_SH(dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
    PCKEV_H2_SH(dst5_r, dst4_r, dst7_r, dst6_r, tmp2, tmp3);
    PCKEV_H2_SH(dst1_l, dst0_l, dst3_l, dst2_l, tmp4, tmp5);
    ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
    ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
    ADD2(tmp4, offset_vec, tmp5, offset_vec, tmp4, tmp5);
    PCKEV_B3_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, out0, out1, out2);
    ST_W8(out0, out1, 0, 1, 2, 3, 0, 1, 2, 3, dst, dst_stride);
    ST_H8(out2, 0, 1, 2, 3, 4, 5, 6, 7, dst + 4, dst_stride);
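
    /* 6-wide output, eight rows: ST_W8 writes the left 4 bytes of each row,
     * ST_H8 the remaining 2 bytes of each row at column 4. */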
                                 const int8_t *filter_x,
                                 const int8_t *filter_y,
    v8i16 filt_h0, filt_h1, filter_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    v8i16 dst0, dst1, dst2, dst3, dst4;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
    v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
    v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
    SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
    MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
    MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
    SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
    PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp0, tmp1);
    ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
    out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
                                      const int8_t *filter_x,
                                      const int8_t *filter_y,
    v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, tmp0, tmp1, tmp2, tmp3;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    for (cnt = width8mult; cnt--;) {
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
        SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
        SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
        MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
        MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
        MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
        MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
        SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
        SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
        PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
                    dst3_r, tmp0, tmp1, tmp2, tmp3);
        ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
        ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
        ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
                                 const int8_t *filter_x,
                                 const int8_t *filter_y,
    v16u8 out0, out1, out2;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v8i16 filt_h0, filt_h1, filter_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 dst4_r, dst4_l, dst5_r, dst5_l, weight_vec, rnd_vec;
    v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
    v8i16 dst21_r, dst43_r, dst21_l, dst43_l;
    v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
    v8i16 dst76_r, dst76_l, dst87_r, dst87_l;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
    v8i16 offset_vec, const_128, denom_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    src += (5 * src_stride);
    LD_SB4(src, src_stride, src5, src6, src7, src8);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
    VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
    VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
    VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);
    SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
    SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
    SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);
    MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
    MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
    MUL2(dst4_r, weight_vec, dst5_r, weight_vec, dst4_r, dst5_r);
    MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
    MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
    MUL2(dst4_l, weight_vec, dst5_l, weight_vec, dst4_l, dst5_l);
    SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
    SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
    SRAR_W4_SW(dst4_r, dst4_l, dst5_r, dst5_l, rnd_vec);
    PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l, dst3_r,
                tmp0, tmp1, tmp2, tmp3);
    PCKEV_H2_SH(dst4_l, dst4_r, dst5_l, dst5_r, tmp4, tmp5);
    ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
    ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
    ADD2(tmp4, offset_vec, tmp5, offset_vec, tmp4, tmp5);
    PCKEV_B3_UB(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, out0, out1, out2);
    ST_D4(out0, out1, 0, 1, 0, 1, dst, dst_stride);
    ST_D2(out2, 0, 1, dst + 4 * dst_stride, dst_stride);
                                          const int8_t *filter_x,
                                          const int8_t *filter_y,
    uint32_t loop_cnt, cnt;
    const uint8_t *src_tmp;
    v8i16 filt_h0, filt_h1, filter_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, tmp0, tmp1, tmp2, tmp3;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    for (cnt = width8mult; cnt--;) {
        src_tmp += (3 * src_stride);
        for (loop_cnt = height >> 2; loop_cnt--;) {
            LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
            src_tmp += (4 * src_stride);
            VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
            VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
            VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
            VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
            SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
            SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
            MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
            MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
            MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
            MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
            SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
            SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
            PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
                        dst3_r, tmp0, tmp1, tmp2, tmp3);
            ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
            ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
            ST_D4(out0, out1, 0, 1, 0, 1, dst_tmp, dst_stride);
            dst_tmp += (4 * dst_stride);
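
        /* One 8-wide column is processed four rows at a time; the outer
         * cnt loop then steps src/dst right by 8 pixels for the next
         * column. */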
                             const int8_t *filter_x,
                             const int8_t *filter_y,
                                  filter_x, filter_y, weight,
    } else if (4 == height) {
                                  filter_x, filter_y, 1, weight,
    } else if (6 == height) {
                                  filter_x, filter_y, weight,
    } else if (0 == (height % 4)) {
                                const int8_t *filter_x,
                                const int8_t *filter_y,
    const uint8_t *src_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 mask0, mask1, mask2, mask3;
    v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2, tmp3;
    v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
    v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
    v8i16 dst76_r, dst98_r, dst87_r, dst109_r;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    v8i16 offset_vec, const_128, denom_vec;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, weight_vec, rnd_vec;
    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    filter_vec = LD_SH(filter_y);
    weight_vec = __msa_fill_w(weight);
    rnd_vec = __msa_fill_w(rnd_val);
    offset_vec = __msa_fill_h(offset);
    denom_vec = __msa_fill_h(rnd_val - 6);
    const_128 = __msa_fill_h((128 * weight));
    offset_vec += __msa_srar_h(const_128, denom_vec);
    src_tmp += (3 * src_stride);
    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
        src_tmp += (4 * src_stride);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
        SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
        SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
        MUL2(dst0_r, weight_vec, dst1_r, weight_vec, dst0_r, dst1_r);
        MUL2(dst2_r, weight_vec, dst3_r, weight_vec, dst2_r, dst3_r);
        MUL2(dst0_l, weight_vec, dst1_l, weight_vec, dst0_l, dst1_l);
        MUL2(dst2_l, weight_vec, dst3_l, weight_vec, dst2_l, dst3_l);
        SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
        SRAR_W4_SW(dst2_r, dst2_l, dst3_r, dst3_l, rnd_vec);
        PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
                    dst3_r, tmp0, tmp1, tmp2, tmp3);
        ADD2(tmp0, offset_vec, tmp1, offset_vec, tmp0, tmp1);
        ADD2(tmp2, offset_vec, tmp3, offset_vec, tmp2, tmp3);
        ST_D4(out0, out1, 0, 1, 0, 1, dst_tmp, dst_stride);
        dst_tmp += (4 * dst_stride);
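
    /* The 8-wide left column is done (loop_cnt = 4 covers 16 rows); what
     * follows handles the remaining 4-wide right column with its
     * doubleword-packed tap history (dst22 etc.). */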
    src += (3 * src_stride);
    dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
    for (loop_cnt = 2; loop_cnt--;) {
        LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9,
               src10);
        src += (8 * src_stride);
        VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);