0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
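/* VSHF_B byte-selection masks: the first row pairs adjacent bytes within one
 * source vector (8-wide filter cases); the second row uses indices 16 and up
 * to take the trailing bytes from the second shuffle operand (4-wide cases).
 * The copy paths that follow widen 8-bit pixels to HEVC's 16-bit intermediate
 * format: zero-extend by interleaving with a zero vector, then shift left
 * by 6. */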
int16_t *dst, int32_t dst_stride,
LD_SB2(src, src_stride, src0, src1);
src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
in0 = (v8i16) __msa_ilvr_b(zero, src0);
} else if (4 == height) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
ILVR_W2_SB(src1, src0, src3, src2, src0, src1);
ST8x4_UB(in0, in1, dst, 2 * dst_stride);
} else if (0 == height % 8) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0, in1, in2, in3;
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
ILVR_W4_SB(src1, src0, src3, src2, src5, src4, src7, src6,
    src0, src1, src2, src3);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0, in1, in2, in3);
ST8x8_UB(in0, in1, in2, in3, dst, 2 * dst_stride);
dst += (8 * dst_stride);
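/* Width 6: eight rows per iteration; ST12x8_UB writes the 12 bytes
 * (6 halfwords) of each output row as an 8-byte plus a 4-byte chunk. */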
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0, in1, in2, in3);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in4, in5, in6, in7);
ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, dst, 2 * dst_stride);
dst += (8 * dst_stride);
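/* Width 8: heights 2, 4 and 6 are handled straight-line; any multiple of 8
 * rows goes through the unrolled loop below. */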
int16_t *dst, int32_t dst_stride,
LD_SB2(src, src_stride, src0, src1);
ST_SH2(in0, in1, dst, dst_stride);
} else if (4 == height) {
v8i16 in0, in1, in2, in3;
LD_SB4(src, src_stride, src0, src1, src2, src3);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0, in1, in2, in3);
SLLI_4V(in0, in1, in2, in3, 6);
ST_SH4(in0, in1, in2, in3, dst, dst_stride);
} else if (6 == height) {
v16i8 src0, src1, src2, src3, src4, src5;
v8i16 in0, in1, in2, in3, in4, in5;
LD_SB6(src, src_stride, src0, src1, src2, src3, src4, src5);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0, in1, in2, in3);
SLLI_4V(in0, in1, in2, in3, 6);
ST_SH6(in0, in1, in2, in3, in4, in5, dst, dst_stride);
} else if (0 == height % 8) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0, in1, in2, in3);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in4, in5, in6, in7);
SLLI_4V(in0, in1, in2, in3, 6);
SLLI_4V(in4, in5, in6, in7, 6);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, dst, dst_stride);
dst += (8 * dst_stride);
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0, in1, in0_r, in1_r, in2_r, in3_r;
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_r, in1_r, in2_r, in3_r);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
ILVL_W2_SB(src1, src0, src3, src2, src0, src1);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
dst += (4 * dst_stride);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in0_r, in1_r, in2_r, in3_r);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
ILVL_W2_SB(src5, src4, src7, src6, src0, src1);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
dst += (4 * dst_stride);
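/* Width 16: ILVR_B widens the low 8 bytes of each row and ILVL_B the high 8,
 * giving two v8i16 stores per row. */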
int16_t *dst, int32_t dst_stride,
v8i16 in0_r, in1_r, in2_r, in3_r;
v8i16 in0_l, in1_l, in2_l, in3_l;
LD_SB4(src, src_stride, src0, src1, src2, src3);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
} else if (12 == height) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 src8, src9, src10, src11;
v8i16 in0_r, in1_r, in2_r, in3_r;
v8i16 in0_l, in1_l, in2_l, in3_l;
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
LD_SB4(src, src_stride, src8, src9, src10, src11);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
ILVR_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
} else if (0 == (height % 8)) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6,
    src7);
src += (8 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r,
    in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l,
    in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r,
    in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_l,
    in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
dst += (4 * dst_stride);
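/* Width 24: a 16-column strip (right + left interleaves) plus an 8-column
 * strip from the second 16-byte load of each row. */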
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
LD_SB4((src + 16), src_stride, src4, src5, src6, src7);
src += (4 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r, in1_r,
    in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l, in1_l,
    in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r, in1_r,
    in2_r, in3_r);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
ST_SH4(in0_r, in1_r, in2_r, in3_r, (dst + 16), dst_stride);
dst += (4 * dst_stride);
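/* Width 32: two 16-byte loads per row; the four widened vectors are stored
 * back to back at an 8-halfword pitch. */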
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src2, src4, src6);
LD_SB4((src + 16), src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r, in1_r,
    in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l, in1_l,
    in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
ST_SH4(in2_r, in2_l, in3_r, in3_l, dst, 8);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r, in1_r,
    in2_r, in3_r);
ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_l, in1_l,
    in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
ST_SH4(in2_r, in2_l, in3_r, in3_l, dst, 8);
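/* Width 48: three 16-byte loads per row, six v8i16 stores via ST_SH6. */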
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 src8, src9, src10, src11;
v8i16 in0_r, in1_r, in2_r, in3_r, in4_r, in5_r;
v8i16 in0_l, in1_l, in2_l, in3_l, in4_l, in5_l;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB3(src, 16, src0, src1, src2);
LD_SB3(src, 16, src3, src4, src5);
LD_SB3(src, 16, src6, src7, src8);
LD_SB3(src, 16, src9, src10, src11);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_l, in1_l, in2_l, in3_l);
ILVR_B2_SH(zero, src4, zero, src5, in4_r, in5_r);
ILVL_B2_SH(zero, src4, zero, src5, in4_l, in5_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
SLLI_4V(in4_r, in5_r, in4_l, in5_l, 6);
ST_SH6(in0_r, in0_l, in1_r, in1_l, in2_r, in2_l, dst, 8);
ST_SH6(in3_r, in3_l, in4_r, in4_l, in5_r, in5_l, dst, 8);
ILVR_B4_SH(zero, src6, zero, src7, zero, src8, zero, src9,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src6, zero, src7, zero, src8, zero, src9,
    in0_l, in1_l, in2_l, in3_l);
ILVR_B2_SH(zero, src10, zero, src11, in4_r, in5_r);
ILVL_B2_SH(zero, src10, zero, src11, in4_l, in5_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
SLLI_4V(in4_r, in5_r, in4_l, in5_l, 6);
ST_SH6(in0_r, in0_l, in1_r, in1_l, in2_r, in2_l, dst, 8);
ST_SH6(in3_r, in3_l, in4_r, in4_l, in5_r, in5_l, dst, 8);
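/* Width 64: four 16-byte loads per row, stored as two groups of four
 * vectors. */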
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;
for (loop_cnt = (height >> 1); loop_cnt--;) {
LD_SB4(src, 16, src0, src1, src2, src3);
LD_SB4(src, 16, src4, src5, src6, src7);
ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
ST_SH4(in2_r, in2_l, in3_r, in3_l, (dst + 32), 8);
ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in0_r, in1_r, in2_r, in3_r);
ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
    in0_l, in1_l, in2_l, in3_l);
SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
ST_SH4(in2_r, in2_l, in3_r, in3_l, (dst + 32), 8);
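/* 8-tap horizontal (qpel) filters: source bytes are flipped to the signed
 * range (XOR with 128), shuffled into the four tap phases, and accumulated
 * with signed-byte dot products (DPADD_SB). In the full routines the
 * accumulators are primed with const_vec so the 128 bias cancels out. */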
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst0, dst0, dst0, dst0);
VSHF_B4_SB(src2, src3, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst1, dst1, dst1, dst1);
VSHF_B4_SB(src4, src5, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst2, dst2, dst2, dst2);
VSHF_B4_SB(src6, src7, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst3, dst3, dst3, dst3);
ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
dst += (8 * dst_stride);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst0, dst0, dst0, dst0);
VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst1, dst1, dst1, dst1);
VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst2, dst2, dst2, dst2);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst3, dst3, dst3, dst3);
ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += (4 * dst_stride);
int16_t *dst, int32_t dst_stride,
int64_t res0, res1, res2, res3;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
v8i16 filt0, filt1, filt2, filt3, dst0, dst1, dst2, dst3, dst4, dst5;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
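/* Width 12: mask0..mask3 filter the leading 8 columns of each row, while
 * mask4..mask7 pair the src + 8 loads of two rows so one dot product yields
 * the trailing 4 columns of both; those are copied out 64 bits at a time. */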
for (loop_cnt = 4; loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
LD_SB4(src + 8, src_stride, src4, src5, src6, src7);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
VSHF_B2_SB(src4, src5, src6, src7, mask4, mask4, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
VSHF_B2_SB(src4, src5, src6, src7, mask5, mask5, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
VSHF_B2_SB(src4, src5, src6, src7, mask6, mask6, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
VSHF_B2_SB(src4, src5, src6, src7, mask7, mask7, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
    dst1, dst2, dst3);
res0 = __msa_copy_s_d((v2i64) dst4, 0);
res1 = __msa_copy_s_d((v2i64) dst4, 1);
res2 = __msa_copy_s_d((v2i64) dst5, 0);
res3 = __msa_copy_s_d((v2i64) dst5, 1);
ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
SD4(res0, res1, res2, res3, (dst + 8), dst_stride);
dst += (4 * dst_stride);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (loop_cnt = (height >> 1); loop_cnt--;) {
LD_SB2(src, src_stride, src0, src2);
LD_SB2(src + 8, src_stride, src1, src3);
src += (2 * src_stride);
VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
    dst1, dst2, dst3);
ST_SH2(dst0, dst2, dst, dst_stride);
ST_SH2(dst1, dst3, dst + 8, dst_stride);
dst += (2 * dst_stride);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 1); loop_cnt--;) {
LD_SB2(src, 16, src0, src1);
LD_SB2(src, 16, src2, src3);
VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
VSHF_B2_SB(src2, src3, src3, src3, mask4, mask0, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
VSHF_B2_SB(src2, src3, src3, src3, mask5, mask1, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask2, mask2, vec2, vec3);
VSHF_B2_SB(src2, src3, src3, src3, mask6, mask2, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask3, mask3, vec2, vec3);
VSHF_B2_SB(src2, src3, src3, src3, mask7, mask3, vec4, vec5);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
    dst1, dst2, dst3);
ST_SH2(dst0, dst1, dst, 8);
ST_SH(dst2, dst + 16);
ST_SH2(dst3, dst4, dst, 8);
ST_SH(dst5, dst + 16);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
const_vec = __msa_ldi_h(128);
for (loop_cnt = height; loop_cnt--;) {
LD_SB2(src, 16, src0, src1);
src2 = LD_SB(src + 24);
VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst0, dst0, dst0, dst0);
VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst1, dst1, dst1, dst1);
VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst2, dst2, dst2, dst2);
VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst3, dst3, dst3, dst3);
ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
const_vec = __msa_ldi_h(128);
for (loop_cnt = height; loop_cnt--;) {
LD_SB3(src, 16, src0, src1, src2);
src3 = LD_SB(src + 40);
VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src2, mask0, mask4, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src2, mask1, mask5, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src2, mask2, mask6, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src2, mask3, mask7, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
    dst1, dst2, dst3);
ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec4, vec5);
VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec4, vec5);
VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec4, vec5);
VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec4, vec5);
ST_SH2(dst4, dst5, (dst + 32), 8);
int16_t *dst, int32_t dst_stride,
v8i16 filt0, filt1, filt2, filt3;
v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
const_vec = __msa_ldi_h(128);
for (loop_cnt = height; loop_cnt--;) {
LD_SB4(src, 16, src0, src1, src2, src3);
src4 = LD_SB(src + 56);
VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst0, dst0, dst0, dst0);
VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst1, dst1, dst1, dst1);
ST_SH(dst1, dst + 8);
VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst2, dst2, dst2, dst2);
ST_SH(dst2, dst + 16);
VSHF_B4_SB(src1, src2, mask4, mask5, mask6, mask7,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst3, dst3, dst3, dst3);
ST_SH(dst3, dst + 24);
VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst4, dst4, dst4, dst4);
ST_SH(dst4, dst + 32);
VSHF_B4_SB(src2, src3, mask4, mask5, mask6, mask7,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst5, dst5, dst5, dst5);
ST_SH(dst5, dst + 40);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst6, dst6, dst6, dst6);
ST_SH(dst6, dst + 48);
VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst7, dst7, dst7, dst7);
ST_SH(dst7, dst + 56);
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16i8 src9, src10, src11, src12, src13, src14;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v16i8 src1110_r, src1211_r, src1312_r, src1413_r;
v16i8 src2110, src4332, src6554, src8776, src10998;
v16i8 src12111110, src14131312;
v8i16 dst10, dst32, dst54, dst76;
v8i16 filt0, filt1, filt2, filt3;
v8i16 filter_vec, const_vec;
src -= (3 * src_stride);
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_r, src32_r, src54_r, src21_r);
ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
    src2110, src4332, src6554);
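/* 8-tap vertical filter, width 4: consecutive rows are byte-interleaved
 * (ILVR_B) and two interleaved pairs are packed per vector (ILVR_D), so each
 * dot product produces two output rows. */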
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src7, src8, src9, src10, src11, src12, src13, src14);
src += (8 * src_stride);
ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_r, src87_r, src98_r, src109_r);
ILVR_B4_SB(src11, src10, src12, src11, src13, src12, src14, src13,
    src1110_r, src1211_r, src1312_r, src1413_r);
ILVR_D4_SB(src87_r, src76_r, src109_r, src98_r,
    src1211_r, src1110_r, src1413_r, src1312_r,
    src8776, src10998, src12111110, src14131312);
DPADD_SB4_SH(src2110, src4332, src6554, src8776,
    filt0, filt1, filt2, filt3, dst10, dst10, dst10, dst10);
DPADD_SB4_SH(src4332, src6554, src8776, src10998,
    filt0, filt1, filt2, filt3, dst32, dst32, dst32, dst32);
DPADD_SB4_SH(src6554, src8776, src10998, src12111110,
    filt0, filt1, filt2, filt3, dst54, dst54, dst54, dst54);
DPADD_SB4_SH(src8776, src10998, src12111110, src14131312,
    filt0, filt1, filt2, filt3, dst76, dst76, dst76, dst76);
ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
dst += (8 * dst_stride);
src2110 = src10998;
src4332 = src12111110;
src6554 = src14131312;
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
src -= (3 * src_stride);
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_r, src32_r, src54_r, src21_r);
ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_r, src87_r, src98_r, src109_r);
DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
    filt0, filt1, filt2, filt3,
    dst0_r, dst0_r, dst0_r, dst0_r);
DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
    filt0, filt1, filt2, filt3,
    dst1_r, dst1_r, dst1_r, dst1_r);
DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
    filt0, filt1, filt2, filt3,
    dst2_r, dst2_r, dst2_r, dst2_r);
DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
    filt0, filt1, filt2, filt3,
    dst3_r, dst3_r, dst3_r, dst3_r);
ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
dst += (4 * dst_stride);
int16_t *dst, int32_t dst_stride,
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
v16i8 src2110, src4332, src6554, src8776, src10998;
v8i16 dst0_l, dst1_l;
v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
src -= (3 * src_stride);
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_r, src32_r, src54_r, src21_r);
ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_l, src32_l, src54_l, src21_l);
ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
ILVR_D3_SB(src21_l, src10_l, src43_l, src32_l, src65_l, src54_l,
    src2110, src4332, src6554);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_r, src87_r, src98_r, src109_r);
ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_l, src87_l, src98_l, src109_l);
ILVR_D2_SB(src87_l, src76_l, src109_l, src98_l, src8776, src10998);
DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
    filt0, filt1, filt2, filt3,
    dst0_r, dst0_r, dst0_r, dst0_r);
DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
    filt0, filt1, filt2, filt3,
    dst1_r, dst1_r, dst1_r, dst1_r);
DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
    filt0, filt1, filt2, filt3,
    dst2_r, dst2_r, dst2_r, dst2_r);
DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
    filt0, filt1, filt2, filt3,
    dst3_r, dst3_r, dst3_r, dst3_r);
DPADD_SB4_SH(src2110, src4332, src6554, src8776,
    filt0, filt1, filt2, filt3,
    dst0_l, dst0_l, dst0_l, dst0_l);
DPADD_SB4_SH(src4332, src6554, src8776, src10998,
    filt0, filt1, filt2, filt3,
    dst1_l, dst1_l, dst1_l, dst1_l);
ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
ST8x4_UB(dst0_l, dst1_l, dst + 8, 2 * dst_stride);
dst += (4 * dst_stride);
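/* Generic vertical 8-tap for widths that are multiples of 16: the outer loop
 * walks 16-column strips; a sliding seven-row history of right/left
 * interleaves feeds the dot products, four rows per iteration. */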
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
v8i16 filter_vec, const_vec;
v8i16 filt0, filt1, filt2, filt3;
src -= (3 * src_stride);
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (cnt = width >> 4; cnt--;) {
LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_r, src32_r, src54_r, src21_r);
ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
    src10_l, src32_l, src54_l, src21_l);
ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
src_tmp += (4 * src_stride);
ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_r, src87_r, src98_r, src109_r);
ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_l, src87_l, src98_l, src109_l);
DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
    filt0, filt1, filt2, filt3,
    dst0_r, dst0_r, dst0_r, dst0_r);
DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
    filt0, filt1, filt2, filt3,
    dst1_r, dst1_r, dst1_r, dst1_r);
DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
    filt0, filt1, filt2, filt3,
    dst2_r, dst2_r, dst2_r, dst2_r);
DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
    filt0, filt1, filt2, filt3,
    dst3_r, dst3_r, dst3_r, dst3_r);
DPADD_SB4_SH(src10_l, src32_l, src54_l, src76_l,
    filt0, filt1, filt2, filt3,
    dst0_l, dst0_l, dst0_l, dst0_l);
DPADD_SB4_SH(src21_l, src43_l, src65_l, src87_l,
    filt0, filt1, filt2, filt3,
    dst1_l, dst1_l, dst1_l, dst1_l);
DPADD_SB4_SH(src32_l, src54_l, src76_l, src98_l,
    filt0, filt1, filt2, filt3,
    dst2_l, dst2_l, dst2_l, dst2_l);
DPADD_SB4_SH(src43_l, src65_l, src87_l, src109_l,
    filt0, filt1, filt2, filt3,
    dst3_l, dst3_l, dst3_l, dst3_l);
ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
ST_SH4(dst0_l, dst1_l, dst2_l, dst3_l, dst_tmp + 8, dst_stride);
dst_tmp += (4 * dst_stride);
int16_t *dst, int32_t dst_stride,
filter, height, 16);
int16_t *dst, int32_t dst_stride,
filter, height, 16);
int16_t *dst, int32_t dst_stride,
filter, height, 32);
int16_t *dst, int32_t dst_stride,
filter, height, 48);
int16_t *dst, int32_t dst_stride,
filter, height, 64);
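/* 8-tap HV (2-D) filters: a horizontal 8-tap pass produces 16-bit taps,
 * HEVC_FILT_8TAP applies the vertical taps on 32-bit accumulators, and the
 * results are shifted right by 6 before packing. */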
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
int32_t dst_stride_in_bytes = 2 * dst_stride;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v8i16 filt0, filt1, filt2, filt3;
v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
src -= ((3 * src_stride) + 3);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
const_vec = __msa_ldi_h(128);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
VSHF_B4_SB(src0, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
VSHF_B4_SB(src1, src4, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
VSHF_B4_SB(src2, src5, mask0, mask1, mask2, mask3,
    vec8, vec9, vec10, vec11);
VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
    vec12, vec13, vec14, vec15);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst30, dst30, dst30, dst30);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
    dst41, dst41, dst41, dst41);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
    dst52, dst52, dst52, dst52);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
    dst63, dst63, dst63, dst63);
dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
for (loop_cnt = height >> 2; loop_cnt--;) {
LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
    vec4, vec5, vec6, vec7);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst97, dst97, dst97, dst97);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
    dst108, dst108, dst108, dst108);
dst76_r = __msa_ilvr_h(dst97, dst66);
dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
dst98_r = __msa_ilvr_h(dst66, dst108);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
    filt_h0, filt_h1, filt_h2, filt_h3);
dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
    filt_h0, filt_h1, filt_h2, filt_h3);
dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r,
    filt_h0, filt_h1, filt_h2, filt_h3);
dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r,
    filt_h0, filt_h1, filt_h2, filt_h3);
SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst2_r);
ST8x4_UB(dst0_r, dst2_r, dst, dst_stride_in_bytes);
dst += (4 * dst_stride);
dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
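/* HV for widths that are multiples of 8: the seven priming rows are filtered
 * horizontally once per 8-column strip; each loop iteration filters one new
 * row and rotates it into the vertical tap window. */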
const int8_t *filter_x, const int8_t *filter_y,
uint32_t loop_cnt, cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v8i16 filt0, filt1, filt2, filt3;
v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
v16i8 mask1, mask2, mask3;
v8i16 filter_vec, const_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v4i32 dst0_r, dst0_l;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
src -= ((3 * src_stride) + 3);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
const_vec = __msa_ldi_h(128);
for (cnt = width >> 3; cnt--;) {
LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
    vec4, vec5, vec6, vec7);
VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
    vec8, vec9, vec10, vec11);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
    vec12, vec13, vec14, vec15);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst0, dst0, dst0, dst0);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
    dst1, dst1, dst1, dst1);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
    dst2, dst2, dst2, dst2);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
    dst3, dst3, dst3, dst3);
VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
    vec4, vec5, vec6, vec7);
VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
    vec8, vec9, vec10, vec11);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst4, dst4, dst4, dst4);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
    dst5, dst5, dst5, dst5);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
    dst6, dst6, dst6, dst6);
for (loop_cnt = height; loop_cnt--;) {
src7 = LD_SB(src_tmp);
src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
src_tmp += src_stride;
VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
    vec0, vec1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
    dst7, dst7, dst7, dst7);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
    filt_h0, filt_h1, filt_h2, filt_h3);
dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l,
    filt_h0, filt_h1, filt_h2, filt_h3);
dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
ST_SW(dst0_r, dst_tmp);
dst_tmp += dst_stride;
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 8);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
int32_t dst_stride_in_bytes = 2 * dst_stride;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
v8i16 filter_vec, const_vec;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
v4i32 dst0_r, dst0_l, dst1_r, dst2_r, dst3_r;
src -= ((3 * src_stride) + 3);
filter_vec = LD_SH(filter_x);
SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
filter_vec = LD_SH(filter_y);
SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);
const_vec = __msa_ldi_h(128);
LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
    vec11);
VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
    vec15);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst0, dst0,
    dst0, dst0);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst1, dst1,
    dst1, dst1);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst2,
    dst2, dst2, dst2);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3, dst3,
    dst3, dst3, dst3);
VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
    vec11);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst4, dst4,
    dst4, dst4);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst5, dst5,
    dst5, dst5);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst6,
    dst6, dst6, dst6);
for (loop_cnt = height; loop_cnt--;) {
src7 = LD_SB(src_tmp);
src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
src_tmp += src_stride;
VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
    vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst7,
    dst7, dst7, dst7);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
    filt_h1, filt_h2, filt_h3);
dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l, filt_h0,
    filt_h1, filt_h2, filt_h3);
dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
ST_SW(dst0_r, dst_tmp);
dst_tmp += dst_stride;
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7, vec8, vec9, vec10,
    vec11);
VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7, vec12, vec13, vec14,
    vec15);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst30,
    dst30, dst30, dst30);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst41,
    dst41, dst41, dst41);
DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst52,
    dst52, dst52, dst52);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3, dst63,
    dst63, dst63, dst63);
dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);
for (loop_cnt = height >> 2; loop_cnt--;) {
LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
    vec3);
VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
    vec7);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst97,
    dst97, dst97, dst97);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst108,
    dst108, dst108, dst108);
dst76_r = __msa_ilvr_h(dst97, dst66);
dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
dst98_r = __msa_ilvr_h(dst66, dst108);
dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
    filt_h1, filt_h2, filt_h3);
dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
    filt_h1, filt_h2, filt_h3);
dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
    filt_h1, filt_h2, filt_h3);
dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
    filt_h1, filt_h2, filt_h3);
SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst2_r);
ST8x4_UB(dst0_r, dst2_r, dst, dst_stride_in_bytes);
dst += (4 * dst_stride);
dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 16);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 24);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 32);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 48);
int16_t *dst, int32_t dst_stride,
const int8_t *filter_x, const int8_t *filter_y,
filter_x, filter_y, height, 64);
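/* 4-tap (epel) filters: same structure as the 8-tap paths but with only
 * filt0/filt1 and mask0/mask1, so each output needs a single DPADD_SB2. */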
v16i8 mask1, vec0, vec1;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
LD_SB2(src, src_stride, src0, src1);
VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
ST8x2_UB(dst0, dst, 2 * dst_stride);
v16i8 mask1, vec0, vec1;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
LD_SB4(src, src_stride, src0, src1, src2, src3);
VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
ST8x4_UB(dst0, dst1, dst, 2 * dst_stride);
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 mask1, vec0, vec1;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 3); loop_cnt--;) {
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src4, src5, src4, src5, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src6, src7, src6, src7, mask0, mask1, vec0, vec1);
ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
dst += (8 * dst_stride);
} else if (4 == height) {
} else if (0 == height % 8) {
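/* Width 6: the first four results of each row are stored with a 64-bit copy
 * and results 4..5 with a 32-bit copy at dst + 4. */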
uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
uint32_t dst_val_int0, dst_val_int1, dst_val_int2, dst_val_int3;
v8i16 filt0, filt1, dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = 2; loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
dst_val1 = __msa_copy_u_d((v2i64) dst1, 0);
dst_val2 = __msa_copy_u_d((v2i64) dst2, 0);
dst_val3 = __msa_copy_u_d((v2i64) dst3, 0);
dst_val_int0 = __msa_copy_u_w((v4i32) dst0, 2);
dst_val_int1 = __msa_copy_u_w((v4i32) dst1, 2);
dst_val_int2 = __msa_copy_u_w((v4i32) dst2, 2);
dst_val_int3 = __msa_copy_u_w((v4i32) dst3, 2);
SW(dst_val_int0, dst + 4);
SW(dst_val_int1, dst + 4);
SW(dst_val_int2, dst + 4);
SW(dst_val_int3, dst + 4);
v8i16 filt0, filt1, dst0, dst1;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 1); loop_cnt--;) {
LD_SB2(src, src_stride, src0, src1);
src += (2 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
ST_SH2(dst0, dst1, dst, dst_stride);
dst += (2 * dst_stride);
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += (4 * dst_stride);
if (2 == height || 6 == height) {
v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
v8i16 filter_vec, const_vec;
v16i8 mask2 = {
    8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
};
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
VSHF_B2_SB(src2, src3, src2, src3, mask2, mask3, vec0, vec1);
ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
ST8x4_UB(dst4, dst5, dst + 8, 2 * dst_stride);
dst += (4 * dst_stride);
v16i8 src4, src5, src6, src7;
v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src2, src4, src6);
LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
ST_SH4(dst0, dst2, dst4, dst6, dst, dst_stride);
ST_SH4(dst1, dst3, dst5, dst7, dst + 8, dst_stride);
dst += (4 * dst_stride);
int16_t *dst_tmp = dst + 16;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 mask1, mask00, mask11;
v8i16 dst0, dst1, dst2, dst3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
mask11 = mask0 + 10;
const_vec = __msa_ldi_h(128);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src2, src4, src6);
LD_SB4(src + 16, src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src0, src1, src0, src1, mask00, mask11, vec0, vec1);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src2, src3, src2, src3, mask00, mask11, vec0, vec1);
ST_SH2(dst0, dst1, dst, 8);
ST_SH2(dst2, dst3, dst, 8);
VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src4, src5, src4, src5, mask00, mask11, vec0, vec1);
VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src6, src7, src6, src7, mask00, mask11, vec0, vec1);
ST_SH2(dst0, dst1, dst, 8);
ST_SH2(dst2, dst3, dst, 8);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
ST_SH4(dst0, dst1, dst2, dst3, dst_tmp, dst_stride);
dst_tmp += (4 * dst_stride);
v16i8 mask1, mask2, mask3;
v8i16 dst0, dst1, dst2, dst3;
v16i8 vec0, vec1, vec2, vec3;
v8i16 filter_vec, const_vec;
filter_vec = LD_SH(filter);
const_vec = __msa_ldi_h(128);
for (loop_cnt = height; loop_cnt--;) {
LD_SB2(src, 16, src0, src1);
src2 = LD_SB(src + 24);
VSHF_B2_SB(src0, src0, src0, src1, mask0, mask2, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
    dst1, dst2, dst3);
VSHF_B2_SB(src0, src0, src0, src1, mask1, mask3, vec0, vec1);
VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
    dst1, dst2, dst3);
ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
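/* 4-tap vertical filters: two-row byte interleaves feed DPADD_SB2; for
 * width 4, ILVR_D packs two interleaved row pairs per vector so each dot
 * product covers two output rows. */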
v16i8 src0, src1, src2, src3, src4;
v16i8 src10_r, src32_r, src21_r, src43_r;
v16i8 src2110, src4332;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
    src10_r, src21_r, src32_r, src43_r);
ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
ST8x2_UB(dst10, dst, 2 * dst_stride);
v16i8 src0, src1, src2, src3, src4, src5, src6;
v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
v16i8 src2110, src4332, src6554;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
    src10_r, src21_r, src32_r, src43_r);
ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
    src2110, src4332, src6554);
DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
ST8x4_UB(dst10, dst32, dst, 2 * dst_stride);
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
v16i8 src2110, src4332, src6554, src8776, src10998;
v8i16 dst10, dst32, dst54, dst76;
v8i16 filter_vec, const_vec;
const_vec = __msa_ldi_h(128);
filter_vec = LD_SH(filter);
LD_SB3(src, src_stride, src0, src1, src2);
src += (3 * src_stride);
ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
src += (8 * src_stride);
ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
    src32_r, src43_r, src54_r, src65_r);
ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
    src76_r, src87_r, src98_r, src109_r);
ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
    src98_r, src4332, src6554, src8776, src10998);
DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
dst += (8 * dst_stride);
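/* Height 16 at width 4: the eight-row batch above is repeated once more for
 * the second half. */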
2572 int16_t *dst,
int32_t dst_stride,
2575 v16i8
src0,
src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
2576 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
2577 v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
2579 v8i16 dst10, dst32, dst54, dst76, filt0, filt1, filter_vec, const_vec;
2582 const_vec = __msa_ldi_h(128);
2585 filter_vec =
LD_SH(filter);
2588 LD_SB3(src, src_stride, src0, src1, src2);
2589 src += (3 * src_stride);
2591 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2592 src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
2593 src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);
2595 LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
2596 src += (8 * src_stride);
2597 ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r,
2599 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
2600 src87_r, src98_r, src109_r);
2601 ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
2602 src98_r, src4332, src6554, src8776, src10998);
2609 DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
2610 DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
2611 DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
2612 DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
2613 ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
2614 dst += (8 * dst_stride);
2619 LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
2620 src += (8 * src_stride);
2622 ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r,
2623            src54_r, src65_r);
2624 ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
2625 src87_r, src98_r, src109_r);
2626 ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
2627 src98_r, src4332, src6554, src8776, src10998);
2634 DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
2635 DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
2636 DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
2637 DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
2638 ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
2639 dst += (8 * dst_stride);
2651 } else if (4 == height) {
2653 } else if (8 == height) {
2655 } else if (16 == height) {
2668 uint32_t dst_val_int0, dst_val_int1, dst_val_int2, dst_val_int3;
2669 uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
2670 v16i8 src0, src1, src2, src3, src4;
2671 v16i8 src10_r, src32_r, src21_r, src43_r;
2672 v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
2674 v8i16 filter_vec, const_vec;
2677 const_vec = __msa_ldi_h(128);
2680 filter_vec = LD_SH(filter);
2683 LD_SB3(src, src_stride, src0, src1, src2);
2684 src += (3 * src_stride);
2686 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2688 for (loop_cnt = (height >> 2); loop_cnt--;) {
2689 LD_SB2(src, src_stride, src3, src4);
2690 src += (2 * src_stride);
2692 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2695 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2697 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2699 LD_SB2(src, src_stride, src1, src2);
2700 src += (2 * src_stride);
2702 ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);
2705 DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst2_r, dst2_r);
2707 DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst3_r, dst3_r);
2709 dst_val0 = __msa_copy_u_d((v2i64) dst0_r, 0);
2710 dst_val1 = __msa_copy_u_d((v2i64) dst1_r, 0);
2711 dst_val2 = __msa_copy_u_d((v2i64) dst2_r, 0);
2712 dst_val3 = __msa_copy_u_d((v2i64) dst3_r, 0);
2714 dst_val_int0 = __msa_copy_u_w((v4i32) dst0_r, 2);
2715 dst_val_int1 = __msa_copy_u_w((v4i32) dst1_r, 2);
2716 dst_val_int2 = __msa_copy_u_w((v4i32) dst2_r, 2);
2717 dst_val_int3 = __msa_copy_u_w((v4i32) dst3_r, 2);
2720 SW(dst_val_int0, dst + 4);
2723 SW(dst_val_int1, dst + 4);
2726 SW(dst_val_int2, dst + 4);
2729 SW(dst_val_int3, dst + 4);
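/* Note on the 6-wide stores above: each row of six 16-bit samples is
 * split into an 8-byte double word (four samples, extracted with
 * __msa_copy_u_d) and a 4-byte word (samples 4-5, word 2 of the vector),
 * with the word written at dst + 4 via SW. */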
2740 v16i8 src0, src1, src2, src3, src4;
2741 v16i8 src10_r, src32_r, src21_r, src43_r;
2742 v8i16 dst0_r, dst1_r;
2744 v8i16 filter_vec, const_vec;
2747 const_vec = __msa_ldi_h(128);
2750 filter_vec = LD_SH(filter);
2753 LD_SB3(src, src_stride, src0, src1, src2);
2754 src += (3 * src_stride);
2756 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2758 LD_SB2(src, src_stride, src3, src4);
2760 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2762 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2764 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2766 ST_SH2(dst0_r, dst1_r, dst, dst_stride);
2775 v16i8 src0, src1, src2, src3, src4;
2776 v16i8 src10_r, src32_r, src21_r, src43_r;
2777 v8i16 dst0_r, dst1_r;
2779 v8i16 filter_vec, const_vec;
2782 const_vec = __msa_ldi_h(128);
2785 filter_vec = LD_SH(filter);
2788 LD_SB3(src, src_stride, src0, src1, src2);
2789 src += (3 * src_stride);
2791 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2793 LD_SB2(src, src_stride, src3, src4);
2794 src += (2 * src_stride);
2797 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2799 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2801 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2803 ST_SH2(dst0_r, dst1_r, dst, dst_stride);
2804 dst += (2 * dst_stride);
2806 LD_SB2(src, src_stride, src1, src2);
2807 src += (2 * src_stride);
2810 ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);
2812 DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
2814 DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
2816 ST_SH2(dst0_r, dst1_r, dst, dst_stride);
2817 dst += (2 * dst_stride);
2819 LD_SB2(src, src_stride, src3, src4);
2822 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2824 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2826 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2828 ST_SH2(dst0_r, dst1_r, dst, dst_stride);
2839 v16i8 src0, src1, src2, src3, src4, src5, src6;
2840 v16i8 src10_r, src32_r, src21_r, src43_r, src54_r, src65_r;
2841 v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
2843 v8i16 filter_vec, const_vec;
2846 const_vec = __msa_ldi_h(128);
2849 filter_vec = LD_SH(filter);
2852 LD_SB3(src, src_stride, src0, src1, src2);
2853 src += (3 * src_stride);
2855 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2857 for (loop_cnt = (height >> 2); loop_cnt--;) {
2858 LD_SB4(src, src_stride, src3, src4, src5, src6);
2859 src += (4 * src_stride);
2861 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2862 ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
2867 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2868 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2869 DPADD_SB2_SH(src32_r, src54_r, filt0, filt1, dst2_r, dst2_r);
2870 DPADD_SB2_SH(src43_r, src65_r, filt0, filt1, dst3_r, dst3_r);
2871 ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
2872 dst += (4 * dst_stride);
2889 } else if (6 == height) {
2905 v16i8 src0, src1, src2, src3, src4, src5, src6;
2906 v16i8 src10_r, src32_r, src21_r, src43_r;
2907 v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
2908 v16i8 src10_l, src32_l, src54_l, src21_l, src43_l, src65_l;
2909 v16i8 src2110, src4332;
2910 v16i8 src54_r, src65_r, src6554;
2911 v8i16 dst0_l, dst1_l;
2913 v8i16 filter_vec, const_vec;
2915 src -= (1 * src_stride);
2916 const_vec = __msa_ldi_h(128);
2919 filter_vec = LD_SH(filter);
2922 LD_SB3(src, src_stride, src0, src1, src2);
2923 src += (3 * src_stride);
2925 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2926 ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
2927 src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_l, (v2i64) src10_l);
2929 for (loop_cnt = 4; loop_cnt--;) {
2930 LD_SB2(src, src_stride, src3, src4);
2931 src += (2 * src_stride);
2932 LD_SB2(src, src_stride, src5, src6);
2933 src += (2 * src_stride);
2937 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
2938 ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
2939 src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_l, (v2i64) src32_l);
2940 ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
2941 ILVL_B2_SB(src5, src4, src6, src5, src54_l, src65_l);
2942 src6554 = (v16i8) __msa_ilvr_d((v2i64) src65_l, (v2i64) src54_l);
2945 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
2947 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
2949 DPADD_SB2_SH(src32_r, src54_r, filt0, filt1, dst2_r, dst2_r);
2951 DPADD_SB2_SH(src43_r, src65_r, filt0, filt1, dst3_r, dst3_r);
2953 DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst0_l, dst0_l);
2955 DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst1_l, dst1_l);
2957 ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
2958 ST8x4_UB(dst0_l, dst1_l, dst + 8, (2 * dst_stride));
2959 dst += (4 * dst_stride);
2976 v16i8 src0, src1, src2, src3, src4, src5;
2977 v16i8 src10_r, src32_r, src21_r, src43_r;
2978 v16i8 src10_l, src32_l, src21_l, src43_l;
2979 v8i16 dst0_r, dst1_r, dst0_l, dst1_l;
2981 v8i16 filter_vec, const_vec;
2984 const_vec = __msa_ldi_h(128);
2987 filter_vec = LD_SH(filter);
2990 LD_SB3(src, src_stride, src0, src1, src2);
2991 src += (3 * src_stride);
2993 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
2994 ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
2996 for (loop_cnt = (height >> 2); loop_cnt--;) {
2997 LD_SB2(src, src_stride, src3, src4);
2998 src += (2 * src_stride);
3000 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
3001 ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
3003 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
3005 DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
3007 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
3009 DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);
3010 ST_SH2(dst0_r, dst0_l, dst, 8);
3012 ST_SH2(dst1_r, dst1_l, dst, 8);
3015 LD_SB2(src, src_stride, src5, src2);
3016 src += (2 * src_stride);
3018 ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
3019 ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
3021 DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
3023 DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
3025 DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
3027 DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);
3028 ST_SH2(dst0_r, dst0_l, dst, 8);
3030 ST_SH2(dst1_r, dst1_l, dst, 8);
3043 v16i8 src0, src1, src2, src3, src4, src5;
3044 v16i8 src6, src7, src8, src9, src10, src11;
3045 v16i8 src10_r, src32_r, src76_r, src98_r;
3046 v16i8 src21_r, src43_r, src87_r, src109_r;
3047 v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
3048 v16i8 src10_l, src32_l, src21_l, src43_l;
3049 v8i16 dst0_l, dst1_l;
3051 v8i16 filter_vec, const_vec;
3054 const_vec = __msa_ldi_h(128);
3057 filter_vec = LD_SH(filter);
3060 LD_SB3(src, src_stride, src0, src1, src2);
3062 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
3063 ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
3065 LD_SB3(src + 16, src_stride, src6, src7, src8);
3066 src += (3 * src_stride);
3068 ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
3070 for (loop_cnt = (height >> 2); loop_cnt--;) {
3071 LD_SB2(src, src_stride, src3, src4);
3073 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
3074 ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
3076 LD_SB2(src + 16, src_stride, src9, src10);
3077 src += (2 * src_stride);
3079 ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
3082 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
3084 DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
3086 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
3088 DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);
3090 DPADD_SB2_SH(src76_r, src98_r, filt0, filt1, dst2_r, dst2_r);
3092 DPADD_SB2_SH(src87_r, src109_r, filt0, filt1, dst3_r, dst3_r);
3094 ST_SH2(dst0_r, dst0_l, dst, 8);
3095 ST_SH(dst2_r, dst + 16);
3097 ST_SH2(dst1_r, dst1_l, dst, 8);
3098 ST_SH(dst3_r, dst + 16);
3101 LD_SB2(src, src_stride, src5, src2);
3103 ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
3104 ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
3106 LD_SB2(src + 16, src_stride, src11, src8);
3107 src += (2 * src_stride);
3109 ILVR_B2_SB(src11, src10, src8, src11, src76_r, src87_r);
3112 DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
3114 DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
3116 DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
3118 DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);
3120 DPADD_SB2_SH(src98_r, src76_r, filt0, filt1, dst2_r, dst2_r);
3122 DPADD_SB2_SH(src109_r, src87_r, filt0, filt1, dst3_r, dst3_r);
3124 ST_SH2(dst0_r, dst0_l, dst, 8);
3125 ST_SH(dst2_r, dst + 16);
3127 ST_SH2(dst1_r, dst1_l, dst, 8);
3128 ST_SH(dst3_r, dst + 16);
3141 v16i8 src0, src1, src2, src3, src4, src5;
3142 v16i8 src6, src7, src8, src9, src10, src11;
3143 v16i8 src10_r, src32_r, src76_r, src98_r;
3144 v16i8 src21_r, src43_r, src87_r, src109_r;
3145 v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
3146 v16i8 src10_l, src32_l, src76_l, src98_l;
3147 v16i8 src21_l, src43_l, src87_l, src109_l;
3148 v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
3150 v8i16 filter_vec, const_vec;
3153 const_vec = __msa_ldi_h(128);
3156 filter_vec = LD_SH(filter);
3159 LD_SB3(src, src_stride, src0, src1, src2);
3161 ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
3162 ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
3164 LD_SB3(src + 16, src_stride, src6, src7, src8);
3165 src += (3 * src_stride);
3167 ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
3168 ILVL_B2_SB(src7, src6, src8, src7, src76_l, src87_l);
3170 for (loop_cnt = (height >> 2); loop_cnt--;) {
3171 LD_SB2(src, src_stride, src3, src4);
3173 ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
3174 ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
3176 LD_SB2(src + 16, src_stride, src9, src10);
3177 src += (2 * src_stride);
3179 ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
3180 ILVL_B2_SB(src9, src8, src10, src9, src98_l, src109_l);
3183 DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
3185 DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
3187 DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
3189 DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);
3191 DPADD_SB2_SH(src76_r, src98_r, filt0, filt1, dst2_r, dst2_r);
3193 DPADD_SB2_SH(src76_l, src98_l, filt0, filt1, dst2_l, dst2_l);
3195 DPADD_SB2_SH(src87_r, src109_r, filt0, filt1, dst3_r, dst3_r);
3197 DPADD_SB2_SH(src87_l, src109_l, filt0, filt1, dst3_l, dst3_l);
3199 ST_SH4(dst0_r, dst0_l, dst2_r, dst2_l, dst, 8);
3201 ST_SH4(dst1_r, dst1_l, dst3_r, dst3_l, dst, 8);
3204 LD_SB2(src, src_stride, src5, src2);
3206 ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
3207 ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
3209 LD_SB2(src + 16, src_stride, src11, src8);
3210 src += (2 * src_stride);
3212 ILVR_B2_SB(src11, src10, src8, src11, src76_r, src87_r);
3213 ILVL_B2_SB(src11, src10, src8, src11, src76_l, src87_l);
3216 DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
3218 DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
3220 DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
3222 DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);
3224 DPADD_SB2_SH(src98_r, src76_r, filt0, filt1, dst2_r, dst2_r);
3226 DPADD_SB2_SH(src98_l, src76_l, filt0, filt1, dst2_l, dst2_l);
3228 DPADD_SB2_SH(src109_r, src87_r, filt0, filt1, dst3_r, dst3_r);
3230 DPADD_SB2_SH(src109_l, src87_l, filt0, filt1, dst3_l, dst3_l);
3232 ST_SH4(dst0_r, dst0_l, dst2_r, dst2_l, dst, 8);
3234 ST_SH4(dst1_r, dst1_l, dst3_r, dst3_l, dst, 8);
3243 const int8_t *filter_x,
3244 const int8_t *filter_y)
3246 int32_t dst_stride_in_bytes = 2 * dst_stride;
3247 v16i8 src0, src1, src2, src3, src4;
3249 v8i16 filt_h0, filt_h1;
3252 v8i16 filter_vec, const_vec;
3253 v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
3254 v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43;
3257 src -= (src_stride + 1);
3258 filter_vec = LD_SH(filter_x);
3261 filter_vec = LD_SH(filter_y);
3268 const_vec = __msa_ldi_h(128);
3271 LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
3273 VSHF_B2_SB(src0, src2, src0, src2, mask0, mask1, vec0, vec1);
3274 VSHF_B2_SB(src1, src3, src1, src3, mask0, mask1, vec2, vec3);
3275 VSHF_B2_SB(src2, src4, src2, src4, mask0, mask1, vec4, vec5);
3290 dst0 = (v4i32) __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
3291 ST8x2_UB(dst0, dst, dst_stride_in_bytes);
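/* Illustrative only -- a scalar model of the separable 4-tap HV kernels
 * that follow; the name and the fixed array bounds (width, height <= 64)
 * are assumptions for the sketch.  The horizontal pass keeps unshifted
 * 16-bit intermediates (same 128-bias trick as above); the vertical pass
 * widens to 32 bits via HEVC_FILT_4TAP and drops the 6 bits of headroom
 * with SRA_4V before packing back to 16 bits. */
static void hevc_hv_4t_scalar_ref(const uint8_t *src, int32_t src_stride,
                                  int16_t *dst, int32_t dst_stride,
                                  const int8_t *filter_x,
                                  const int8_t *filter_y,
                                  int32_t width, int32_t height)
{
    int32_t x, y;
    int16_t tmp[64 + 3][64];  /* height + 3 rows of vertical context */

    /* horizontal pass, starting one row above and one column left of the
     * first output, mirroring src -= (src_stride + 1) in the MSA code */
    for (y = -1; y < height + 2; y++)
        for (x = 0; x < width; x++)
            tmp[y + 1][x] = filter_x[0] * src[y * src_stride + x - 1] +
                            filter_x[1] * src[y * src_stride + x]     +
                            filter_x[2] * src[y * src_stride + x + 1] +
                            filter_x[3] * src[y * src_stride + x + 2];

    /* vertical pass on the intermediates, then >> 6 */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            dst[y * dst_stride + x] =
                (filter_y[0] * tmp[y + 0][x] + filter_y[1] * tmp[y + 1][x] +
                 filter_y[2] * tmp[y + 2][x] + filter_y[3] * tmp[y + 3][x]) >> 6;
}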
3298 const int8_t *filter_x,
3299 const int8_t *filter_y)
3301 int32_t dst_stride_in_bytes = 2 * dst_stride;
3302 v16i8 src0, src1, src2, src3, src4, src5, src6;
3304 v8i16 filt_h0, filt_h1;
3307 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
3308 v8i16 filter_vec, const_vec;
3309 v8i16 dst30, dst41, dst52, dst63, dst10, dst32, dst54, dst21, dst43, dst65;
3310 v4i32 dst0, dst1, dst2, dst3;
3312 src -= (src_stride + 1);
3314 filter_vec = LD_SH(filter_x);
3317 filter_vec = LD_SH(filter_y);
3324 const_vec = __msa_ldi_h(128);
3327 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
3330 VSHF_B2_SB(src0, src3, src0, src3, mask0, mask1, vec0, vec1);
3331 VSHF_B2_SB(src1, src4, src1, src4, mask0, mask1, vec2, vec3);
3332 VSHF_B2_SB(src2, src5, src2, src5, mask0, mask1, vec4, vec5);
3333 VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);
3352 SRA_4V(dst0, dst1, dst2, dst3, 6);
3354 ST8x4_UB(dst0, dst2, dst, dst_stride_in_bytes);
3362 const int8_t *filter_x,
3363 const int8_t *filter_y,
3367 v16i8 src0, src1, src2, src3, src4, src5, src6;
3368 v16i8 src7, src8, src9, src10;
3370 v8i16 filt_h0, filt_h1;
3373 v8i16 filter_vec, const_vec;
3374 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
3375 v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
3376 v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
3377 v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
3378 v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
3380 src -= (src_stride + 1);
3381 filter_vec = LD_SH(filter_x);
3384 filter_vec = LD_SH(filter_y);
3391 const_vec = __msa_ldi_h(128);
3394 LD_SB3(src, src_stride, src0, src1, src2);
3395 src += (3 * src_stride);
3397 VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
3398 VSHF_B2_SB(src1, src2, src1, src2, mask0, mask1, vec2, vec3);
3404 dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
3406 for (loop_cnt = height >> 3; loop_cnt--;) {
3407 LD_SB8(src, src_stride,
3408        src3, src4, src5, src6, src7, src8, src9, src10);
3409 src += (8 * src_stride);
3412 VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
3413 VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
3414 VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
3415 VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);
3424 DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst106, dst106);
3426 dst32_r = __msa_ilvr_h(dst73, dst22);
3430 dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
3431 dst76_r = __msa_ilvr_h(dst22, dst106);
3441 SRA_4V(dst0, dst1, dst2, dst3, 6);
3442 SRA_4V(dst4, dst5, dst6, dst7, 6);
3443 PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
3444 dst0, dst1, dst2, dst3);
3445 ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
3446 dst += (8 * dst_stride);
3450 dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
3458 const int8_t *filter_x,
3459 const int8_t *filter_y,
3463 hevc_hv_4t_4x2_msa(src, src_stride, dst, dst_stride,
3464                    filter_x, filter_y);
3465 } else if (4 == height) {
3466 hevc_hv_4t_4x4_msa(src, src_stride, dst, dst_stride,
3467                    filter_x, filter_y);
3468 } else if (0 == (height % 8)) {
3469 hevc_hv_4t_4multx8mult_msa(src, src_stride, dst, dst_stride,
3470                            filter_x, filter_y, height);
3478 const int8_t *filter_x,
3479 const int8_t *filter_y,
3482 int32_t dst_stride_in_bytes = 2 * dst_stride;
3483 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
3485 v8i16 filt_h0, filt_h1;
3488 v8i16 filter_vec, const_vec;
3489 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
3490 v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
3491 v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
3492 v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
3493 v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
3494 v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
3495 v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
3496 v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
3497 v4i32 dst0_l, dst1_l, dst2_l, dst3_l;
3499 src -= (src_stride + 1);
3500 filter_vec = LD_SH(filter_x);
3503 filter_vec = LD_SH(filter_y);
3510 const_vec = __msa_ldi_h(128);
3513 LD_SB3(src, src_stride, src0, src1, src2);
3514 src += (3 * src_stride);
3517 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
3518 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
3519 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
3531 LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
3534 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
3535 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
3536 VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
3537 VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
3548 VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
3549 VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
3550 VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
3551 VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);
3560 DPADD_SB2_SH(vec6, vec7, filt0, filt1, dsth10, dsth10);
3571 PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
3572 PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
3573 dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);
3586 dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
3587 SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
3588 SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
3589 SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
3590 PCKEV_H2_SH(dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
3591 PCKEV_H2_SH(dst5_r, dst4_r, dst7_r, dst6_r, tmp2, tmp3);
3592 PCKEV_H2_SH(dst1_l, dst0_l, dst3_l, dst2_l, tmp4, tmp5);
3593 ST8x4_UB(tmp0, tmp1, dst, dst_stride_in_bytes);
3594 ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, dst + 4, dst_stride_in_bytes);
3595 dst += 4 * dst_stride;
3596 ST8x4_UB(tmp2, tmp3, dst, dst_stride_in_bytes);
3597 ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, dst + 4, dst_stride_in_bytes);
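/* As in the vertical 6-wide kernel, each 6-sample output row above is
 * written in two parts: ST8x4_UB stores the left four 16-bit samples per
 * row and ST4x4_UB stores the remaining two at dst + 4. */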
3604 const int8_t *filter_x,
3605 const int8_t *filter_y)
3607 v16i8 src0, src1, src2, src3, src4;
3609 v8i16 filt_h0, filt_h1;
3612 v8i16 filter_vec, const_vec;
3613 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
3614 v8i16 dst0, dst1, dst2, dst3, dst4;
3615 v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
3616 v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
3617 v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
3619 src -= (src_stride + 1);
3621 filter_vec = LD_SH(filter_x);
3624 filter_vec = LD_SH(filter_y);
3631 const_vec = __msa_ldi_h(128);
3634 LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
3637 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
3638 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
3639 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
3640 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
3641 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
3662 SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
3663 PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
3664 ST_SW2(dst0_r, dst1_r, dst, dst_stride);
3668 int16_t *dst, int32_t dst_stride,
3669 const int8_t *filter_x,
3670 const int8_t *filter_y, int32_t width8mult)
3673 v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
3674 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
3675 v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, const_vec;
3676 v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6;
3677 v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
3678 v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
3679 v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
3681 src -= (src_stride + 1);
3683 filter_vec = LD_SH(filter_x);
3686 filter_vec = LD_SH(filter_y);
3694 const_vec = __msa_ldi_h(128);
3697 for (cnt = width8mult; cnt--;) {
3698 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
3702 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
3703 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
3704 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
3716 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
3717 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
3718 VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
3719 VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
3741 SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
3742 SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
3743 PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
3744 PCKEV_H2_SW(dst2_l, dst2_r, dst3_l, dst3_r, dst2_r, dst3_r);
3746 ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
3755 const int8_t *filter_x,
3756 const int8_t *filter_y)
3758 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
3760 v8i16 filt_h0, filt_h1;
3763 v8i16 filter_vec, const_vec;
3764 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
3765 v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
3766 v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
3767 v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
3768 v4i32 dst4_r, dst4_l, dst5_r, dst5_l;
3769 v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
3770 v8i16 dst21_r, dst43_r, dst21_l, dst43_l;
3771 v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
3772 v8i16 dst76_r, dst76_l, dst87_r, dst87_l;
3774 src -= (src_stride + 1);
3776 filter_vec = LD_SH(filter_x);
3779 filter_vec = LD_SH(filter_y);
3786 const_vec = __msa_ldi_h(128);
3789 LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
3790 src += (5 * src_stride);
3791 LD_SB4(src, src_stride, src5, src6, src7, src8);
3796 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
3797 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
3798 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
3799 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
3800 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
3801 VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
3802 VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
3803 VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
3804 VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);
3847 SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
3848 SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
3849 SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);
3851 PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r,
3852             dst2_l, dst2_r, dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
3853 PCKEV_H2_SW(dst4_l, dst4_r, dst5_l, dst5_r, dst4_r, dst5_r);
3855 ST_SW2(dst0_r, dst1_r, dst, dst_stride);
3856 dst += (2 * dst_stride);
3857 ST_SW2(dst2_r, dst3_r, dst, dst_stride);
3858 dst += (2 * dst_stride);
3859 ST_SW2(dst4_r, dst5_r, dst, dst_stride);
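/* The 8x6 HV kernel above loads 5 + 4 = 9 source rows: six vertical
 * outputs of a 4-tap filter need height + 3 rows of horizontally
 * filtered context. */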
3866 const int8_t *filter_x,
3867 const int8_t *filter_y,
3871 uint32_t loop_cnt, cnt;
3874 v16i8 src0, src1, src2, src3, src4, src5, src6;
3876 v8i16 filt_h0, filt_h1;
3879 v8i16 filter_vec, const_vec;
3880 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
3881 v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6;
3882 v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
3883 v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
3884 v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
3886 src -= (src_stride + 1);
3888 filter_vec = LD_SH(filter_x);
3891 filter_vec = LD_SH(filter_y);
3898 const_vec = __msa_ldi_h(128);
3901 for (cnt = width8mult; cnt--;) {
3905 LD_SB3(src_tmp, src_stride, src0, src1, src2);
3906 src_tmp += (3 * src_stride);
3910 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
3911 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
3912 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
3924 for (loop_cnt = height >> 2; loop_cnt--;) {
3925 LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
3926 src_tmp += (4 * src_stride);
3929 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
3930 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
3931 VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
3932 VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
3957 SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
3958 SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
3960 PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r,
3961             dst2_l, dst2_r, dst3_l, dst3_r,
3962             dst0_r, dst1_r, dst2_r, dst3_r);
3964 ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
3965 dst_tmp += (4 * dst_stride);
3983 const int8_t *filter_x,
3984 const int8_t *filter_y,
3989 hevc_hv_4t_8x2_msa(src, src_stride, dst, dst_stride,
3990                    filter_x, filter_y);
3991 } else if (4 == height) {
3992 hevc_hv_4t_8multx4_msa(src, src_stride, dst, dst_stride,
3993                        filter_x, filter_y, 1);
3994 } else if (6 == height) {
3995 hevc_hv_4t_8x6_msa(src, src_stride, dst, dst_stride,
3996                    filter_x, filter_y);
3997 } else if (0 == (height % 4)) {
3998 hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
3999                            filter_x, filter_y, height, 1);
4007 const int8_t *filter_x,
4008 const int8_t *filter_y,
4014 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
4015 v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
4016 v16i8 mask0, mask1, mask2, mask3;
4017 v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, const_vec;
4018 v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst10, dst21, dst22, dst73;
4019 v8i16 dst84, dst95, dst106, dst76_r, dst98_r, dst87_r, dst109_r;
4020 v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
4021 v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
4022 v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
4023 v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
4025 src -= (src_stride + 1);
4027 filter_vec = LD_SH(filter_x);
4030 filter_vec = LD_SH(filter_y);
4038 const_vec = __msa_ldi_h(128);
4044 LD_SB3(src_tmp, src_stride, src0, src1, src2);
4045 src_tmp += (3 * src_stride);
4049 VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
4050 VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
4051 VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
4063 for (loop_cnt = 4; loop_cnt--;) {
4064 LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
4065 src_tmp += (4 * src_stride);
4068 VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
4069 VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
4070 VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
4071 VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
4096 SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
4097 SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
4098 PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
4099 dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
4100 ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
4101 dst_tmp += (4 * dst_stride);
4116 LD_SB3(src, src_stride, src0, src1, src2);
4117 src += (3 * src_stride);
4119 VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
4120 VSHF_B2_SB(src1, src2, src1, src2, mask2, mask3, vec2, vec3);
4126 dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
4128 for (loop_cnt = 2; loop_cnt--;) {
4129 LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9,
4130        src10);
4131 src += (8 * src_stride);
4133 VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);
4134 VSHF_B2_SB(src4, src8, src4, src8, mask2, mask3, vec2, vec3);
4135 VSHF_B2_SB(src5, src9, src5, src9, mask2, mask3, vec4, vec5);
4136 VSHF_B2_SB(src6, src10, src6, src10, mask2, mask3, vec6, vec7);
4145 DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst106, dst106);
4147 dst32_r = __msa_ilvr_h(dst73, dst22);
4151 dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
4152 dst76_r = __msa_ilvr_h(dst22, dst106);
4163 SRA_4V(tmp0, tmp1, tmp2, tmp3, 6);
4164 SRA_4V(tmp4, tmp5, tmp6, tmp7, 6);
4165 PCKEV_H4_SW(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, tmp0, tmp1,
4166             tmp2, tmp3);
4167 ST8x8_UB(tmp0, tmp1, tmp2, tmp3, dst, 2 * dst_stride);
4168 dst += (8 * dst_stride);
4172 dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
4180 const int8_t *filter_x,
4181 const int8_t *filter_y,
4185 hevc_hv_4t_8multx4_msa(src, src_stride, dst, dst_stride,
4186                        filter_x, filter_y, 2);
4188 hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
4189                            filter_x, filter_y, height, 2);
4197 const int8_t *filter_x,
4198 const int8_t *filter_y,
4201 hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
4202                            filter_x, filter_y, height, 3);
4209 const int8_t *filter_x,
4210 const int8_t *filter_y,
4213 hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
4214                            filter_x, filter_y, height, 4);
4217 #define MC_COPY(WIDTH) \
4218 void ff_hevc_put_hevc_pel_pixels##WIDTH##_8_msa(int16_t *dst, \
4220 ptrdiff_t src_stride, \
4226 hevc_copy_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE, height); \
4241 #define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
4242 void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
4244 ptrdiff_t src_stride, \
4250 const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1]; \
4252 hevc_##DIR1##_##TAP##t_##WIDTH##w_msa(src, src_stride, dst, \
4253 MAX_PB_SIZE, filter, height); \
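/* For example, MC(qpel, h, 4, 8, hz, mx) below expands to
 * ff_hevc_put_hevc_qpel_h4_8_msa(), which selects its filter from
 * ff_hevc_qpel_filters[mx - 1] and forwards to hevc_hz_8t_4w_msa()
 * with MAX_PB_SIZE as the destination stride. */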
4256 MC(qpel, h, 4, 8, hz, mx);
4257 MC(qpel, h, 8, 8, hz, mx);
4258 MC(qpel, h, 12, 8, hz, mx);
4259 MC(qpel, h, 16, 8, hz, mx);
4260 MC(qpel, h, 24, 8, hz, mx);
4261 MC(qpel, h, 32, 8, hz, mx);
4262 MC(qpel, h, 48, 8, hz, mx);
4263 MC(qpel, h, 64, 8, hz, mx);
4265 MC(qpel, v, 4, 8, vt, my);
4266 MC(qpel, v, 8, 8, vt, my);
4267 MC(qpel, v, 12, 8, vt, my);
4268 MC(qpel, v, 16, 8, vt, my);
4269 MC(qpel, v, 24, 8, vt, my);
4270 MC(qpel, v, 32, 8, vt, my);
4271 MC(qpel, v, 48, 8, vt, my);
4272 MC(qpel, v, 64, 8, vt, my);
4274 MC(epel, h, 4, 4, hz, mx);
4275 MC(epel, h, 6, 4, hz, mx);
4276 MC(epel, h, 8, 4, hz, mx);
4277 MC(epel, h, 12, 4, hz, mx);
4278 MC(epel, h, 16, 4, hz, mx);
4279 MC(epel, h, 24, 4, hz, mx);
4280 MC(epel, h, 32, 4, hz, mx);
4282 MC(epel, v, 4, 4, vt, my);
4283 MC(epel, v, 6, 4, vt, my);
4284 MC(epel, v, 8, 4, vt, my);
4285 MC(epel, v, 12, 4, vt, my);
4286 MC(epel, v, 16, 4, vt, my);
4287 MC(epel, v, 24, 4, vt, my);
4288 MC(epel, v, 32, 4, vt, my);
4292 #define MC_HV(PEL, WIDTH, TAP) \
4293 void ff_hevc_put_hevc_##PEL##_hv##WIDTH##_8_msa(int16_t *dst, \
4295 ptrdiff_t src_stride, \
4301 const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1]; \
4302 const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1]; \
4304 hevc_hv_##TAP##t_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE, \
4305 filter_x, filter_y, height); \
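/* Analogously, MC_HV(epel, 4, 4) would define
 * ff_hevc_put_hevc_epel_hv4_8_msa(), selecting filter_x and filter_y
 * from ff_hevc_epel_filters[mx - 1] and [my - 1] and forwarding to
 * hevc_hv_4t_4w_msa(); the expansion list itself is not shown in this
 * excerpt. */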