/* The shared AltiVec template below is instantiated twice: first with a
 * plain store ("put"), then with averaging against dst ("avg"). */
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
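/* Second instantiation of the same template: OP_U8_ALTIVEC now averages the
 * result with the destination, producing the avg_* variants. */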
#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
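/* H264_MC() expands to the 16 quarter-pel motion-compensation entry points
 * (mcXY: X = horizontal quarter-pel offset, Y = vertical) for one block size
 * and one store operation (put or avg).  Fractional positions are built from
 * the 6-tap h/v/hv lowpass filters plus pixel averaging. */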
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}
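/* put/avg_pixels16_l2: rounded mean of two 16-wide sources, used for the
 * half/quarter-pel combinations above.  src1 may be unaligned and advances by
 * src_stride1 per row; src2 is a packed buffer of 16 bytes per row. */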
static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
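/* Same addressing as the put variant above, but the avg(a, b) result is
 * additionally averaged with the bytes already present in dst. */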
static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
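/****************************************************************************
 * IDCT transform
 ****************************************************************************/

/* One 1-D pass of the 4x4 H.264 inverse transform: the even butterfly on
 * (Y0, Y2) plus the half-sample combinations of (Y1, Y3). */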
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)  \
    vz0 = vec_add(vb0,vb2);                          \
    vz1 = vec_sub(vb0,vb2);                          \
    vz2 = vec_sra(vb1,vec_splat_u16(1));             \
    vz2 = vec_sub(vz2,vb3);                          \
    vz3 = vec_sra(vb3,vec_splat_u16(1));             \
    vz3 = vec_add(vb1,vz3);                          \
    va0 = vec_add(vz0,vz3);                          \
    va1 = vec_add(vz1,vz2);                          \
    va2 = vec_sub(vz1,vz2);                          \
    va3 = vec_sub(vz0,vz3)
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 );                   \
    b1 = vec_mergeh( a1, a0 );                   \
    b2 = vec_mergeh( a2, a0 );                   \
    b3 = vec_mergeh( a3, a0 );                   \
    a0 = vec_mergeh( b0, b2 );                   \
    a1 = vec_mergel( b0, b2 );                   \
    a2 = vec_mergeh( b1, b3 );                   \
    a3 = vec_mergel( b1, b3 );                   \
    b0 = vec_mergeh( a0, a2 );                   \
    b1 = vec_mergel( a0, a2 );                   \
    b2 = vec_mergeh( a1, a3 );                   \
    b3 = vec_mergel( a1, a3 )
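/* Load 4 destination pixels, add one row of the (already scaled) residual,
 * clip to 0..255 with the saturating pack and store the 4 bytes back with
 * vec_ste. */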
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)              \
    vdst_orig = vec_ld(0, dst);                       \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);  \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);   \
    va = vec_add(va, vdst_ss);                        \
    va_u8 = vec_packsu(va, zero_s16v);                \
    va_u32 = vec_splat((vec_u32)va_u8, 0);            \
    vec_ste(va_u32, element, (uint32_t*)dst);
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);
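/* One 1-D pass of the 8x8 H.264 inverse transform: even butterfly on
 * s0/s2/s4/s6, odd part on s1/s3/s5/s7, then the final combine into d0..d7. */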
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* even part */                                                           \
    vec_s16 a0v = vec_add(s0, s4);                                            \
    vec_s16 a2v = vec_sub(s0, s4);                                            \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);                             \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);                             \
    vec_s16 b0v = vec_add(a0v, a6v);                                          \
    vec_s16 b2v = vec_add(a2v, a4v);                                          \
    vec_s16 b4v = vec_sub(a2v, a4v);                                          \
    vec_s16 b6v = vec_sub(a0v, a6v);                                          \
    /* odd part */                                                            \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v);                          \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov));                           \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v);                          \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov));                          \
    /* output */                                                              \
    d0 = vec_add(b0v, b7v);                                                   \
    d1 = vec_add(b2v, b5v);                                                   \
    d2 = vec_add(b4v, b3v);                                                   \
    d3 = vec_add(b6v, b1v);                                                   \
    d4 = vec_sub(b6v, b1v);                                                   \
    d5 = vec_sub(b4v, b3v);                                                   \
    d6 = vec_sub(b2v, b5v);                                                   \
    d7 = vec_sub(b0v, b7v);                                                   \
}
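/* Add one transformed row (>>6) to 8 possibly unaligned destination pixels:
 * read-modify-write the two vectors covering dest, selecting only the eight
 * target bytes via the permute/select masks. */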
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                     \
    vec_u8 hv = vec_ld( 0, dest );                           \
    vec_u8 lv = vec_ld( 7, dest );                           \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );      \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);     \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);    \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8 edgehv;                                           \
    /* unaligned store */                                    \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv = vec_sel( lv, bodyv, edgelv );                       \
    vec_st( lv, 7, dest );                                   \
    hv = vec_ld( 0, dest );                                  \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );            \
    hv = vec_sel( hv, bodyv, edgehv );                       \
    vec_st( hv, 0, dest );                                   \
}
static void ff_h264_idct8_add_altivec(uint8_t *dst, DCTELEM *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8(d0, d1, d2, d3, d4, d5, d6, d7);

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
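/* DC-only inverse transform: splat (block[0] + 32) >> 6 and add it to a 4x4
 * or 8x8 block of dst using saturating byte arithmetic (the positive part via
 * vec_adds, the negative part via vec_subs). */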
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
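/* Per-macroblock dispatchers: walk the 4x4 (or 8x8) blocks and run the full
 * IDCT-add only where nnzc[] reports coefficients, falling back to the
 * DC-only path when just the DC term is set. */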
        int nnz = nnzc[ scan8[i] ];
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
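/* Deblocking filter helpers.  Filtering across a vertical edge works on
 * transposed data: 16x6 pixels are loaded and transposed, filtered with the
 * same code as the horizontal-edge case, then the four changed lines are
 * transposed back and written out. */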
#define transpose4x16(r0, r1, r2, r3) {  \
    register vec_u8 r4;                  \
    register vec_u8 r5;                  \
    register vec_u8 r6;                  \
    register vec_u8 r7;                  \
                                         \
    r4 = vec_mergeh(r0, r2);             \
    r5 = vec_mergel(r0, r2);             \
    r6 = vec_mergeh(r1, r3);             \
    r7 = vec_mergel(r1, r3);             \
                                         \
    r0 = vec_mergeh(r4, r6);             \
    r1 = vec_mergel(r4, r6);             \
    r2 = vec_mergeh(r5, r7);             \
    r3 = vec_mergel(r5, r7);             \
}
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* scatter the four stored rows as 16 stride-separated 4-byte writes */
    *dst_int = *src_int;
    *(dst_int+ int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}
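/* Load 16 rows of 16 pixels (unaligned) and transpose them so that the six
 * pixels straddling the vertical edge end up in r8..r13, one line of the
 * transposed layout per register. */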
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);  \
    register vec_u8 r1  = unaligned_load(   src_stride, src);  \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);  \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);  \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);  \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);  \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);  \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);  \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);  \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);  \
                                                                \
    r8  = unaligned_load( 8*src_stride, src);                   \
    r9  = unaligned_load( 9*src_stride, src);                   \
    r10 = unaligned_load(10*src_stride, src);                   \
    r11 = unaligned_load(11*src_stride, src);                   \
    r12 = unaligned_load(12*src_stride, src);                   \
    r13 = unaligned_load(13*src_stride, src);                   \
                                                                \
    /* first merge: interleave rows 0..7 with rows 8..15 */     \
    r0 = vec_mergeh(r0, r8);                                    \
    r1 = vec_mergeh(r1, r9);                                    \
    r2 = vec_mergeh(r2, r10);                                   \
    r3 = vec_mergeh(r3, r11);                                   \
    r4 = vec_mergeh(r4, r12);                                   \
    r5 = vec_mergeh(r5, r13);                                   \
    r6 = vec_mergeh(r6, r14);                                   \
    r7 = vec_mergeh(r7, r15);                                   \
                                                                \
    /* second merge */                                          \
    r8  = vec_mergeh(r0, r4);                                   \
    r9  = vec_mergel(r0, r4);                                   \
    r10 = vec_mergeh(r1, r5);                                   \
    r11 = vec_mergel(r1, r5);                                   \
    r12 = vec_mergeh(r2, r6);                                   \
    r13 = vec_mergel(r2, r6);                                   \
    r14 = vec_mergeh(r3, r7);                                   \
    r15 = vec_mergel(r3, r7);                                   \
                                                                \
    /* third merge; lines 3 and 7 of this stage are not needed */ \
    r0 = vec_mergeh(r8, r12);                                   \
    r1 = vec_mergel(r8, r12);                                   \
    r2 = vec_mergeh(r9, r13);                                   \
    r4 = vec_mergeh(r10, r14);                                  \
    r5 = vec_mergel(r10, r14);                                  \
    r6 = vec_mergeh(r11, r15);                                  \
                                                                \
    /* fourth merge: keep only the six lines we need */         \
    r8  = vec_mergeh(r0, r4);                                   \
    r9  = vec_mergel(r0, r4);                                   \
    r10 = vec_mergeh(r1, r5);                                   \
    r11 = vec_mergel(r1, r5);                                   \
    r12 = vec_mergeh(r2, r6);                                   \
    r13 = vec_mergel(r2, r6);                                   \
}
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x - y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);
    return mask;
}
    register vec_u8 average = vec_avg(p0, q0);

    temp = vec_xor(average, p2);        /* low bit set where avg and p2 differ in parity */
    average = vec_avg(average, p2);     /* (avg(p0,q0) + p2 + 1) >> 1 */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);
    uncliped = vec_subs(average, temp); /* undo the round-up: (avg(p0,q0) + p2) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);     /* clip to p1 +/- tc0 */
    newp1 = vec_min(max, newp1);
    return newp1;
}
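/* Core p0/q0 update of the luma deblocking filter: compute the standard
 * H.264 delta ((q0 - p0) * 4 + (p1 - q1) + 4) >> 3 in biased unsigned
 * arithmetic (0xA0 = 160 is the bias), clip it to +/- tc0 and apply it. */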
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                          \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); /* 0xA0 */     \
                                                                                 \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                     \
    register vec_u8 q1minus;                                                     \
    register vec_u8 p0minus;                                                     \
    register vec_u8 stage1;                                                      \
    register vec_u8 stage2;                                                      \
    register vec_u8 vec160;                                                      \
    register vec_u8 delta;                                                       \
    register vec_u8 deltaneg;                                                    \
                                                                                 \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                    \
    stage1 = vec_avg(p1, q1minus);                                               \
    stage2 = vec_sr(stage1, vec_splat_u8(1));                                    \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                    \
    stage1 = vec_avg(q0, p0minus);                                               \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                   \
    stage2 = vec_avg(stage2, pq0bit);                                            \
    stage2 = vec_adds(stage2, stage1);                                           \
    vec160 = vec_ld(0, &A0v);                                                    \
    deltaneg = vec_subs(vec160, stage2);       /* -delta, clipped at 0 */        \
    delta = vec_subs(stage2, vec160);          /* +delta, clipped at 0 */        \
    deltaneg = vec_min(tc0masked, deltaneg);                                     \
    delta = vec_min(tc0masked, delta);                                           \
    p0 = vec_subs(p0, deltaneg);                                                 \
    q0 = vec_subs(q0, delta);                                                    \
    p0 = vec_adds(p0, delta);                                                    \
    q0 = vec_adds(q0, deltaneg);                                                 \
}
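/* Full luma edge filter for one 16-pixel edge: build the |p0-q0| < alpha,
 * |p1-p0| < beta, |q1-q0| < beta mask, combine it with tc0 >= 0, optionally
 * filter p1/q1 when |p2-p0| / |q2-q0| are below beta (extending tc), then
 * filter p0/q0. */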
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {          \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                          \
    register vec_u8 alphavec;                                                              \
    register vec_u8 betavec;                                                               \
    register vec_u8 mask;                                                                  \
    register vec_u8 p1mask;                                                                \
    register vec_u8 q1mask;                                                                \
    register vector signed char tc0vec;                                                    \
    register vec_u8 finaltc0;                                                              \
    register vec_u8 tc0masked;                                                             \
    register vec_u8 newp1;                                                                 \
    register vec_u8 newq1;                                                                 \
                                                                                           \
    temp[0] = alpha;                                                                       \
    temp[1] = beta;                                                                        \
    alphavec = vec_ld(0, temp);                                                            \
    betavec = vec_splat(alphavec, 0x1);                                                    \
    alphavec = vec_splat(alphavec, 0x0);                                                   \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */         \
                                                                                           \
    AV_COPY32(temp, tc0);                                                                  \
    tc0vec = vec_ld(0, (signed char*)temp);                                                \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                   \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                   \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */       \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */             \
                                                                                           \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                             \
    p1mask = vec_and(p1mask, mask);                             /* if |p2 - p0| < beta */  \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                 \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                    \
                                                                                           \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                             \
    q1mask = vec_and(q1mask, mask);                             /* if |q2 - q0| < beta */  \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                 \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                    \
                                                                                           \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                          \
    p1 = newp1;                                                                            \
    q1 = newq1;                                                                            \
}
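/* Entry points: the first variant loads whole 16-pixel rows around a
 * horizontal edge directly; the second filters across a vertical edge, so it
 * transposes 16x6 pixels, filters, and writes the four changed columns back
 * via the transpose helpers above. */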
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
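/* Weighted prediction (explicit / bi-directional).  The scalar weight(s),
 * offset and shift are broadcast into vectors once, then each row is widened
 * to 16 bits, multiplied, offset, shifted and packed back with saturation.
 * Used by the H264_WEIGHT() wrappers below. */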
    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
static void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                                    int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
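/* H264_WEIGHT(W) expands to the ff_weight/ff_biweight entry points for a
 * W-pixel-wide block. */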
#define H264_WEIGHT(W) \
static void ff_weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                   int log2_denom, int weight, int offset){ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
}\
static void ff_biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                     int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)
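/* Init: hook the AltiVec implementations into the DSP/H264DSP function
 * pointer tables (8-bit depth only). */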
    const int high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (!high_bit_depth) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
        if (bit_depth == 8) {
            if (chroma_format_idc == 1)