#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

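/* h264_template_altivec.c is compiled twice below: once with OP_U8_ALTIVEC
 * defined to the plain "put" store and once with the averaging "avg" store,
 * which instantiates put_* and avg_* versions of the chroma MC and the
 * qpel16 lowpass helpers used by the H264_MC macro further down. */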
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

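/* H264_MC() expands to the 16 quarter-pel luma MC functions for one block
 * size: _mcXY, where X/Y are the horizontal/vertical quarter-pel offsets.
 * Half-pel positions call the 6-tap lowpass filters directly; quarter-pel
 * positions filter into an aligned scratch buffer and then average it with
 * the appropriate source (or a second filtered buffer) via the *_l2 helpers. */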
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

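/* Average 16-byte rows of src1 (arbitrary alignment, stride src_stride1)
 * with rows of src2 and write the result to dst.  src2 is read with a fixed
 * permute mask and a stride of 16, i.e. it is one of the 16-byte aligned
 * scratch buffers produced by the lowpass filters above; dst may be
 * unaligned and is updated with a read-merge-write of its edge bytes. */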
static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

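/* Same as put_pixels16_l2_altivec() but the result is additionally averaged
 * with the existing destination, giving the rounded "avg" MC semantics. */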
static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)

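/* One 1-D pass of the H.264 4x4 inverse transform (the integer "DCT"):
 *   z0 = b0 + b2          z1 = b0 - b2
 *   z2 = (b1 >> 1) - b3   z3 = b1 + (b3 >> 1)
 * followed by the output butterfly a0..a3.  Two passes with a transpose in
 * between give the full 2-D transform. */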
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1]/2 - Y[3] */   \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3]/2 */   \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

/* 4x4 transpose of 16-bit elements via merge high/low */
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

/* load 4 dst pixels, add the residual row va, clip/pack and store 4 bytes back */
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);  \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_packsu(va, zero_s16v);               \
    va_u32 = vec_splat((vec_u32)va_u8, 0);           \
    vec_ste(va_u32, element, (uint32_t*)dst);

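/* Add the inverse-transformed 4x4 residual to dst: block[0] gets the +32
 * rounding bias, then a row pass, a transpose and a column pass are applied;
 * the result is shifted right by 6 and added to the prediction with unsigned
 * saturation, four bytes per row written back with vec_ste. */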
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

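/* One 1-D pass of the H.264 8x8 inverse transform, split into the even part
 * (a0/a2/a4/a6 -> b0/b2/b4/b6) and the odd part (a1/a3/a5/a7 -> b1/b3/b5/b7)
 * exactly as written in the spec, with >>1 and >>2 standing in for the 1/2
 * and 1/4 weights. */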
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5) + SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0) = b0 + b7; */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1) = b2 + b5; */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2) = b4 + b3; */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3) = b6 + b1; */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4) = b6 - b1; */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5) = b4 - b3; */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6) = b2 - b5; */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7) = b0 - b7; */ \
    d7 = vec_sub(b0v, b7v); \
}

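/* Add one row of >>6-scaled 8x8 residual to 8 destination pixels that may be
 * unaligned: load the two vectors covering the row, add with saturation,
 * pack to unsigned bytes and write back only the 8 affected bytes using the
 * sel mask and the load/store permutes computed by the caller. */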
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                    \
    vec_u8 hv = vec_ld( 0, dest );                          \
    vec_u8 lv = vec_ld( 7, dest );                          \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );     \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);    \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);   \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);       \
    vec_u8 edgehv;                                          \
    /* unaligned store */                                   \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );    \
    lv = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                  \
    hv = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );           \
    hv = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                  \
}

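/* Full 8x8 inverse transform + add: rounding bias on the DC coefficient,
 * row pass, TRANSPOSE8, column pass, then a clipped store per row. */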
static void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32;

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

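/* DC-only inverse transform: dc = (block[0] + 32) >> 6 is splatted, rotated
 * to the destination's alignment and applied four rows at a time.  The
 * positive part is added and the negative part subtracted with saturating
 * unsigned ops so both signs clamp correctly; bytes outside the block see
 * +0/-0 from the full 16-byte load/store (this presumes each 4- or 8-pixel
 * row stays within one 16-byte chunk, which appears to be assumed of the
 * callers' plane layout). */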
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

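/* The add16/add16intra/add8/add4 wrappers below walk the macroblock in
 * scan8[] order and dispatch each 4x4 (or 8x8) block either to the cheap
 * DC-only path or to the full IDCT+add, based on the non-zero-coefficient
 * counts provided by the decoder. */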
static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

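/* Deblocking helpers: the horizontal (vertical-edge) luma filter works on
 * transposed data.  transpose4x16() rearranges the four filtered "column"
 * vectors back into sixteen 4-byte groups, one per image row, so write16x4()
 * can store them. */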
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8 r4;                 \
    register vec_u8 r5;                 \
    register vec_u8 r6;                 \
    register vec_u8 r7;                 \
                                        \
    r4 = vec_mergeh(r0, r2);            \
    r5 = vec_mergel(r0, r2);            \
    r6 = vec_mergeh(r1, r3);            \
    r7 = vec_mergel(r1, r3);            \
                                        \
    r0 = vec_mergeh(r4, r6);            \
    r1 = vec_mergel(r4, r6);            \
    r2 = vec_mergeh(r5, r7);            \
    r3 = vec_mergel(r5, r7);            \
}

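/* Spill the four transposed vectors to an aligned scratch buffer and copy
 * them to the image as sixteen 32-bit stores, one 4-pixel group per row. */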
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);

    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

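/* Load 16 rows of (at least) 6 pixels starting at src with unaligned loads
 * and transpose them so that r8..r13 each hold one column (p2 p1 p0 q0 q1 q2)
 * across all 16 rows; the remaining columns are never needed and are dropped
 * during the merge passes. */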
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src); \
    register vec_u8 r1  = unaligned_load(   src_stride, src); \
    register vec_u8 r2  = unaligned_load(2* src_stride, src); \
    register vec_u8 r3  = unaligned_load(3* src_stride, src); \
    register vec_u8 r4  = unaligned_load(4* src_stride, src); \
    register vec_u8 r5  = unaligned_load(5* src_stride, src); \
    register vec_u8 r6  = unaligned_load(6* src_stride, src); \
    register vec_u8 r7  = unaligned_load(7* src_stride, src); \
    register vec_u8 r14 = unaligned_load(14*src_stride, src); \
    register vec_u8 r15 = unaligned_load(15*src_stride, src); \
                                                               \
    r8  = unaligned_load( 8*src_stride, src);                  \
    r9  = unaligned_load( 9*src_stride, src);                  \
    r10 = unaligned_load(10*src_stride, src);                  \
    r11 = unaligned_load(11*src_stride, src);                  \
    r12 = unaligned_load(12*src_stride, src);                  \
    r13 = unaligned_load(13*src_stride, src);                  \
                                                               \
    /* merge first pairs: row i with row i+8 */                \
    r0 = vec_mergeh(r0, r8);                                   \
    r1 = vec_mergeh(r1, r9);                                   \
    r2 = vec_mergeh(r2, r10);                                  \
    r3 = vec_mergeh(r3, r11);                                  \
    r4 = vec_mergeh(r4, r12);                                  \
    r5 = vec_mergeh(r5, r13);                                  \
    r6 = vec_mergeh(r6, r14);                                  \
    r7 = vec_mergeh(r7, r15);                                  \
                                                               \
    /* merge second pairs */                                   \
    r8  = vec_mergeh(r0, r4);                                  \
    r9  = vec_mergel(r0, r4);                                  \
    r10 = vec_mergeh(r1, r5);                                  \
    r11 = vec_mergel(r1, r5);                                  \
    r12 = vec_mergeh(r2, r6);                                  \
    r13 = vec_mergel(r2, r6);                                  \
    r14 = vec_mergeh(r3, r7);                                  \
    r15 = vec_mergel(r3, r7);                                  \
                                                               \
    /* third merge; the r3/r7 outputs are never needed */      \
    r0 = vec_mergeh(r8,  r12);                                 \
    r1 = vec_mergel(r8,  r12);                                 \
    r2 = vec_mergeh(r9,  r13);                                 \
    r4 = vec_mergeh(r10, r14);                                 \
    r5 = vec_mergel(r10, r14);                                 \
    r6 = vec_mergeh(r11, r15);                                 \
                                                               \
    /* final merge: only the six wanted columns are produced */\
    r8  = vec_mergeh(r0, r4);                                  \
    r9  = vec_mergel(r0, r4);                                  \
    r10 = vec_mergeh(r1, r5);                                  \
    r11 = vec_mergel(r1, r5);                                  \
    r12 = vec_mergeh(r2, r6);                                  \
    r13 = vec_mergel(r2, r6);                                  \
}

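/* Per-byte |x - y| < a, returned as an all-ones / all-zeros byte mask.
 * The absolute difference is built from two saturating subtractions. */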
static inline vec_u8 diff_lt_altivec(register vec_u8 x,
                                     register vec_u8 y,
                                     register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg);
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

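/* Standard deblocking filter condition as a byte mask:
 * |p0 - q0| < alpha && |p1 - p0| < beta && |q1 - q0| < beta. */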
static inline vec_u8 h264_deblock_mask(register vec_u8 p0,
                                       register vec_u8 p1,
                                       register vec_u8 q0,
                                       register vec_u8 q1,
                                       register vec_u8 alpha,
                                       register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

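/* p1' (or q1') update: clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1 - tc0,
 * p1 + tc0).  The xor/and correction turns the rounded vec_avg into the
 * exact truncating average the formula requires. */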
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2 ^ avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2 + ((p0 + q0 + 1) >> 1)) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

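/* p0/q0 update: delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc0, tc0),
 * then p0 += delta and q0 -= delta.  Everything is done in saturated unsigned
 * byte arithmetic; the intermediate sum is biased around 160 (A0v) so that
 * the positive and negative halves of delta can be separated with saturating
 * subtractions before being clipped against tc0. */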
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                             \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); /* 0xA0 = 160 */  \
                                                                                     \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                         \
    register vec_u8 q1minus;                                                         \
    register vec_u8 p0minus;                                                         \
    register vec_u8 stage1;                                                          \
    register vec_u8 stage2;                                                          \
    register vec_u8 vec160;                                                          \
    register vec_u8 delta;                                                           \
    register vec_u8 deltaneg;                                                        \
                                                                                     \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                        \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256) >> 1 */            \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256) >> 2 */            \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                        \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256) >> 1 */            \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); /* (p0 ^ q0) & 1, rounding bit */     \
    stage2 = vec_avg(stage2, pq0bit);                                                \
    stage2 = vec_adds(stage2, stage1);         /* 160 + delta (see above) */         \
    vec160 = vec_ld(0, &A0v);                                                        \
    deltaneg = vec_subs(vec160, stage2);       /* max(0, -delta) */                  \
    delta = vec_subs(stage2, vec160);          /* max(0, delta) */                   \
    deltaneg = vec_min(tc0masked, deltaneg);   /* clip to tc0 */                     \
    delta = vec_min(tc0masked, delta);                                               \
    p0 = vec_subs(p0, deltaneg);                                                     \
    q0 = vec_subs(q0, delta);                                                        \
    p0 = vec_adds(p0, delta);                                                        \
    q0 = vec_adds(q0, deltaneg);                                                     \
}

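/* Filter one 16-pixel luma edge held in the p2..q2 registers.  alpha, beta
 * and the four tc0 values are broadcast into vectors (each tc0 entry covers
 * a group of four pixels), the filter mask is built, p1/q1 are conditionally
 * filtered when |p2 - p0| < beta resp. |q2 - q0| < beta (which also bumps tc
 * by one), and finally p0/q0 are filtered with the accumulated tc. */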
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {    \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                    \
    register vec_u8 alphavec;                                                        \
    register vec_u8 betavec;                                                         \
    register vec_u8 mask;                                                            \
    register vec_u8 p1mask;                                                          \
    register vec_u8 q1mask;                                                          \
    register vector signed char tc0vec;                                              \
    register vec_u8 finaltc0;                                                        \
    register vec_u8 tc0masked;                                                       \
    register vec_u8 newp1;                                                           \
    register vec_u8 newq1;                                                           \
                                                                                     \
    temp[0] = alpha;                                                                 \
    temp[1] = beta;                                                                  \
    alphavec = vec_ld(0, temp);                                                      \
    betavec = vec_splat(alphavec, 0x1);                                              \
    alphavec = vec_splat(alphavec, 0x0);                                             \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */   \
                                                                                     \
    *((int *)temp) = *((int *)tc0);    /* tc0[0..3] */                               \
    tc0vec = vec_ld(0, (signed char*)temp);                                          \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                             \
    tc0vec = vec_mergeh(tc0vec, tc0vec); /* each tc0 entry now covers 4 pixels */    \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */       \
                                                                                     \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                       \
    p1mask = vec_and(p1mask, mask);             /* if ( |p2 - p0| < beta ) */        \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                     \
    finaltc0 = vec_sub(finaltc0, p1mask);       /* tc++ */                           \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                              \
    /* end if */                                                                     \
                                                                                     \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                       \
    q1mask = vec_and(q1mask, mask);             /* if ( |q2 - q0| < beta ) */        \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                     \
    finaltc0 = vec_sub(finaltc0, q1mask);       /* tc++ */                           \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                              \
    /* end if */                                                                     \
                                                                                     \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                    \
    p1 = newp1;                                                                      \
    q1 = newq1;                                                                      \
}

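/* Luma edge filters.  The "v" variant filters across a horizontal edge and
 * works on whole rows with plain vector loads/stores (the row loads use
 * vec_ld directly, so pix and stride are presumably expected to keep them
 * 16-byte aligned); the "h" variant below filters a vertical edge by
 * transposing 16x6 pixels, filtering, and writing back the four modified
 * columns.  Both bail out when any tc0 entry is negative. */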
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

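/* Explicit weighted prediction: block[x] = clip((block[x] * weight +
 * pre-scaled, rounded offset) >> log2_denom).  Rows are processed 16 pixels
 * at a time as two 8x16-bit halves; for 8-wide blocks only the half selected
 * by the block's 16-byte alignment is weighted, the other half is repacked
 * unchanged. */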
static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

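/* Bidirectional weighted prediction: dst[x] = clip((dst[x] * weightd +
 * src[x] * weights + (((offset + 1) | 1) << log2_denom)) >> (log2_denom + 1)).
 * Same 16-pixel, two-half scheme as the unidirectional version; for 8-wide
 * blocks the relevant source half is duplicated so it lines up with whichever
 * destination half gets processed. */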
static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

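/* Runtime init: plug the AltiVec implementations into the DSPContext /
 * H264DSPContext function tables when AltiVec is available on this CPU. */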
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
        c->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
}

void ff_h264dsp_init_ppc(H264DSPContext *c)
{
    if (has_altivec()) {
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}