#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
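/* OP_U8_ALTIVEC selects how each lowpass result is written out: the PUT
 * variant stores the filtered value directly, while the AVG variant rounds
 * it together with the existing destination pixel via vec_avg().  The
 * PREFIX_* macros below rename the shared lowpass routines so the same code
 * can be instantiated once with put_ names and once with avg_ names. */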
#define OP_U8_ALTIVEC                         PUT_OP_U8_ALTIVEC
#define PREFIX_h264_qpel16_h_lowpass_altivec  put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num      altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec  put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num      altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num     altivec_put_h264_qpel16_hv_lowpass_num

#undef OP_U8_ALTIVEC
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
#define OP_U8_ALTIVEC                         AVG_OP_U8_ALTIVEC
#define PREFIX_h264_qpel16_h_lowpass_altivec  avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num      altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec  avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num      altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num     altivec_avg_h264_qpel16_hv_lowpass_num

#undef OP_U8_ALTIVEC
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num
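/* H264_MC() emits the 16 quarter-pel motion compensation functions for one
 * block SIZE.  The mcXY suffix gives the quarter-pel offset of the sample
 * being interpolated: X is the horizontal fraction and Y the vertical one,
 * so mc00 is the full-pel copy, mc20 the horizontal half-pel case, and mc22
 * the centre position that needs both lowpass filters. */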
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}
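/* put_pixels16_l2_altivec() writes the rounded average of two 16-pixel-wide
 * sources into dst, one row per iteration.  src1 and dst may be unaligned,
 * so they go through the usual AltiVec vec_lvsl()/vec_lvsr() + vec_perm()
 * realignment; src2 is read with a fixed row stride of 16 bytes. */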
static inline void put_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        /* unaligned load of one row of src1 */
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
        a = vec_perm(tmp1, tmp2, mask);

        /* src2 rows are packed with a fixed stride of 16 bytes */
        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);
        b = vec_perm(tmp1, tmp2, mask_);

        /* load dst so its boundary bytes can be preserved by the store */
        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        /* rounded average of the two sources */
        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);
        align = vec_lvsr(0, dst);
        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        /* unaligned store of the result back into dst */
        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
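/* Identical to put_pixels16_l2_altivec() except that the l2 average is
 * additionally averaged with the pixels already present in dst, as needed
 * for the avg_* motion compensation functions. */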
static inline void avg_pixels16_l2_altivec(uint8_t *dst, const uint8_t *src1,
                                           const uint8_t *src2, int dst_stride,
                                           int src_stride1, int h)
{
    int i;
    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        /* unaligned load of one row of src1 */
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
        a = vec_perm(tmp1, tmp2, mask);

        /* src2 rows are packed with a fixed stride of 16 bytes */
        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);
        b = vec_perm(tmp1, tmp2, mask_);

        /* load dst both for the averaging and to preserve its boundary bytes */
        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        /* average the l2 result avg(a, b) with the pixels already in dst */
        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);
        align = vec_lvsr(0, dst);
        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        /* unaligned store of the result back into dst */
        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
    const int high_bit_depth = bit_depth > 8;

    if (!high_bit_depth) {
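/* dspfunc() fills one 16-entry quarter-pel function table: the entry for the
 * mcXY function is stored at index x + 4 * y. */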
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec