39 int32_t p0, p1, p2, p3, tmp0, tmp1, tmp2;
40 int32_t b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4, b2_5, b2_6;
41 int32_t b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9;
43 const IDWTELEM *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
44 const int num_bands = 4;
58 for (y = 0; y < plane->
height; y += 2) {
69 b1_1 = b1_ptr[back_pitch];
71 b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch];
82 b3_2 = b3_ptr[back_pitch];
86 b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch];
90 for (x = 0, indx = 0; x < plane->
width; x+=2, indx++) {
91 if (x+2 >= plane->
width) {
111 p0 = p1 = p2 = p3 = 0;
117 b0_1 = b0_ptr[indx+1];
118 b0_2 = b0_ptr[pitch+indx+1];
123 p2 = (tmp0 + tmp2) << 3;
124 p3 = (tmp1 + tmp2 + b0_2) << 2;
131 b1_2 = b1_ptr[indx+1];
132 b1_1 = b1_ptr[back_pitch+indx+1];
134 tmp2 = tmp1 - tmp0*6 + b1_3;
135 b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1];
137 p0 += (tmp0 + tmp1) << 3;
138 p1 += (tmp0 + tmp1 + b1_1 + b1_2) << 2;
140 p3 += (tmp2 + b1_3) << 1;
145 b2_3 = b2_ptr[indx+1];
146 b2_6 = b2_ptr[pitch+indx+1];
149 tmp1 = b2_1 - b2_2*6 + b2_3;
153 p2 += (tmp0 + b2_4 + b2_5) << 2;
154 p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) << 1;
159 b3_6 = b3_ptr[indx+1];
160 b3_3 = b3_ptr[back_pitch+indx+1];
166 b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1];
168 p0 += (tmp0 + tmp1) << 2;
169 p1 += (tmp0 - tmp1*6 + tmp2) << 1;
170 p2 += (b3_7 + b3_8) << 1;
171 p3 += b3_7 - b3_8*6 + b3_9;
175 dst[x] = av_clip_uint8((p0 >> 6) + 128);
176 dst[x+1] = av_clip_uint8((p1 >> 6) + 128);
177 dst[dst_pitch+x] = av_clip_uint8((p2 >> 6) + 128);
178 dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128);
181 dst += dst_pitch << 1;
195 int x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3;
196 const IDWTELEM *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr;
208 for (y = 0; y < plane->
height; y += 2) {
209 for (x = 0, indx = 0; x < plane->
width; x += 2, indx++) {
217 p0 = (b0 + b1 + b2 + b3 + 2) >> 2;
218 p1 = (b0 + b1 - b2 - b3 + 2) >> 2;
219 p2 = (b0 - b1 + b2 - b3 + 2) >> 2;
220 p3 = (b0 - b1 - b2 + b3 + 2) >> 2;
223 dst[x] = av_clip_uint8(p0 + 128);
224 dst[x + 1] = av_clip_uint8(p1 + 128);
225 dst[dst_pitch + x] = av_clip_uint8(p2 + 128);
226 dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128);
229 dst += dst_pitch << 1;
239 #define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
241 o1 = (s1 + s2) >> 1;\
/**
 * Inverse 8-point Haar transform, written as a macro so the same
 * butterfly network can be reused for row and column passes with
 * different input/output strides.
 *
 * s1..s8 - input coefficients; note the butterfly input ordering in the
 *          parameter list: (s1, s5, s3, s7, s2, s4, s6, s8)
 * d1..d8 - output destinations
 * t0     - scratch temporary consumed by each IVI_HAAR_BFLY invocation
 * t1..t8 - intermediate temporaries (caller-provided lvalues)
 *
 * Relies on IVI_HAAR_BFLY (averaging butterfly, defined earlier in this
 * file — only partially visible here; confirm its exact semantics) and
 * on a COMPENSATE(x) macro that the user must #define beforehand to
 * apply the desired output scaling.
 */
#define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
                  d1, d2, d3, d4, d5, d6, d7, d8,\
                  t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    t1 = s1 << 1; t5 = s5 << 1;\
    IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
    IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
    IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\
    IVI_HAAR_BFLY(t7, s8, t7, t8, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8); }
/**
 * Inverse 4-point Haar transform, operating in place on s1/s3/s5/s7.
 *
 * NOTE(review): relies on HAAR_BFLY, which is defined elsewhere in the
 * file and not visible here — presumably an in-place two-point
 * butterfly; confirm before relying on this comment. Also requires a
 * user-supplied COMPENSATE(x) macro for the final output scaling.
 */
#define INV_HAAR4(s1, s3, s5, s7) {\
    HAAR_BFLY(s1, s5); HAAR_BFLY(s1, s3); HAAR_BFLY(s5, s7);\
    s1 = COMPENSATE(s1);\
    s3 = COMPENSATE(s3);\
    s5 = COMPENSATE(s5);\
    s7 = COMPENSATE(s7); }
273 int i,
shift, sp1, sp2, sp3, sp4;
280 #define COMPENSATE(x) (x)
283 for (i = 0; i < 8; i++) {
287 sp1 = src[ 0] <<
shift;
288 sp2 = src[ 8] <<
shift;
289 sp3 = src[16] <<
shift;
290 sp4 = src[24] <<
shift;
292 src[32], src[40], src[48], src[56],
293 dst[ 0], dst[ 8], dst[16], dst[24],
294 dst[32], dst[40], dst[48], dst[56],
295 t0, t1, t2, t3, t4, t5, t6, t7, t8);
297 dst[ 0] = dst[ 8] = dst[16] = dst[24] =
298 dst[32] = dst[40] = dst[48] = dst[56] = 0;
306 #define COMPENSATE(x) (x)
308 for (i = 0; i < 8; i++) {
309 if ( !src[0] && !src[1] && !src[2] && !src[3]
310 && !src[4] && !src[5] && !src[6] && !src[7]) {
311 memset(out, 0, 8 *
sizeof(out[0]));
313 INV_HAAR8(src[0], src[1], src[2], src[3],
314 src[4], src[5], src[6], src[7],
315 out[0], out[1], out[2], out[3],
316 out[4], out[5], out[6], out[7],
317 t0, t1, t2, t3, t4, t5, t6, t7, t8);
331 dc_coeff = (*in + 0) >> 3;
333 for (y = 0; y < blk_size; out += pitch, y++) {
334 for (x = 0; x < blk_size; x++)
340 #define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
346 #define IVI_IREFLECT(s1, s2, o1, o2, t) \
347 t = ((s1 + s2*2 + 2) >> 2) + s1;\
348 o2 = ((s1*2 - s2 + 2) >> 2) - s2;\
352 #define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
353 t = s2 + ((s1*4 - s2 + 4) >> 3);\
354 o2 = s1 + ((-s1 - s2*4 + 4) >> 3);\
/**
 * Inverse 8-point slant transform (Indeo), written as a macro so the
 * same butterfly network serves both row and column passes.
 *
 * s1..s8 - input coefficients; the parameter-list ordering
 *          (s1, s4, s8, s5, s2, s6, s3, s7) matches the layout the
 *          butterfly stages below expect
 * d1..d8 - output destinations
 * t0     - scratch temporary used by every IVI_SLANT_BFLY / IVI_IREFLECT
 *          invocation
 * t1..t8 - intermediate temporaries (caller-provided lvalues)
 *
 * Requires IVI_SLANT_PART4, IVI_SLANT_BFLY and IVI_IREFLECT (defined
 * earlier in this file) plus a user-supplied COMPENSATE(x) macro that
 * applies the final output scaling.
 */
#define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\
                       d1, d2, d3, d4, d5, d6, d7, d8,\
                       t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    IVI_SLANT_PART4(s4, s5, t4, t5, t0);\
    IVI_SLANT_BFLY(s1, t5, t1, t5, t0); IVI_SLANT_BFLY(s2, s6, t2, t6, t0);\
    IVI_SLANT_BFLY(s7, s3, t7, t3, t0); IVI_SLANT_BFLY(t4, s8, t4, t8, t0);\
    IVI_SLANT_BFLY(t1, t2, t1, t2, t0); IVI_IREFLECT (t4, t3, t4, t3, t0);\
    IVI_SLANT_BFLY(t5, t6, t5, t6, t0); IVI_IREFLECT (t8, t7, t8, t7, t0);\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    IVI_SLANT_BFLY(t5, t8, t5, t8, t0); IVI_SLANT_BFLY(t6, t7, t6, t7, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);\
    d5 = COMPENSATE(t5);\
    d6 = COMPENSATE(t6);\
    d7 = COMPENSATE(t7);\
    d8 = COMPENSATE(t8);}
/**
 * Inverse 4-point slant transform (Indeo).
 *
 * s1..s4 - input coefficients; the parameter-list ordering
 *          (s1, s4, s2, s3) matches the butterfly stages below
 * d1..d4 - output destinations
 * t0     - scratch temporary used by the butterfly/reflection macros
 * t1..t4 - intermediate temporaries (caller-provided lvalues)
 *
 * Requires IVI_SLANT_BFLY and IVI_IREFLECT (defined earlier in this
 * file) plus a user-supplied COMPENSATE(x) macro for the final scaling.
 */
#define IVI_INV_SLANT4(s1, s4, s2, s3, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\
    IVI_SLANT_BFLY(s1, s2, t1, t2, t0); IVI_IREFLECT (s4, s3, t4, t3, t0);\
    IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\
    d1 = COMPENSATE(t1);\
    d2 = COMPENSATE(t2);\
    d3 = COMPENSATE(t3);\
    d4 = COMPENSATE(t4);}
397 #define COMPENSATE(x) (x)
400 for (i = 0; i < 8; i++) {
402 IVI_INV_SLANT8(src[0], src[8], src[16], src[24], src[32], src[40], src[48], src[56],
403 dst[0], dst[8], dst[16], dst[24], dst[32], dst[40], dst[48], dst[56],
404 t0, t1, t2, t3, t4, t5, t6, t7, t8);
406 dst[0] = dst[8] = dst[16] = dst[24] = dst[32] = dst[40] = dst[48] = dst[56] = 0;
413 #define COMPENSATE(x) ((x + 1)>>1)
415 for (i = 0; i < 8; i++) {
416 if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) {
417 memset(out, 0, 8*
sizeof(out[0]));
419 IVI_INV_SLANT8(src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7],
420 out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7],
421 t0, t1, t2, t3, t4, t5, t6, t7, t8);
437 #define COMPENSATE(x) (x)
440 for (i = 0; i < 4; i++) {
443 dst[0], dst[4], dst[8], dst[12],
446 dst[0] = dst[4] = dst[8] = dst[12] = 0;
453 #define COMPENSATE(x) ((x + 1)>>1)
455 for (i = 0; i < 4; i++) {
456 if (!src[0] && !src[1] && !src[2] && !src[3]) {
457 out[0] = out[1] = out[2] = out[3] = 0;
460 out[0], out[1], out[2], out[3],
474 dc_coeff = (*in + 1) >> 1;
476 for (y = 0; y < blk_size; out += pitch, y++) {
477 for (x = 0; x < blk_size; x++)
487 #define COMPENSATE(x) ((x + 1)>>1)
488 for (i = 0; i < 8; i++) {
489 if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) {
490 memset(out, 0, 8*
sizeof(out[0]));
492 IVI_INV_SLANT8( in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7],
493 out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7],
494 t0, t1, t2, t3, t4, t5, t6, t7, t8);
507 dc_coeff = (*in + 1) >> 1;
509 for (x = 0; x < blk_size; x++)
514 for (y = 1; y < blk_size; out += pitch, y++) {
515 for (x = 0; x < blk_size; x++)
522 int i, row2, row4, row8;
529 #define COMPENSATE(x) ((x + 1)>>1)
530 for (i = 0; i < 8; i++) {
532 IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56],
533 out[0], out[pitch], out[row2], out[row2 + pitch], out[row4],
534 out[row4 + pitch], out[row4 + row2], out[row8 - pitch],
535 t0, t1, t2, t3, t4, t5, t6, t7, t8);
537 out[0] = out[pitch] = out[row2] = out[row2 + pitch] = out[row4] =
538 out[row4 + pitch] = out[row4 + row2] = out[row8 - pitch] = 0;
552 dc_coeff = (*in + 1) >> 1;
554 for (y = 0; y < blk_size; out += pitch, y++) {
556 for (x = 1; x < blk_size; x++)
566 for (y = 0; y < 8; out += pitch, in += 8, y++)
567 for (x = 0; x < 8; x++)
577 memset(out + 1, 0, 7*
sizeof(out[0]));
580 for (y = 1; y < 8; out += pitch, y++)
581 memset(out, 0, 8*
sizeof(out[0]));
584 #define IVI_MC_TEMPLATE(size, suffix, OP) \
585 void ff_ivi_mc_ ## size ##x## size ## suffix (int16_t *buf, const int16_t *ref_buf, \
586 uint32_t pitch, int mc_type) \
589 const int16_t *wptr; \
593 for (i = 0; i < size; i++, buf += pitch, ref_buf += pitch) { \
594 for (j = 0; j < size; j++) {\
595 OP(buf[j], ref_buf[j]); \
600 for (i = 0; i < size; i++, buf += pitch, ref_buf += pitch) \
601 for (j = 0; j < size; j++) \
602 OP(buf[j], (ref_buf[j] + ref_buf[j+1]) >> 1); \
605 wptr = ref_buf + pitch; \
606 for (i = 0; i < size; i++, buf += pitch, wptr += pitch, ref_buf += pitch) \
607 for (j = 0; j < size; j++) \
608 OP(buf[j], (ref_buf[j] + wptr[j]) >> 1); \
611 wptr = ref_buf + pitch; \
612 for (i = 0; i < size; i++, buf += pitch, wptr += pitch, ref_buf += pitch) \
613 for (j = 0; j < size; j++) \
614 OP(buf[j], (ref_buf[j] + ref_buf[j+1] + wptr[j] + wptr[j+1]) >> 2); \
/* Store operation plugged into IVI_MC_TEMPLATE: plain assignment (used
 * for the "_no_delta" motion-compensation variants — overwrite dest). */
#define OP_PUT(a, b) (a) = (b)
/* Store operation plugged into IVI_MC_TEMPLATE: accumulate (used for
 * the "_delta" motion-compensation variants — add prediction to dest). */
#define OP_ADD(a, b) (a) += (b)
/* Instantiate the 4x4 motion-compensation functions from the template
 * defined above: ff_ivi_mc_4x4_no_delta() writes the prediction into the
 * destination (OP_PUT), ff_ivi_mc_4x4_delta() adds it to the existing
 * contents (OP_ADD). */
IVI_MC_TEMPLATE(4, _no_delta, OP_PUT)
IVI_MC_TEMPLATE(4, _delta, OP_ADD)