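/*
 * AltiVec-accelerated forward DCT for 8x8 blocks of int16_t
 * coefficients, computed in single-precision float and written back
 * in place.  (The original file header was lost in extraction.)
 */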
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "gcc_fixes.h"

/* Shorthand casts between AltiVec vector types. */
#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))

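/* Cosine constants: Ck = cos(k * PI / 16), written out as the exact
 * decimal expansion of the nearest single-precision float. */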
#define C1 0.98078525066375732421875000
#define C2 0.92387950420379638671875000
#define C3 0.83146959543228149414062500
#define C4 0.70710676908493041992187500
#define C5 0.55557024478912353515625000
#define C6 0.38268342614173889160156250
#define C7 0.19509032368659973144531250
#define SQRT_2 1.41421353816986083984375000

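/* Weight products used by the butterfly stages below: combinations of
 * the cosine constants, pre-multiplied so the inner loops can run
 * entirely on fused multiply-adds. */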
#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))

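/* The twelve weights live in three float vectors; LD_Wn splats weight
 * n across all four lanes of a register. */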
static vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)

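/* One 8-point scaled FDCT pass over eight vectors of four floats.
 * Stage 1 folds the inputs into sums and differences; the even part
 * produces outputs b0/b4 and b2/b6, the odd part b1/b3/b5/b7, with
 * every multiply fused into a vec_madd against a splatted weight. */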
#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) \
    /* Stage 1: butterfly of input pairs. */ \
    x0 = vec_add(b0, b7); \
    x7 = vec_sub(b0, b7); \
    x1 = vec_add(b1, b6); \
    x6 = vec_sub(b1, b6); \
    x2 = vec_add(b2, b5); \
    x5 = vec_sub(b2, b5); \
    x3 = vec_add(b3, b4); \
    x4 = vec_sub(b3, b4); \
    \
    /* Even part: outputs 0 and 4. */ \
    b7 = vec_add(x0, x3); \
    b1 = vec_add(x1, x2); \
    b0 = vec_add(b7, b1); \
    b4 = vec_sub(b7, b1); \
    \
    /* Even part: outputs 2 and 6. */ \
    b2 = vec_sub(x0, x3); \
    b6 = vec_sub(x1, x2); \
    b5 = vec_add(b6, b2); \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5); \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5); \
    \
    /* Odd part: outputs 1, 3, 5 and 7. */ \
    x0 = vec_add(x4, x7); \
    x1 = vec_add(x5, x6); \
    x2 = vec_add(x4, x6); \
    x3 = vec_add(x5, x7); \
    x8 = vec_add(x2, x3); \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8); \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8); \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0); \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1); \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1); \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0); \
    \
    b7 = vec_add(b7, x2); \
    b5 = vec_add(b5, x3); \
    b3 = vec_add(b3, x2); \
    b1 = vec_add(b1, x3);

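/* FDCTCOL is identical to FDCTROW; it is applied after the transpose,
 * so the same butterfly serves as the column pass. */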
#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) \
    x0 = vec_add(b0, b7); \
    x7 = vec_sub(b0, b7); \
    x1 = vec_add(b1, b6); \
    x6 = vec_sub(b1, b6); \
    x2 = vec_add(b2, b5); \
    x5 = vec_sub(b2, b5); \
    x3 = vec_add(b3, b4); \
    x4 = vec_sub(b3, b4); \
    \
    b7 = vec_add(x0, x3); \
    b1 = vec_add(x1, x2); \
    b0 = vec_add(b7, b1); \
    b4 = vec_sub(b7, b1); \
    \
    b2 = vec_sub(x0, x3); \
    b6 = vec_sub(x1, x2); \
    b5 = vec_add(b6, b2); \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5); \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5); \
    \
    x0 = vec_add(x4, x7); \
    x1 = vec_add(x5, x6); \
    x2 = vec_add(x4, x6); \
    x3 = vec_add(x5, x7); \
    x8 = vec_add(x2, x3); \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8); \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8); \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0); \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1); \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1); \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0); \
    \
    b7 = vec_add(b7, x2); \
    b5 = vec_add(b5, x3); \
    b3 = vec_add(b3, x2); \
    b1 = vec_add(b1, x3);

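/* In-place forward DCT of one 8x8 block of int16_t coefficients.  The
 * first butterfly stage runs in 16-bit integer arithmetic; everything
 * after that is single-precision float, and the result is rounded,
 * packed and stored back as int16_t. */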
void fdct_altivec(int16_t *block)
{
    POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);

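    /* mzero = -0.0: splat all-ones words, then shift each word left by
     * itself (shift counts are taken modulo 32, so by 31), leaving only
     * the sign bit.  -0.0 is the additive identity, so it is the safe
     * zero addend for vec_madd. */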
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));

    /* Load the weight table into three registers for the LD_Wn macros. */
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);

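    /* Transpose the 8x8 int16 matrix.  Each vec_ld pulls one row of
     * eight coefficients (the second load is offset four rows, 16*4
     * bytes, ahead); three rounds of merge-high/merge-low interleaves
     * leave the transposed rows in b00..b70. */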
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

    bp = (vector signed short *)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16 * 4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16 * 4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16 * 4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16 * 4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16

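    /* Row pass.  The opening butterflies are cheap enough to run in
     * 16-bit integer arithmetic; the data is converted to float (CTF0
     * and CTFX below) only where the weight multiplies begin. */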
#if 1
    /* First stages of FDCTROW, carried out on int16 lanes. */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

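    /* CTF0(n): convert vector n from eight int16 lanes to the float
     * pair bn0/bn1.  The low half must be unpacked before bn0 is
     * overwritten with the high half. */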
#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

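    /* CTFX(x, b): like CTF0, but unpacks int16 vector x into the float
     * pair b0/b1 without clobbering x. */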
#define CTFX(x, b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX

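    /* Odd part of the row pass for one half of the lanes (the b*0
     * vectors); the same sequence follows for the other half. */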
    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);

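    /* Same odd-part sequence for the other half of the lanes. */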
    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);

#else

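    /* Unused reference path: convert all eight vectors to float up
     * front and run the generic FDCTROW macro on each half.  Note the
     * vs32(...) lvalue casts rely on an old GCC extension. */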
#define CTF(n) \
    vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
    vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif

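    /* Transpose the float matrix back so the column pass can run as
     * another row pass.  The 8x8 is handled as four 4x4 quadrants:
     * the diagonal quadrants transpose in place, while the
     * off-diagonal quadrants swap between the *0 and *1 register
     * sets. */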
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);

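    /* Column pass over both halves. */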
    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);

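    /* CTS(n): round to nearest, convert the two float halves of row n
     * to int32 (vec_cts truncates, hence the explicit vec_round), pack
     * them back to eight int16 lanes and store over the input block. */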
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short *)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS

    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}