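/*
 * MMX/MMX2/3DNow!/SSE2 optimized DSP utilities
 */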
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

int mm_flags;
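
/* pixel constants: packed-byte (pb), packed-word (pw) and packed-double (pd)
 * values used by the SIMD routines below */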
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
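
/* MOVQ_BFE fills regd with 0xfe bytes: pcmpeqd sets all bits, and paddb
 * doubles each 0xff byte to 0xfe. The result masks off the low bit of
 * every byte before the shift in the averaging macros below. */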
#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
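/* in PIC mode, materializing the constants in registers avoids having to
 * reach ff_bone/ff_wtwo through the GOT */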
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
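/* Byte-wise average of rega and regb into regr, using regb as scratch;
 * regfe must hold 0xfefefefefefefefe (see MOVQ_BFE). The NO_RND variant
 * computes (a+b)>>1, the plain variant rounds up to (a+b+1)>>1. */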
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"
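
/* standard MMX: no-rounding halfpel averaging variants */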
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
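
/* rounding variants */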
#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
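
/* 3DNow! averaging, using pavgusb */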
#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
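
/* MMX2 averaging, using pavgb */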
#define DEF(x) x ## _mmx2

#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
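
/* a plain copy performs no averaging, so the rounding and no-rounding put
 * versions are identical and can simply be aliased */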
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
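
/* Convert a block of 16-bit DCT coefficients to pixels with unsigned
 * saturation (packuswb), storing four rows per asm block. */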
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    p = block;
    pix = pixels;

    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;
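
    /* second half of the block; using "r"(p) with explicit offsets here
     * instead of another "m" operand keeps the compiler from generating
     * poor code for the 8(%3)..56(%3) addressing */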
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
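
/* Like put_pixels_clamped, but the coefficients are signed pixel values:
 * pack with signed saturation, then add the 128 bias. */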
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
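
/* Add the 16-bit residual in block to the existing pixels, with saturation
 * in both the 16-bit sums (paddsw) and the final byte pack (packuswb). */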
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
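
/* Plain copies of 4/8/16-pixel-wide blocks, four rows per loop iteration. */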
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}
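/* SSE2 16-byte copy/average: unaligned loads from src, aligned stores to
 * dst, so dst (and effectively line_size) are assumed 16-byte aligned. */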
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
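
/* Zero n consecutive 8x8 blocks of 16-bit coefficients (128 bytes each). */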
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}
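
/* dst[i] += src[i] for w bytes: 16 bytes per MMX iteration, with a scalar
 * loop for the remaining tail. */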
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}
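/* HuffYUV median prediction: dst[i] = diff[i] + median(left, top[i],
 * left + top[i] - left_top), carrying left/left_top across calls. */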
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
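
/* H.263 in-loop deblocking filter for one 8-pixel edge: operands %0..%3
 * are the four rows around the edge, %4 is 2*strength, %5 is ff_pb_FC. */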
#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm0 \n\t"\
        "movq %0, %%mm1 \n\t"\
        "movq %3, %%mm2 \n\t"\
        "movq %3, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm3, %%mm1 \n\t"\
        "movq %1, %%mm2 \n\t"\
        "movq %1, %%mm3 \n\t"\
        "movq %2, %%mm4 \n\t"\
        "movq %2, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "psubw %%mm2, %%mm4 \n\t"\
        "psubw %%mm3, %%mm5 \n\t"\
        "psllw $2, %%mm4 \n\t"\
        "psllw $2, %%mm5 \n\t"\
        "paddw %%mm0, %%mm4 \n\t"\
        "paddw %%mm1, %%mm5 \n\t"\
        "pxor %%mm6, %%mm6 \n\t"\
        "pcmpgtw %%mm4, %%mm6 \n\t"\
        "pcmpgtw %%mm5, %%mm7 \n\t"\
        "pxor %%mm6, %%mm4 \n\t"\
        "pxor %%mm7, %%mm5 \n\t"\
        "psubw %%mm6, %%mm4 \n\t"\
        "psubw %%mm7, %%mm5 \n\t"\
        "psrlw $3, %%mm4 \n\t"\
        "psrlw $3, %%mm5 \n\t"\
        "packuswb %%mm5, %%mm4 \n\t"\
        "packsswb %%mm7, %%mm6 \n\t"\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd %4, %%mm2 \n\t"\
        "punpcklbw %%mm2, %%mm2 \n\t"\
        "punpcklbw %%mm2, %%mm2 \n\t"\
        "punpcklbw %%mm2, %%mm2 \n\t"\
        "psubusb %%mm4, %%mm2 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "psubusb %%mm4, %%mm3 \n\t"\
        "psubb %%mm3, %%mm2 \n\t"\
        "movq %1, %%mm3 \n\t"\
        "movq %2, %%mm4 \n\t"\
        "pxor %%mm6, %%mm3 \n\t"\
        "pxor %%mm6, %%mm4 \n\t"\
        "paddusb %%mm2, %%mm3 \n\t"\
        "psubusb %%mm2, %%mm4 \n\t"\
        "pxor %%mm6, %%mm3 \n\t"\
        "pxor %%mm6, %%mm4 \n\t"\
        "paddusb %%mm2, %%mm2 \n\t"\
        "packsswb %%mm1, %%mm0 \n\t"\
        "pcmpgtb %%mm0, %%mm7 \n\t"\
        "pxor %%mm7, %%mm0 \n\t"\
        "psubb %%mm7, %%mm0 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "psubusb %%mm2, %%mm0 \n\t"\
        "psubb %%mm0, %%mm1 \n\t"\
        "pand %5, %%mm1 \n\t"\
        "psrlw $2, %%mm1 \n\t"\
        "pxor %%mm7, %%mm1 \n\t"\
        "psubb %%mm7, %%mm1 \n\t"\
        "movq %0, %%mm5 \n\t"\
        "movq %3, %%mm6 \n\t"\
        "psubb %%mm1, %%mm5 \n\t"\
        "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
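/* Transpose a 4x4 block of bytes using punpckl/punpckh. */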
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile(
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
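
/* Horizontal filter: transpose the 8x4 edge region into a temp buffer,
 * run the vertical filter on it, then transpose the result back. */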
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}
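/* Replicate the picture edges into a border of width w; this MMX version
 * only handles w == 8 or w == 16. */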
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;

    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
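        /* replicate the top and bottom edge rows (covering the corners too) */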
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
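
/* PNG Paeth prediction: for each byte choose the neighbor (left, top,
 * top-left) closest to left + top - topleft and add it to the source.
 * abs3 supplies |mm3|, |mm4|, |mm5| (MMX2 or SSSE3 pabsw variants). */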
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw %%mm5, %%mm7 \n"\
        "pmaxsw %%mm7, %%mm5 \n"\
        "pxor %%mm6, %%mm6 \n"\
        "pxor %%mm7, %%mm7 \n"\
        "psubw %%mm3, %%mm6 \n"\
        "psubw %%mm4, %%mm7 \n"\
        "pmaxsw %%mm6, %%mm3 \n"\
        "pmaxsw %%mm7, %%mm4 \n"\
        "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw %%mm3, %%mm3 \n"\
        "pabsw %%mm4, %%mm4 \n"\
        "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
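/* MPEG-4 quarter-pel lowpass filters. The 8-tap kernel is
 * (-1, 3, -6, 20, 20, -6, 3, -1) with rounding and a final >>5, as spelled
 * out in the scalar reference code of the 3DNow! paths below. */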
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 " \n\t" \
        "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" \
        "pmullw " #m3 ", %%mm4 \n\t" \
        "movq "#in7", " #m3 " \n\t" \
        "movq "#in0", %%mm5 \n\t" \
        "paddw " #m3 ", %%mm5 \n\t" \
        "psubw %%mm5, %%mm4 \n\t" \
        "movq "#in1", %%mm5 \n\t" \
        "movq "#in2", %%mm6 \n\t" \
        "paddw " #m6 ", %%mm5 \n\t" \
        "paddw " #m5 ", %%mm6 \n\t" \
        "paddw %%mm6, %%mm6 \n\t" \
        "psubw %%mm6, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" \
        "paddw " #rnd ", %%mm4 \n\t" \
        "paddw %%mm4, %%mm5 \n\t" \
        "psraw $5, %%mm5 \n\t"\
        "packuswb %%mm5, %%mm5 \n\t"\
        OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm1 \n\t" \
        "movq %%mm0, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" \
        "movq %%mm2, %%mm3 \n\t" \
        "movq %%mm2, %%mm4 \n\t" \
        "psllq $8, %%mm2 \n\t" \
        "psllq $16, %%mm3 \n\t" \
        "psllq $24, %%mm4 \n\t" \
        "punpckhbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "punpckhbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm3, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm6 \n\t" \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
\
\
        "movq 5(%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm5 \n\t" \
        "movq %%mm0, %%mm6 \n\t" \
        "psrlq $8, %%mm0 \n\t" \
        "psrlq $16, %%mm5 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpcklbw %%mm7, %%mm5 \n\t" \
        "paddw %%mm0, %%mm2 \n\t" \
        "paddw %%mm5, %%mm3 \n\t" \
        "paddw %%mm2, %%mm2 \n\t" \
        "psubw %%mm2, %%mm3 \n\t" \
        "movq %%mm6, %%mm2 \n\t" \
        "psrlq $24, %%mm6 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm6 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
        "paddw %%mm2, %%mm1 \n\t" \
        "paddw %%mm6, %%mm4 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
        "psubw %%mm4, %%mm3 \n\t" \
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" \
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
\
\
        "movq 9(%0), %%mm1 \n\t" \
        "movq %%mm1, %%mm4 \n\t" \
        "movq %%mm1, %%mm3 \n\t" \
        "psrlq $8, %%mm1 \n\t" \
        "psrlq $16, %%mm4 \n\t" \
        "punpcklbw %%mm7, %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "movq %%mm3, %%mm5 \n\t" \
        "psrlq $24, %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" \
        "punpcklbw %%mm7, %%mm3 \n\t" \
        "paddw %%mm3, %%mm2 \n\t" \
        "psubw %%mm2, %%mm0 \n\t" \
        "movq %%mm5, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" \
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
\
\
        "paddw %%mm5, %%mm3 \n\t" \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
        "paddw %%mm4, %%mm6 \n\t" \
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
        "paddw %%mm1, %%mm4 \n\t" \
        "paddw %%mm2, %%mm5 \n\t" \
        "paddw %%mm6, %%mm6 \n\t" \
        "psubw %%mm6, %%mm4 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" \
        "psubw %%mm5, %%mm3 \n\t" \
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" \
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm1 \n\t" \
        "movq %%mm0, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" \
        "movq %%mm2, %%mm3 \n\t" \
        "movq %%mm2, %%mm4 \n\t" \
        "psllq $8, %%mm2 \n\t" \
        "psllq $16, %%mm3 \n\t" \
        "psllq $24, %%mm4 \n\t" \
        "punpckhbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "punpckhbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm3, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm6 \n\t" \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
\
\
        "movd 5(%0), %%mm5 \n\t" \
        "punpcklbw %%mm7, %%mm5 \n\t" \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
        "paddw %%mm5, %%mm1 \n\t" \
        "paddw %%mm6, %%mm2 \n\t" \
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
        "paddw %%mm6, %%mm3 \n\t" \
        "paddw %%mm5, %%mm4 \n\t" \
        "paddw %%mm2, %%mm2 \n\t" \
        "psubw %%mm2, %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
        "psubw %%mm4, %%mm3 \n\t" \
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" \
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
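/* Vertical lowpass: pass 1 unpacks the source bytes into a transposed
 * 16-bit temp buffer, pass 2 runs QPEL_V_LOW down each column. */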
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
\
    __asm__ volatile(\
\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t" \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
\
    __asm__ volatile(\
\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
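/* store/average primitives plugged into the qpel macros above */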
01571 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
01572 #define AVG_3DNOW_OP(a,b,temp, size) \
01573 "mov" #size " " #b ", " #temp " \n\t"\
01574 "pavgusb " #temp ", " #a " \n\t"\
01575 "mov" #size " " #a ", " #b " \n\t"
01576 #define AVG_MMX2_OP(a,b,temp, size) \
01577 "mov" #size " " #b ", " #temp " \n\t"\
01578 "pavgb " #temp ", " #a " \n\t"\
01579 "mov" #size " " #a ", " #b " \n\t"
01580
01581 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
01582 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
01583 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
01584 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
01585 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
01586 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
01587 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
01588 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
01589 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
01590
01591
01592
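/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */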
01593
01594 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
01595 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01596 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
01597 }
01598 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
01599 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01600 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
01601 }
01602
01603 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
01604 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
01605 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
01606 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
01607 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
01608 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
01609 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
01610 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
01611 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
01612 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
01613 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01614 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
01615 }\
01616 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01617 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
01618 }\
01619 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
01620 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
01621 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
01622 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
01623 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
01624 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
01625 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
01626 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
01627
01628 QPEL_2TAP(put_, 16, mmx2)
01629 QPEL_2TAP(avg_, 16, mmx2)
01630 QPEL_2TAP(put_, 8, mmx2)
01631 QPEL_2TAP(avg_, 8, mmx2)
01632 QPEL_2TAP(put_, 16, 3dnow)
01633 QPEL_2TAP(avg_, 16, 3dnow)
01634 QPEL_2TAP(put_, 8, 3dnow)
01635 QPEL_2TAP(avg_, 8, 3dnow)
01636
01637
01638 #if 0
01639 static void just_return(void) { return; }
01640 #endif
01641
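/* gmc_mmx: global motion compensation for the common 8x8 case. Blocks whose
   motion cannot be represented in 16-bit subpel coordinates, or whose deltas
   are not multiples of 16, fall through to the C version (checks below). */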
01642 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01643 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
01644 const int w = 8;
01645 const int ix = ox>>(16+shift);
01646 const int iy = oy>>(16+shift);
01647 const int oxs = ox>>4;
01648 const int oys = oy>>4;
01649 const int dxxs = dxx>>4;
01650 const int dxys = dxy>>4;
01651 const int dyxs = dyx>>4;
01652 const int dyys = dyy>>4;
01653 const uint16_t r4[4] = {r,r,r,r};
01654 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
01655 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
01656 const uint64_t shift2 = 2*shift;
01657 uint8_t edge_buf[(h+1)*stride];
01658 int x, y;
01659
01660 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
01661 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
01662 const int dxh = dxy*(h-1);
01663 const int dyw = dyx*(w-1);
01664 if( // non-constant fullpel offset (3% of blocks)
01665 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
01666 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
01667 // uses more than 16 bits of subpel mv (only at huge resolution)
01668 || (dxx|dxy|dyx|dyy)&15 )
01669 {
01670 //FIXME could still use mmx for some of the rows
01671 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
01672 return;
01673 }
01674
01675 src += ix + iy*stride;
01676 if( (unsigned)ix >= width-w ||
01677 (unsigned)iy >= height-h )
01678 {
01679 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
01680 src = edge_buf;
01681 }
01682
01683 __asm__ volatile(
01684 "movd %0, %%mm6 \n\t"
01685 "pxor %%mm7, %%mm7 \n\t"
01686 "punpcklwd %%mm6, %%mm6 \n\t"
01687 "punpcklwd %%mm6, %%mm6 \n\t"
01688 :: "r"(1<<shift)
01689 );
01690
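/* walk the 8-wide block in 4-pixel columns; dx4/dy4 hold the per-pixel
   subpel source coordinates and are advanced one row at a time by the
   dxy4/dyy4 deltas inside the inner loop */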
01691 for(x=0; x<w; x+=4){
01692 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
01693 oxs - dxys + dxxs*(x+1),
01694 oxs - dxys + dxxs*(x+2),
01695 oxs - dxys + dxxs*(x+3) };
01696 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
01697 oys - dyys + dyxs*(x+1),
01698 oys - dyys + dyxs*(x+2),
01699 oys - dyys + dyxs*(x+3) };
01700
01701 for(y=0; y<h; y++){
01702 __asm__ volatile(
01703 "movq %0, %%mm4 \n\t"
01704 "movq %1, %%mm5 \n\t"
01705 "paddw %2, %%mm4 \n\t"
01706 "paddw %3, %%mm5 \n\t"
01707 "movq %%mm4, %0 \n\t"
01708 "movq %%mm5, %1 \n\t"
01709 "psrlw $12, %%mm4 \n\t"
01710 "psrlw $12, %%mm5 \n\t"
01711 : "+m"(*dx4), "+m"(*dy4)
01712 : "m"(*dxy4), "m"(*dyy4)
01713 );
01714
01715 __asm__ volatile(
01716 "movq %%mm6, %%mm2 \n\t"
01717 "movq %%mm6, %%mm1 \n\t"
01718 "psubw %%mm4, %%mm2 \n\t"
01719 "psubw %%mm5, %%mm1 \n\t"
01720 "movq %%mm2, %%mm0 \n\t"
01721 "movq %%mm4, %%mm3 \n\t"
01722 "pmullw %%mm1, %%mm0 \n\t"
01723 "pmullw %%mm5, %%mm3 \n\t"
01724 "pmullw %%mm5, %%mm2 \n\t"
01725 "pmullw %%mm4, %%mm1 \n\t"
01726
01727 "movd %4, %%mm5 \n\t"
01728 "movd %3, %%mm4 \n\t"
01729 "punpcklbw %%mm7, %%mm5 \n\t"
01730 "punpcklbw %%mm7, %%mm4 \n\t"
01731 "pmullw %%mm5, %%mm3 \n\t"
01732 "pmullw %%mm4, %%mm2 \n\t"
01733
01734 "movd %2, %%mm5 \n\t"
01735 "movd %1, %%mm4 \n\t"
01736 "punpcklbw %%mm7, %%mm5 \n\t"
01737 "punpcklbw %%mm7, %%mm4 \n\t"
01738 "pmullw %%mm5, %%mm1 \n\t"
01739 "pmullw %%mm4, %%mm0 \n\t"
01740 "paddw %5, %%mm1 \n\t"
01741 "paddw %%mm3, %%mm2 \n\t"
01742 "paddw %%mm1, %%mm0 \n\t"
01743 "paddw %%mm2, %%mm0 \n\t"
01744
01745 "psrlw %6, %%mm0 \n\t"
01746 "packuswb %%mm0, %%mm0 \n\t"
01747 "movd %%mm0, %0 \n\t"
01748
01749 : "=m"(dst[x+y*stride])
01750 : "m"(src[0]), "m"(src[1]),
01751 "m"(src[stride]), "m"(src[stride+1]),
01752 "m"(*r4), "m"(shift2)
01753 );
01754 src += stride;
01755 }
01756 src += 4-h*stride;
01757 }
01758 }
01759
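/* simple software prefetch over h rows: prefetcht0 on MMX2/SSE CPUs,
   the 3DNow! prefetch instruction otherwise */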
01760 #define PREFETCH(name, op) \
01761 static void name(void *mem, int stride, int h){\
01762 const uint8_t *p= mem;\
01763 do{\
01764 __asm__ volatile(#op" %0" :: "m"(*p));\
01765 p+= stride;\
01766 }while(--h);\
01767 }
01768 PREFETCH(prefetch_mmx2, prefetcht0)
01769 PREFETCH(prefetch_3dnow, prefetch)
01770 #undef PREFETCH
01771
01772 #include "h264dsp_mmx.c"
01773 #include "rv40dsp_mmx.c"
01774
01775
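/* CAVS specific */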
01776 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
01777 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
01778
01779 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01780 put_pixels8_mmx(dst, src, stride, 8);
01781 }
01782 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01783 avg_pixels8_mmx(dst, src, stride, 8);
01784 }
01785 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01786 put_pixels16_mmx(dst, src, stride, 16);
01787 }
01788 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01789 avg_pixels16_mmx(dst, src, stride, 16);
01790 }
01791
01792
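/* VC1 specific */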
01793 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
01794
01795 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01796 put_pixels8_mmx(dst, src, stride, 8);
01797 }
01798
01799
01800 void ff_mmx_idct(DCTELEM *block);
01801 void ff_mmxext_idct(DCTELEM *block);
01802
01803
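/* XXX: those functions should be suppressed ASAP when all IDCTs are converted */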
01804
01805 #if CONFIG_GPL
01806 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01807 {
01808 ff_mmx_idct (block);
01809 put_pixels_clamped_mmx(block, dest, line_size);
01810 }
01811 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01812 {
01813 ff_mmx_idct (block);
01814 add_pixels_clamped_mmx(block, dest, line_size);
01815 }
01816 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01817 {
01818 ff_mmxext_idct (block);
01819 put_pixels_clamped_mmx(block, dest, line_size);
01820 }
01821 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01822 {
01823 ff_mmxext_idct (block);
01824 add_pixels_clamped_mmx(block, dest, line_size);
01825 }
01826 #endif
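/* Xvid IDCT wrappers: run the IDCT on the block in place, then clamp-copy
   or add the result into the destination picture */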
01827 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
01828 {
01829 ff_idct_xvid_mmx (block);
01830 put_pixels_clamped_mmx(block, dest, line_size);
01831 }
01832 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
01833 {
01834 ff_idct_xvid_mmx (block);
01835 add_pixels_clamped_mmx(block, dest, line_size);
01836 }
01837 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
01838 {
01839 ff_idct_xvid_mmx2 (block);
01840 put_pixels_clamped_mmx(block, dest, line_size);
01841 }
01842 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
01843 {
01844 ff_idct_xvid_mmx2 (block);
01845 add_pixels_clamped_mmx(block, dest, line_size);
01846 }
01847
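/* branchless Vorbis inverse channel coupling: sign masks derived from the
   mag and ang values decide whether ang is added to or subtracted from mag,
   avoiding per-sample branches */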
01848 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
01849 {
01850 int i;
01851 __asm__ volatile("pxor %%mm7, %%mm7":);
01852 for(i=0; i<blocksize; i+=2) {
01853 __asm__ volatile(
01854 "movq %0, %%mm0 \n\t"
01855 "movq %1, %%mm1 \n\t"
01856 "movq %%mm0, %%mm2 \n\t"
01857 "movq %%mm1, %%mm3 \n\t"
01858 "pfcmpge %%mm7, %%mm2 \n\t"
01859 "pfcmpge %%mm7, %%mm3 \n\t"
01860 "pslld $31, %%mm2 \n\t"
01861 "pxor %%mm2, %%mm1 \n\t"
01862 "movq %%mm3, %%mm4 \n\t"
01863 "pand %%mm1, %%mm3 \n\t"
01864 "pandn %%mm1, %%mm4 \n\t"
01865 "pfadd %%mm0, %%mm3 \n\t"
01866 "pfsub %%mm4, %%mm0 \n\t"
01867 "movq %%mm3, %1 \n\t"
01868 "movq %%mm0, %0 \n\t"
01869 :"+m"(mag[i]), "+m"(ang[i])
01870 ::"memory"
01871 );
01872 }
01873 __asm__ volatile("femms");
01874 }
01875 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
01876 {
01877 int i;
01878
01879 __asm__ volatile(
01880 "movaps %0, %%xmm5 \n\t"
01881 ::"m"(ff_pdw_80000000[0])
01882 );
01883 for(i=0; i<blocksize; i+=4) {
01884 __asm__ volatile(
01885 "movaps %0, %%xmm0 \n\t"
01886 "movaps %1, %%xmm1 \n\t"
01887 "xorps %%xmm2, %%xmm2 \n\t"
01888 "xorps %%xmm3, %%xmm3 \n\t"
01889 "cmpleps %%xmm0, %%xmm2 \n\t"
01890 "cmpleps %%xmm1, %%xmm3 \n\t"
01891 "andps %%xmm5, %%xmm2 \n\t"
01892 "xorps %%xmm2, %%xmm1 \n\t"
01893 "movaps %%xmm3, %%xmm4 \n\t"
01894 "andps %%xmm1, %%xmm3 \n\t"
01895 "andnps %%xmm1, %%xmm4 \n\t"
01896 "addps %%xmm0, %%xmm3 \n\t"
01897 "subps %%xmm4, %%xmm0 \n\t"
01898 "movaps %%xmm3, %1 \n\t"
01899 "movaps %%xmm0, %0 \n\t"
01900 :"+m"(mag[i]), "+m"(ang[i])
01901 ::"memory"
01902 );
01903 }
01904 }
01905
01906 #define IF1(x) x
01907 #define IF0(x)
01908
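/* AC-3 downmix helpers: MIX5 covers the common 5-channel -> stereo/mono
   case when the matrix has the expected symmetry (verified by the caller
   below); MIX_MISC handles arbitrary matrices via matrix_simd */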
01909 #define MIX5(mono,stereo)\
01910 __asm__ volatile(\
01911 "movss 0(%2), %%xmm5 \n"\
01912 "movss 8(%2), %%xmm6 \n"\
01913 "movss 24(%2), %%xmm7 \n"\
01914 "shufps $0, %%xmm5, %%xmm5 \n"\
01915 "shufps $0, %%xmm6, %%xmm6 \n"\
01916 "shufps $0, %%xmm7, %%xmm7 \n"\
01917 "1: \n"\
01918 "movaps (%0,%1), %%xmm0 \n"\
01919 "movaps 0x400(%0,%1), %%xmm1 \n"\
01920 "movaps 0x800(%0,%1), %%xmm2 \n"\
01921 "movaps 0xc00(%0,%1), %%xmm3 \n"\
01922 "movaps 0x1000(%0,%1), %%xmm4 \n"\
01923 "mulps %%xmm5, %%xmm0 \n"\
01924 "mulps %%xmm6, %%xmm1 \n"\
01925 "mulps %%xmm5, %%xmm2 \n"\
01926 "mulps %%xmm7, %%xmm3 \n"\
01927 "mulps %%xmm7, %%xmm4 \n"\
01928 stereo("addps %%xmm1, %%xmm0 \n")\
01929 "addps %%xmm1, %%xmm2 \n"\
01930 "addps %%xmm3, %%xmm0 \n"\
01931 "addps %%xmm4, %%xmm2 \n"\
01932 mono("addps %%xmm2, %%xmm0 \n")\
01933 "movaps %%xmm0, (%0,%1) \n"\
01934 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
01935 "add $16, %0 \n"\
01936 "jl 1b \n"\
01937 :"+&r"(i)\
01938 :"r"(samples[0]+len), "r"(matrix)\
01939 :"memory"\
01940 );
01941
01942 #define MIX_MISC(stereo)\
01943 __asm__ volatile(\
01944 "1: \n"\
01945 "movaps (%3,%0), %%xmm0 \n"\
01946 stereo("movaps %%xmm0, %%xmm1 \n")\
01947 "mulps %%xmm6, %%xmm0 \n"\
01948 stereo("mulps %%xmm7, %%xmm1 \n")\
01949 "lea 1024(%3,%0), %1 \n"\
01950 "mov %5, %2 \n"\
01951 "2: \n"\
01952 "movaps (%1), %%xmm2 \n"\
01953 stereo("movaps %%xmm2, %%xmm3 \n")\
01954 "mulps (%4,%2), %%xmm2 \n"\
01955 stereo("mulps 16(%4,%2), %%xmm3 \n")\
01956 "addps %%xmm2, %%xmm0 \n"\
01957 stereo("addps %%xmm3, %%xmm1 \n")\
01958 "add $1024, %1 \n"\
01959 "add $32, %2 \n"\
01960 "jl 2b \n"\
01961 "movaps %%xmm0, (%3,%0) \n"\
01962 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
01963 "add $16, %0 \n"\
01964 "jl 1b \n"\
01965 :"+&r"(i), "=&r"(j), "=&r"(k)\
01966 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
01967 :"memory"\
01968 );
01969
01970 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
01971 {
01972 int (*matrix_cmp)[2] = (int(*)[2])matrix;
01973 intptr_t i,j,k;
01974
01975 i = -len*sizeof(float);
01976 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
01977 MIX5(IF0,IF1);
01978 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
01979 MIX5(IF1,IF0);
01980 } else {
01981 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
01982 j = 2*in_ch*sizeof(float);
01983 __asm__ volatile(
01984 "1: \n"
01985 "sub $8, %0 \n"
01986 "movss (%2,%0), %%xmm6 \n"
01987 "movss 4(%2,%0), %%xmm7 \n"
01988 "shufps $0, %%xmm6, %%xmm6 \n"
01989 "shufps $0, %%xmm7, %%xmm7 \n"
01990 "movaps %%xmm6, (%1,%0,4) \n"
01991 "movaps %%xmm7, 16(%1,%0,4) \n"
01992 "jg 1b \n"
01993 :"+&r"(j)
01994 :"r"(matrix_simd), "r"(matrix)
01995 :"memory"
01996 );
01997 if(out_ch == 2) {
01998 MIX_MISC(IF1);
01999 } else {
02000 MIX_MISC(IF0);
02001 }
02002 }
02003 }
02004
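/* dst[i] *= src[i] for len floats; the index counts down to zero,
   16 bytes (3DNow!) or 32 bytes (SSE) per iteration */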
02005 static void vector_fmul_3dnow(float *dst, const float *src, int len){
02006 x86_reg i = (len-4)*4;
02007 __asm__ volatile(
02008 "1: \n\t"
02009 "movq (%1,%0), %%mm0 \n\t"
02010 "movq 8(%1,%0), %%mm1 \n\t"
02011 "pfmul (%2,%0), %%mm0 \n\t"
02012 "pfmul 8(%2,%0), %%mm1 \n\t"
02013 "movq %%mm0, (%1,%0) \n\t"
02014 "movq %%mm1, 8(%1,%0) \n\t"
02015 "sub $16, %0 \n\t"
02016 "jge 1b \n\t"
02017 "femms \n\t"
02018 :"+r"(i)
02019 :"r"(dst), "r"(src)
02020 :"memory"
02021 );
02022 }
02023 static void vector_fmul_sse(float *dst, const float *src, int len){
02024 x86_reg i = (len-8)*4;
02025 __asm__ volatile(
02026 "1: \n\t"
02027 "movaps (%1,%0), %%xmm0 \n\t"
02028 "movaps 16(%1,%0), %%xmm1 \n\t"
02029 "mulps (%2,%0), %%xmm0 \n\t"
02030 "mulps 16(%2,%0), %%xmm1 \n\t"
02031 "movaps %%xmm0, (%1,%0) \n\t"
02032 "movaps %%xmm1, 16(%1,%0) \n\t"
02033 "sub $32, %0 \n\t"
02034 "jge 1b \n\t"
02035 :"+r"(i)
02036 :"r"(dst), "r"(src)
02037 :"memory"
02038 );
02039 }
02040
02041 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
02042 x86_reg i = len*4-16;
02043 __asm__ volatile(
02044 "1: \n\t"
02045 "pswapd 8(%1), %%mm0 \n\t"
02046 "pswapd (%1), %%mm1 \n\t"
02047 "pfmul (%3,%0), %%mm0 \n\t"
02048 "pfmul 8(%3,%0), %%mm1 \n\t"
02049 "movq %%mm0, (%2,%0) \n\t"
02050 "movq %%mm1, 8(%2,%0) \n\t"
02051 "add $16, %1 \n\t"
02052 "sub $16, %0 \n\t"
02053 "jge 1b \n\t"
02054 :"+r"(i), "+r"(src1)
02055 :"r"(dst), "r"(src0)
02056 );
02057 __asm__ volatile("femms");
02058 }
02059 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
02060 x86_reg i = len*4-32;
02061 __asm__ volatile(
02062 "1: \n\t"
02063 "movaps 16(%1), %%xmm0 \n\t"
02064 "movaps (%1), %%xmm1 \n\t"
02065 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
02066 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
02067 "mulps (%3,%0), %%xmm0 \n\t"
02068 "mulps 16(%3,%0), %%xmm1 \n\t"
02069 "movaps %%xmm0, (%2,%0) \n\t"
02070 "movaps %%xmm1, 16(%2,%0) \n\t"
02071 "add $32, %1 \n\t"
02072 "sub $32, %0 \n\t"
02073 "jge 1b \n\t"
02074 :"+r"(i), "+r"(src1)
02075 :"r"(dst), "r"(src0)
02076 );
02077 }
02078
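/* dst = src0*src1 + src2; the step==2 paths scatter the results into
   interleaved output, any other step/bias combination falls back to
   ff_vector_fmul_add_add_c */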
02079 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
02080 const float *src2, int src3, int len, int step){
02081 x86_reg i = (len-4)*4;
02082 if(step == 2 && src3 == 0){
02083 dst += (len-4)*2;
02084 __asm__ volatile(
02085 "1: \n\t"
02086 "movq (%2,%0), %%mm0 \n\t"
02087 "movq 8(%2,%0), %%mm1 \n\t"
02088 "pfmul (%3,%0), %%mm0 \n\t"
02089 "pfmul 8(%3,%0), %%mm1 \n\t"
02090 "pfadd (%4,%0), %%mm0 \n\t"
02091 "pfadd 8(%4,%0), %%mm1 \n\t"
02092 "movd %%mm0, (%1) \n\t"
02093 "movd %%mm1, 16(%1) \n\t"
02094 "psrlq $32, %%mm0 \n\t"
02095 "psrlq $32, %%mm1 \n\t"
02096 "movd %%mm0, 8(%1) \n\t"
02097 "movd %%mm1, 24(%1) \n\t"
02098 "sub $32, %1 \n\t"
02099 "sub $16, %0 \n\t"
02100 "jge 1b \n\t"
02101 :"+r"(i), "+r"(dst)
02102 :"r"(src0), "r"(src1), "r"(src2)
02103 :"memory"
02104 );
02105 }
02106 else if(step == 1 && src3 == 0){
02107 __asm__ volatile(
02108 "1: \n\t"
02109 "movq (%2,%0), %%mm0 \n\t"
02110 "movq 8(%2,%0), %%mm1 \n\t"
02111 "pfmul (%3,%0), %%mm0 \n\t"
02112 "pfmul 8(%3,%0), %%mm1 \n\t"
02113 "pfadd (%4,%0), %%mm0 \n\t"
02114 "pfadd 8(%4,%0), %%mm1 \n\t"
02115 "movq %%mm0, (%1,%0) \n\t"
02116 "movq %%mm1, 8(%1,%0) \n\t"
02117 "sub $16, %0 \n\t"
02118 "jge 1b \n\t"
02119 :"+r"(i)
02120 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02121 :"memory"
02122 );
02123 }
02124 else
02125 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
02126 __asm__ volatile("femms");
02127 }
02128 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
02129 const float *src2, int src3, int len, int step){
02130 x86_reg i = (len-8)*4;
02131 if(step == 2 && src3 == 0){
02132 dst += (len-8)*2;
02133 __asm__ volatile(
02134 "1: \n\t"
02135 "movaps (%2,%0), %%xmm0 \n\t"
02136 "movaps 16(%2,%0), %%xmm1 \n\t"
02137 "mulps (%3,%0), %%xmm0 \n\t"
02138 "mulps 16(%3,%0), %%xmm1 \n\t"
02139 "addps (%4,%0), %%xmm0 \n\t"
02140 "addps 16(%4,%0), %%xmm1 \n\t"
02141 "movss %%xmm0, (%1) \n\t"
02142 "movss %%xmm1, 32(%1) \n\t"
02143 "movhlps %%xmm0, %%xmm2 \n\t"
02144 "movhlps %%xmm1, %%xmm3 \n\t"
02145 "movss %%xmm2, 16(%1) \n\t"
02146 "movss %%xmm3, 48(%1) \n\t"
02147 "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
02148 "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
02149 "movss %%xmm0, 8(%1) \n\t"
02150 "movss %%xmm1, 40(%1) \n\t"
02151 "movhlps %%xmm0, %%xmm2 \n\t"
02152 "movhlps %%xmm1, %%xmm3 \n\t"
02153 "movss %%xmm2, 24(%1) \n\t"
02154 "movss %%xmm3, 56(%1) \n\t"
02155 "sub $64, %1 \n\t"
02156 "sub $32, %0 \n\t"
02157 "jge 1b \n\t"
02158 :"+r"(i), "+r"(dst)
02159 :"r"(src0), "r"(src1), "r"(src2)
02160 :"memory"
02161 );
02162 }
02163 else if(step == 1 && src3 == 0){
02164 __asm__ volatile(
02165 "1: \n\t"
02166 "movaps (%2,%0), %%xmm0 \n\t"
02167 "movaps 16(%2,%0), %%xmm1 \n\t"
02168 "mulps (%3,%0), %%xmm0 \n\t"
02169 "mulps 16(%3,%0), %%xmm1 \n\t"
02170 "addps (%4,%0), %%xmm0 \n\t"
02171 "addps 16(%4,%0), %%xmm1 \n\t"
02172 "movaps %%xmm0, (%1,%0) \n\t"
02173 "movaps %%xmm1, 16(%1,%0) \n\t"
02174 "sub $32, %0 \n\t"
02175 "jge 1b \n\t"
02176 :"+r"(i)
02177 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02178 :"memory"
02179 );
02180 }
02181 else
02182 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
02183 }
02184
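/* MDCT overlap-add windowing, mirroring ff_vector_fmul_window_c:
   dst[i] = s0*wj - s1*wi and dst[j] = s0*wi + s1*wj; only the
   add_bias==0 fast path is implemented in assembly */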
02185 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
02186 const float *win, float add_bias, int len){
02187 #if HAVE_6REGS
02188 if(add_bias == 0){
02189 x86_reg i = -len*4;
02190 x86_reg j = len*4-8;
02191 __asm__ volatile(
02192 "1: \n"
02193 "pswapd (%5,%1), %%mm1 \n"
02194 "movq (%5,%0), %%mm0 \n"
02195 "pswapd (%4,%1), %%mm5 \n"
02196 "movq (%3,%0), %%mm4 \n"
02197 "movq %%mm0, %%mm2 \n"
02198 "movq %%mm1, %%mm3 \n"
02199 "pfmul %%mm4, %%mm2 \n"
02200 "pfmul %%mm5, %%mm3 \n"
02201 "pfmul %%mm4, %%mm1 \n"
02202 "pfmul %%mm5, %%mm0 \n"
02203 "pfadd %%mm3, %%mm2 \n"
02204 "pfsub %%mm0, %%mm1 \n"
02205 "pswapd %%mm2, %%mm2 \n"
02206 "movq %%mm1, (%2,%0) \n"
02207 "movq %%mm2, (%2,%1) \n"
02208 "sub $8, %1 \n"
02209 "add $8, %0 \n"
02210 "jl 1b \n"
02211 "femms \n"
02212 :"+r"(i), "+r"(j)
02213 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02214 );
02215 }else
02216 #endif
02217 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
02218 }
02219
02220 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
02221 const float *win, float add_bias, int len){
02222 #if HAVE_6REGS
02223 if(add_bias == 0){
02224 x86_reg i = -len*4;
02225 x86_reg j = len*4-16;
02226 __asm__ volatile(
02227 "1: \n"
02228 "movaps (%5,%1), %%xmm1 \n"
02229 "movaps (%5,%0), %%xmm0 \n"
02230 "movaps (%4,%1), %%xmm5 \n"
02231 "movaps (%3,%0), %%xmm4 \n"
02232 "shufps $0x1b, %%xmm1, %%xmm1 \n"
02233 "shufps $0x1b, %%xmm5, %%xmm5 \n"
02234 "movaps %%xmm0, %%xmm2 \n"
02235 "movaps %%xmm1, %%xmm3 \n"
02236 "mulps %%xmm4, %%xmm2 \n"
02237 "mulps %%xmm5, %%xmm3 \n"
02238 "mulps %%xmm4, %%xmm1 \n"
02239 "mulps %%xmm5, %%xmm0 \n"
02240 "addps %%xmm3, %%xmm2 \n"
02241 "subps %%xmm0, %%xmm1 \n"
02242 "shufps $0x1b, %%xmm2, %%xmm2 \n"
02243 "movaps %%xmm1, (%2,%0) \n"
02244 "movaps %%xmm2, (%2,%1) \n"
02245 "sub $16, %1 \n"
02246 "add $16, %0 \n"
02247 "jl 1b \n"
02248 :"+r"(i), "+r"(j)
02249 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02250 );
02251 }else
02252 #endif
02253 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
02254 }
02255
02256 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
02257 {
02258 x86_reg i = -4*len;
02259 __asm__ volatile(
02260 "movss %3, %%xmm4 \n"
02261 "shufps $0, %%xmm4, %%xmm4 \n"
02262 "1: \n"
02263 "cvtpi2ps (%2,%0), %%xmm0 \n"
02264 "cvtpi2ps 8(%2,%0), %%xmm1 \n"
02265 "cvtpi2ps 16(%2,%0), %%xmm2 \n"
02266 "cvtpi2ps 24(%2,%0), %%xmm3 \n"
02267 "movlhps %%xmm1, %%xmm0 \n"
02268 "movlhps %%xmm3, %%xmm2 \n"
02269 "mulps %%xmm4, %%xmm0 \n"
02270 "mulps %%xmm4, %%xmm2 \n"
02271 "movaps %%xmm0, (%1,%0) \n"
02272 "movaps %%xmm2, 16(%1,%0) \n"
02273 "add $32, %0 \n"
02274 "jl 1b \n"
02275 :"+r"(i)
02276 :"r"(dst+len), "r"(src+len), "m"(mul)
02277 );
02278 }
02279
02280 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
02281 {
02282 x86_reg i = -4*len;
02283 __asm__ volatile(
02284 "movss %3, %%xmm4 \n"
02285 "shufps $0, %%xmm4, %%xmm4 \n"
02286 "1: \n"
02287 "cvtdq2ps (%2,%0), %%xmm0 \n"
02288 "cvtdq2ps 16(%2,%0), %%xmm1 \n"
02289 "mulps %%xmm4, %%xmm0 \n"
02290 "mulps %%xmm4, %%xmm1 \n"
02291 "movaps %%xmm0, (%1,%0) \n"
02292 "movaps %%xmm1, 16(%1,%0) \n"
02293 "add $32, %0 \n"
02294 "jl 1b \n"
02295 :"+r"(i)
02296 :"r"(dst+len), "r"(src+len), "m"(mul)
02297 );
02298 }
02299
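/* float -> int16 conversion with saturating packs; the pointers are
   pre-advanced so a single negative byte counter can index both arrays */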
02300 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
02301 x86_reg reglen = len;
02302 // not bit-exact: pf2id uses different rounding than C and SSE
02303 __asm__ volatile(
02304 "add %0 , %0 \n\t"
02305 "lea (%2,%0,2) , %2 \n\t"
02306 "add %0 , %1 \n\t"
02307 "neg %0 \n\t"
02308 "1: \n\t"
02309 "pf2id (%2,%0,2) , %%mm0 \n\t"
02310 "pf2id 8(%2,%0,2) , %%mm1 \n\t"
02311 "pf2id 16(%2,%0,2) , %%mm2 \n\t"
02312 "pf2id 24(%2,%0,2) , %%mm3 \n\t"
02313 "packssdw %%mm1 , %%mm0 \n\t"
02314 "packssdw %%mm3 , %%mm2 \n\t"
02315 "movq %%mm0 , (%1,%0) \n\t"
02316 "movq %%mm2 , 8(%1,%0) \n\t"
02317 "add $16 , %0 \n\t"
02318 " js 1b \n\t"
02319 "femms \n\t"
02320 :"+r"(reglen), "+r"(dst), "+r"(src)
02321 );
02322 }
02323 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
02324 x86_reg reglen = len;
02325 __asm__ volatile(
02326 "add %0 , %0 \n\t"
02327 "lea (%2,%0,2) , %2 \n\t"
02328 "add %0 , %1 \n\t"
02329 "neg %0 \n\t"
02330 "1: \n\t"
02331 "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
02332 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
02333 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
02334 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
02335 "packssdw %%mm1 , %%mm0 \n\t"
02336 "packssdw %%mm3 , %%mm2 \n\t"
02337 "movq %%mm0 , (%1,%0) \n\t"
02338 "movq %%mm2 , 8(%1,%0) \n\t"
02339 "add $16 , %0 \n\t"
02340 " js 1b \n\t"
02341 "emms \n\t"
02342 :"+r"(reglen), "+r"(dst), "+r"(src)
02343 );
02344 }
02345
02346 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
02347 x86_reg reglen = len;
02348 __asm__ volatile(
02349 "add %0 , %0 \n\t"
02350 "lea (%2,%0,2) , %2 \n\t"
02351 "add %0 , %1 \n\t"
02352 "neg %0 \n\t"
02353 "1: \n\t"
02354 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
02355 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
02356 "packssdw %%xmm1 , %%xmm0 \n\t"
02357 "movdqa %%xmm0 , (%1,%0) \n\t"
02358 "add $16 , %0 \n\t"
02359 " js 1b \n\t"
02360 :"+r"(reglen), "+r"(dst), "+r"(src)
02361 );
02362 }
02363
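/* prototypes for yasm-assembled helpers; without yasm the interleave6
   entry points are remapped to the generic misc versions defined below */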
02364 #if HAVE_YASM
02365 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
02366 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
02367 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
02368 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top);
02369 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
02370 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
02371 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
02372 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
02373 #if ARCH_X86_32
02374 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
02375 {
02376 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
02377 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
02378 }
02379 #endif
02380 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
02381 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
02382 #else
02383 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
02384 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
02385 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
02386 #endif
02387 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
02388
02389 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
02390 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
02391 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
02392 DECLARE_ALIGNED_16(int16_t, tmp[len]);\
02393 int i,j,c;\
02394 for(c=0; c<channels; c++){\
02395 float_to_int16_##cpu(tmp, src[c], len);\
02396 for(i=0, j=c; i<len; i++, j+=channels)\
02397 dst[j] = tmp[i];\
02398 }\
02399 }\
02400 \
02401 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
02402 if(channels==1)\
02403 float_to_int16_##cpu(dst, src[0], len);\
02404 else if(channels==2){\
02405 x86_reg reglen = len; \
02406 const float *src0 = src[0];\
02407 const float *src1 = src[1];\
02408 __asm__ volatile(\
02409 "shl $2, %0 \n"\
02410 "add %0, %1 \n"\
02411 "add %0, %2 \n"\
02412 "add %0, %3 \n"\
02413 "neg %0 \n"\
02414 body\
02415 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
02416 );\
02417 }else if(channels==6){\
02418 ff_float_to_int16_interleave6_##cpu(dst, src, len);\
02419 }else\
02420 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
02421 }
02422
02423 FLOAT_TO_INT16_INTERLEAVE(3dnow,
02424 "1: \n"
02425 "pf2id (%2,%0), %%mm0 \n"
02426 "pf2id 8(%2,%0), %%mm1 \n"
02427 "pf2id (%3,%0), %%mm2 \n"
02428 "pf2id 8(%3,%0), %%mm3 \n"
02429 "packssdw %%mm1, %%mm0 \n"
02430 "packssdw %%mm3, %%mm2 \n"
02431 "movq %%mm0, %%mm1 \n"
02432 "punpcklwd %%mm2, %%mm0 \n"
02433 "punpckhwd %%mm2, %%mm1 \n"
02434 "movq %%mm0, (%1,%0)\n"
02435 "movq %%mm1, 8(%1,%0)\n"
02436 "add $16, %0 \n"
02437 "js 1b \n"
02438 "femms \n"
02439 )
02440
02441 FLOAT_TO_INT16_INTERLEAVE(sse,
02442 "1: \n"
02443 "cvtps2pi (%2,%0), %%mm0 \n"
02444 "cvtps2pi 8(%2,%0), %%mm1 \n"
02445 "cvtps2pi (%3,%0), %%mm2 \n"
02446 "cvtps2pi 8(%3,%0), %%mm3 \n"
02447 "packssdw %%mm1, %%mm0 \n"
02448 "packssdw %%mm3, %%mm2 \n"
02449 "movq %%mm0, %%mm1 \n"
02450 "punpcklwd %%mm2, %%mm0 \n"
02451 "punpckhwd %%mm2, %%mm1 \n"
02452 "movq %%mm0, (%1,%0)\n"
02453 "movq %%mm1, 8(%1,%0)\n"
02454 "add $16, %0 \n"
02455 "js 1b \n"
02456 "emms \n"
02457 )
02458
02459 FLOAT_TO_INT16_INTERLEAVE(sse2,
02460 "1: \n"
02461 "cvtps2dq (%2,%0), %%xmm0 \n"
02462 "cvtps2dq (%3,%0), %%xmm1 \n"
02463 "packssdw %%xmm1, %%xmm0 \n"
02464 "movhlps %%xmm0, %%xmm1 \n"
02465 "punpcklwd %%xmm1, %%xmm0 \n"
02466 "movdqa %%xmm0, (%1,%0) \n"
02467 "add $16, %0 \n"
02468 "js 1b \n"
02469 )
02470
02471 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
02472 if(channels==6)
02473 ff_float_to_int16_interleave6_3dn2(dst, src, len);
02474 else
02475 float_to_int16_interleave_3dnow(dst, src, len, channels);
02476 }
02477
02478
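/* snow wavelet compose/add helpers, implemented in assembly elsewhere */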
02479 void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
02480 void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
02481 void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
02482 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
02483 void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
02484 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
02485 void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
02486 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
02487
02488
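/* element-wise int16 vector add/subtract and a shifted dot product
   (pmaddwd accumulation followed by a horizontal reduction) */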
02489 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
02490 {
02491 x86_reg o = -(order << 1);
02492 v1 += order;
02493 v2 += order;
02494 __asm__ volatile(
02495 "1: \n\t"
02496 "movdqu (%1,%2), %%xmm0 \n\t"
02497 "movdqu 16(%1,%2), %%xmm1 \n\t"
02498 "paddw (%0,%2), %%xmm0 \n\t"
02499 "paddw 16(%0,%2), %%xmm1 \n\t"
02500 "movdqa %%xmm0, (%0,%2) \n\t"
02501 "movdqa %%xmm1, 16(%0,%2) \n\t"
02502 "add $32, %2 \n\t"
02503 "js 1b \n\t"
02504 : "+r"(v1), "+r"(v2), "+r"(o)
02505 );
02506 }
02507
02508 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
02509 {
02510 x86_reg o = -(order << 1);
02511 v1 += order;
02512 v2 += order;
02513 __asm__ volatile(
02514 "1: \n\t"
02515 "movdqa (%0,%2), %%xmm0 \n\t"
02516 "movdqa 16(%0,%2), %%xmm2 \n\t"
02517 "movdqu (%1,%2), %%xmm1 \n\t"
02518 "movdqu 16(%1,%2), %%xmm3 \n\t"
02519 "psubw %%xmm1, %%xmm0 \n\t"
02520 "psubw %%xmm3, %%xmm2 \n\t"
02521 "movdqa %%xmm0, (%0,%2) \n\t"
02522 "movdqa %%xmm2, 16(%0,%2) \n\t"
02523 "add $32, %2 \n\t"
02524 "js 1b \n\t"
02525 : "+r"(v1), "+r"(v2), "+r"(o)
02526 );
02527 }
02528
02529 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
02530 {
02531 int res = 0;
02532 DECLARE_ALIGNED_16(xmm_reg, sh);
02533 x86_reg o = -(order << 1);
02534
02535 v1 += order;
02536 v2 += order;
02537 sh.a = shift;
02538 __asm__ volatile(
02539 "pxor %%xmm7, %%xmm7 \n\t"
02540 "1: \n\t"
02541 "movdqu (%0,%3), %%xmm0 \n\t"
02542 "movdqu 16(%0,%3), %%xmm1 \n\t"
02543 "pmaddwd (%1,%3), %%xmm0 \n\t"
02544 "pmaddwd 16(%1,%3), %%xmm1 \n\t"
02545 "paddd %%xmm0, %%xmm7 \n\t"
02546 "paddd %%xmm1, %%xmm7 \n\t"
02547 "add $32, %3 \n\t"
02548 "js 1b \n\t"
02549 "movhlps %%xmm7, %%xmm2 \n\t"
02550 "paddd %%xmm2, %%xmm7 \n\t"
02551 "psrad %4, %%xmm7 \n\t"
02552 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t"
02553 "paddd %%xmm2, %%xmm7 \n\t"
02554 "movd %%xmm7, %2 \n\t"
02555 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
02556 : "m"(sh)
02557 );
02558 return res;
02559 }
02560
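/* runtime CPU detection: query mm_support(), apply the user-supplied
   dsp_mask, then install the fastest available function pointers */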
02561 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
02562 {
02563 mm_flags = mm_support();
02564
02565 if (avctx->dsp_mask) {
02566 if (avctx->dsp_mask & FF_MM_FORCE)
02567 mm_flags |= (avctx->dsp_mask & 0xffff);
02568 else
02569 mm_flags &= ~(avctx->dsp_mask & 0xffff);
02570 }
02571
02572 #if 0
02573 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
02574 if (mm_flags & FF_MM_MMX)
02575 av_log(avctx, AV_LOG_INFO, " mmx");
02576 if (mm_flags & FF_MM_MMXEXT)
02577 av_log(avctx, AV_LOG_INFO, " mmxext");
02578 if (mm_flags & FF_MM_3DNOW)
02579 av_log(avctx, AV_LOG_INFO, " 3dnow");
02580 if (mm_flags & FF_MM_SSE)
02581 av_log(avctx, AV_LOG_INFO, " sse");
02582 if (mm_flags & FF_MM_SSE2)
02583 av_log(avctx, AV_LOG_INFO, " sse2");
02584 av_log(avctx, AV_LOG_INFO, "\n");
02585 #endif
02586
02587 if (mm_flags & FF_MM_MMX) {
02588 const int idct_algo= avctx->idct_algo;
02589
02590 if(avctx->lowres==0){
02591 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
02592 c->idct_put= ff_simple_idct_put_mmx;
02593 c->idct_add= ff_simple_idct_add_mmx;
02594 c->idct = ff_simple_idct_mmx;
02595 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
02596 #if CONFIG_GPL
02597 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
02598 if(mm_flags & FF_MM_MMXEXT){
02599 c->idct_put= ff_libmpeg2mmx2_idct_put;
02600 c->idct_add= ff_libmpeg2mmx2_idct_add;
02601 c->idct = ff_mmxext_idct;
02602 }else{
02603 c->idct_put= ff_libmpeg2mmx_idct_put;
02604 c->idct_add= ff_libmpeg2mmx_idct_add;
02605 c->idct = ff_mmx_idct;
02606 }
02607 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
02608 #endif
02609 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
02610 idct_algo==FF_IDCT_VP3){
02611 if(mm_flags & FF_MM_SSE2){
02612 c->idct_put= ff_vp3_idct_put_sse2;
02613 c->idct_add= ff_vp3_idct_add_sse2;
02614 c->idct = ff_vp3_idct_sse2;
02615 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02616 }else{
02617 c->idct_put= ff_vp3_idct_put_mmx;
02618 c->idct_add= ff_vp3_idct_add_mmx;
02619 c->idct = ff_vp3_idct_mmx;
02620 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
02621 }
02622 }else if(idct_algo==FF_IDCT_CAVS){
02623 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02624 }else if(idct_algo==FF_IDCT_XVIDMMX){
02625 if(mm_flags & FF_MM_SSE2){
02626 c->idct_put= ff_idct_xvid_sse2_put;
02627 c->idct_add= ff_idct_xvid_sse2_add;
02628 c->idct = ff_idct_xvid_sse2;
02629 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
02630 }else if(mm_flags & FF_MM_MMXEXT){
02631 c->idct_put= ff_idct_xvid_mmx2_put;
02632 c->idct_add= ff_idct_xvid_mmx2_add;
02633 c->idct = ff_idct_xvid_mmx2;
02634 }else{
02635 c->idct_put= ff_idct_xvid_mmx_put;
02636 c->idct_add= ff_idct_xvid_mmx_add;
02637 c->idct = ff_idct_xvid_mmx;
02638 }
02639 }
02640 }
02641
02642 c->put_pixels_clamped = put_pixels_clamped_mmx;
02643 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
02644 c->add_pixels_clamped = add_pixels_clamped_mmx;
02645 c->clear_block = clear_block_mmx;
02646 c->clear_blocks = clear_blocks_mmx;
02647 if (mm_flags & FF_MM_SSE)
02648 c->clear_block = clear_block_sse;
02649
02650 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02651 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
02652 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
02653 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
02654 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
02655
02656 SET_HPEL_FUNCS(put, 0, 16, mmx);
02657 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
02658 SET_HPEL_FUNCS(avg, 0, 16, mmx);
02659 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
02660 SET_HPEL_FUNCS(put, 1, 8, mmx);
02661 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
02662 SET_HPEL_FUNCS(avg, 1, 8, mmx);
02663 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
02664
02665 c->gmc= gmc_mmx;
02666
02667 c->add_bytes= add_bytes_mmx;
02668 c->add_bytes_l2= add_bytes_l2_mmx;
02669
02670 c->draw_edges = draw_edges_mmx;
02671
02672 if (CONFIG_ANY_H263) {
02673 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
02674 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
02675 }
02676 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
02677 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
02678 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;
02679
02680 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
02681 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
02682
02683 c->h264_idct_dc_add=
02684 c->h264_idct_add= ff_h264_idct_add_mmx;
02685 c->h264_idct8_dc_add=
02686 c->h264_idct8_add= ff_h264_idct8_add_mmx;
02687
02688 c->h264_idct_add16 = ff_h264_idct_add16_mmx;
02689 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx;
02690 c->h264_idct_add8 = ff_h264_idct_add8_mmx;
02691 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
02692
02693 if (CONFIG_VP6_DECODER) {
02694 c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
02695 }
02696
02697 if (mm_flags & FF_MM_MMXEXT) {
02698 c->prefetch = prefetch_mmx2;
02699
02700 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
02701 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
02702
02703 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
02704 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
02705 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
02706
02707 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
02708 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
02709
02710 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
02711 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
02712 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
02713
02714 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
02715 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
02716 c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
02717 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2;
02718 c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
02719 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
02720
02721 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02722 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
02723 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
02724 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
02725 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
02726 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
02727 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
02728
02729 if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
02730 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
02731 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
02732 }
02733 }
02734
02735 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02736 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
02737 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
02738 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
02739 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
02740 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
02741 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
02742 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
02743 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
02744 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
02745 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
02746 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
02747 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
02748 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
02749 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
02750 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
02751 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
02752
02753 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
02754 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
02755 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
02756 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
02757 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
02758 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
02759
02760 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
02761 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
02762 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
02763 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
02764 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
02765 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
02766
02767 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
02768 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
02769 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
02770 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
02771
02772 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
02773 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
02774
02775 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
02776 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
02777 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
02778 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
02779 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
02780 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
02781 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
02782 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
02783 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
02784 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
02785 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
02786
02787 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
02788 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
02789 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
02790 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
02791 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
02792 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
02793 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
02794 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
02795
02796 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
02797 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
02798 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
02799 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
02800 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
02801 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
02802 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
02803 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
02804
02805 #if HAVE_YASM
02806 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
02807 #endif
02808 #if HAVE_7REGS && HAVE_TEN_OPERANDS
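/* assumption: the 3DNow! flag doubles as an "AMD CPU" heuristic here,
   where the cmov-based median prediction is presumably the faster choice */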
02809 if( mm_flags&FF_MM_3DNOW )
02810 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
02811 #endif
02812
02813 if (CONFIG_CAVS_DECODER)
02814 ff_cavsdsp_init_mmx2(c, avctx);
02815
02816 if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
02817 ff_vc1dsp_init_mmx(c, avctx);
02818
02819 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
02820 } else if (mm_flags & FF_MM_3DNOW) {
02821 c->prefetch = prefetch_3dnow;
02822
02823 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
02824 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
02825
02826 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
02827 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
02828 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
02829
02830 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
02831 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
02832
02833 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
02834 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
02835 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
02836
02837 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02838 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
02839 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
02840 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
02841 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
02842 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
02843 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
02844 }
02845
02846 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
02847 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
02848 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
02849 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
02850 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
02851 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
02852
02853 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
02854 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
02855 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
02856 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
02857 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
02858 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
02859
02860 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
02861 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
02862 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
02863 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
02864
02865 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
02866 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
02867
02868 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
02869 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
02870
02871 if (CONFIG_CAVS_DECODER)
02872 ff_cavsdsp_init_3dnow(c, avctx);
02873 }
02874
02875
02876 #define H264_QPEL_FUNCS(x, y, CPU)\
02877 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
02878 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
02879 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
02880 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
02881 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
02882 // these functions are slower than mmx on AMD, but faster on Intel
02883
02884
02885
02886
02887 H264_QPEL_FUNCS(0, 0, sse2);
02888 }
02889 if(mm_flags & FF_MM_SSE2){
02890 c->h264_idct8_add = ff_h264_idct8_add_sse2;
02891 c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
02892
02893 H264_QPEL_FUNCS(0, 1, sse2);
02894 H264_QPEL_FUNCS(0, 2, sse2);
02895 H264_QPEL_FUNCS(0, 3, sse2);
02896 H264_QPEL_FUNCS(1, 1, sse2);
02897 H264_QPEL_FUNCS(1, 2, sse2);
02898 H264_QPEL_FUNCS(1, 3, sse2);
02899 H264_QPEL_FUNCS(2, 1, sse2);
02900 H264_QPEL_FUNCS(2, 2, sse2);
02901 H264_QPEL_FUNCS(2, 3, sse2);
02902 H264_QPEL_FUNCS(3, 1, sse2);
02903 H264_QPEL_FUNCS(3, 2, sse2);
02904 H264_QPEL_FUNCS(3, 3, sse2);
02905
02906 if (CONFIG_VP6_DECODER) {
02907 c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
02908 }
02909 }
02910 #if HAVE_SSSE3
02911 if(mm_flags & FF_MM_SSSE3){
02912 H264_QPEL_FUNCS(1, 0, ssse3);
02913 H264_QPEL_FUNCS(1, 1, ssse3);
02914 H264_QPEL_FUNCS(1, 2, ssse3);
02915 H264_QPEL_FUNCS(1, 3, ssse3);
02916 H264_QPEL_FUNCS(2, 0, ssse3);
02917 H264_QPEL_FUNCS(2, 1, ssse3);
02918 H264_QPEL_FUNCS(2, 2, ssse3);
02919 H264_QPEL_FUNCS(2, 3, ssse3);
02920 H264_QPEL_FUNCS(3, 0, ssse3);
02921 H264_QPEL_FUNCS(3, 1, ssse3);
02922 H264_QPEL_FUNCS(3, 2, ssse3);
02923 H264_QPEL_FUNCS(3, 3, ssse3);
02924 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
02925 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
02926 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
02927 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
02928 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
02929 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
02930 }
02931 #endif
02932
02933 #if CONFIG_GPL && HAVE_YASM
02934 if( mm_flags&FF_MM_MMXEXT ){
02935 #if ARCH_X86_32
02936 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
02937 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
02938 #endif
02939 if( mm_flags&FF_MM_SSE2 ){
02940 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
02941 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
02942 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
02943 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
02944 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
02945 #endif
02946 c->h264_idct_add16 = ff_h264_idct_add16_sse2;
02947 c->h264_idct_add8 = ff_h264_idct_add8_sse2;
02948 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
02949 }
02950 }
02951 #endif
02952
02953 #if CONFIG_SNOW_DECODER
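/* note: the "& 0" below keeps the SSE2 snow path disabled; only the MMX
   versions are actually installed */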
02954 if(mm_flags & FF_MM_SSE2 & 0){
02955 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
02956 #if HAVE_7REGS
02957 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
02958 #endif
02959 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
02960 }
02961 else{
02962 if(mm_flags & FF_MM_MMXEXT){
02963 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
02964 #if HAVE_7REGS
02965 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
02966 #endif
02967 }
02968 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
02969 }
02970 #endif
02971
02972 if(mm_flags & FF_MM_3DNOW){
02973 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
02974 c->vector_fmul = vector_fmul_3dnow;
02975 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02976 c->float_to_int16 = float_to_int16_3dnow;
02977 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
02978 }
02979 }
02980 if(mm_flags & FF_MM_3DNOWEXT){
02981 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
02982 c->vector_fmul_window = vector_fmul_window_3dnow2;
02983 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02984 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
02985 }
02986 }
02987 if(mm_flags & FF_MM_SSE){
02988 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
02989 c->ac3_downmix = ac3_downmix_sse;
02990 c->vector_fmul = vector_fmul_sse;
02991 c->vector_fmul_reverse = vector_fmul_reverse_sse;
02992 c->vector_fmul_add_add = vector_fmul_add_add_sse;
02993 c->vector_fmul_window = vector_fmul_window_sse;
02994 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
02995 c->float_to_int16 = float_to_int16_sse;
02996 c->float_to_int16_interleave = float_to_int16_interleave_sse;
02997 }
02998 if(mm_flags & FF_MM_3DNOW)
02999 c->vector_fmul_add_add = vector_fmul_add_add_3dnow;
03000 if(mm_flags & FF_MM_SSE2){
03001 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
03002 c->float_to_int16 = float_to_int16_sse2;
03003 c->float_to_int16_interleave = float_to_int16_interleave_sse2;
03004 c->add_int16 = add_int16_sse2;
03005 c->sub_int16 = sub_int16_sse2;
03006 c->scalarproduct_int16 = scalarproduct_int16_sse2;
03007 }
03008 }
03009
03010 if (CONFIG_ENCODERS)
03011 dsputilenc_init_mmx(c, avctx);
03012
03013 #if 0
03014 // for speed testing
03015 get_pixels = just_return;
03016 put_pixels_clamped = just_return;
03017 add_pixels_clamped = just_return;
03018
03019 pix_abs16x16 = just_return;
03020 pix_abs16x16_x2 = just_return;
03021 pix_abs16x16_y2 = just_return;
03022 pix_abs16x16_xy2 = just_return;
03023
03024 put_pixels_tab[0] = just_return;
03025 put_pixels_tab[1] = just_return;
03026 put_pixels_tab[2] = just_return;
03027 put_pixels_tab[3] = just_return;
03028
03029 put_no_rnd_pixels_tab[0] = just_return;
03030 put_no_rnd_pixels_tab[1] = just_return;
03031 put_no_rnd_pixels_tab[2] = just_return;
03032 put_no_rnd_pixels_tab[3] = just_return;
03033
03034 avg_pixels_tab[0] = just_return;
03035 avg_pixels_tab[1] = just_return;
03036 avg_pixels_tab[2] = just_return;
03037 avg_pixels_tab[3] = just_return;
03038
03039 avg_no_rnd_pixels_tab[0] = just_return;
03040 avg_no_rnd_pixels_tab[1] = just_return;
03041 avg_no_rnd_pixels_tab[2] = just_return;
03042 avg_no_rnd_pixels_tab[3] = just_return;
03043
03044
03045
03046 #endif
03047 }