#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

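/* Read an 8x8 block of unsigned bytes and widen it to signed 16-bit
 * DCT coefficients. Two rows (32 bytes of output) are stored per
 * iteration; REG_a runs from -128 up to 0 so the loop can close with a
 * plain sign test. */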
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "mov $-128, %%"REG_a" \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%"REG_a") \n\t"
        "movq %%mm1, 8(%1, %%"REG_a") \n\t"
        "movq %%mm2, 16(%1, %%"REG_a") \n\t"
        "movq %%mm3, 24(%1, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add $32, %%"REG_a" \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*2)
        : "%"REG_a
    );
}

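/* SSE2 version of get_pixels(): fully unrolled, four rows per batch,
 * zero-extending with punpcklbw against a zeroed xmm4 and storing 16
 * bytes at a time (block must be 16-byte aligned for movdqa). */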
static void get_pixels_sse2(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    __asm__ volatile(
        "pxor %%xmm4, %%xmm4 \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "lea (%0,%2,4), %0 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, (%1) \n\t"
        "movdqa %%xmm1, 16(%1) \n\t"
        "movdqa %%xmm2, 32(%1) \n\t"
        "movdqa %%xmm3, 48(%1) \n\t"
        "movq (%0), %%xmm0 \n\t"
        "movq (%0, %2), %%xmm1 \n\t"
        "movq (%0, %2,2), %%xmm2 \n\t"
        "movq (%0, %3), %%xmm3 \n\t"
        "punpcklbw %%xmm4, %%xmm0 \n\t"
        "punpcklbw %%xmm4, %%xmm1 \n\t"
        "punpcklbw %%xmm4, %%xmm2 \n\t"
        "punpcklbw %%xmm4, %%xmm3 \n\t"
        "movdqa %%xmm0, 64(%1) \n\t"
        "movdqa %%xmm1, 80(%1) \n\t"
        "movdqa %%xmm2, 96(%1) \n\t"
        "movdqa %%xmm3, 112(%1) \n\t"
        : "+r" (pixels)
        : "r" (block), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3)
    );
}

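/* block[i] = s1[i] - s2[i], widened to 16 bits, for an 8x8 block; one
 * row of both sources is consumed per iteration. */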
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "mov $-128, %%"REG_a" \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%"REG_a") \n\t"
        "movq %%mm1, 8(%2, %%"REG_a") \n\t"
        "add %3, %0 \n\t"
        "add %3, %1 \n\t"
        "add $16, %%"REG_a" \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((x86_reg)stride)
        : "%"REG_a
    );
}

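/* Sum of all 256 pixels of a 16x16 block. 16-bit lane accumulators are
 * sufficient since 256 * 255 < 2^16; the tail folds the four words of
 * mm6 and masks the result to 16 bits. */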
static int pix_sum16_mmx(uint8_t *pix, int line_size){
    const int h = 16;
    int sum;
    x86_reg index = -line_size * h;

    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "add %3, %1 \n\t"
        "js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" ((x86_reg)line_size)
    );

    return sum;
}

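/* Sum of squared pixel values over a 16x16 block: each row is widened
 * to words, pmaddwd squares and pairs them into 32-bit sums, and mm7
 * accumulates the per-row results. */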
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    __asm__ volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"
        "movq 8(%0),%%mm3\n"

        "movq %%mm2,%%mm1\n"

        "punpckhbw %%mm0,%%mm1\n"
        "punpcklbw %%mm0,%%mm2\n"

        "movq %%mm3,%%mm4\n"
        "punpckhbw %%mm0,%%mm3\n"
        "punpcklbw %%mm0,%%mm4\n"

        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm2,%%mm2\n"

        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"

        "paddd %%mm1,%%mm2\n"

        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"

        "add %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size)
        : "%ecx");
    return tmp;
}

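/* Sum of squared errors between two 8-pixel-wide blocks, two rows per
 * iteration (hence the halved counter). |a - b| is built from the two
 * saturated subtractions OR'ed together, then squared and horizontally
 * paired with pmaddwd. */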
static int sse8_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm1\n"
        "movq (%1),%%mm2\n"
        "movq (%0,%3),%%mm3\n"
        "movq (%1,%3),%%mm4\n"

        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n"
        "punpcklbw %%mm0,%%mm3\n"

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"
        "lea (%1,%3,2), %1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}

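/* 16-pixel-wide variant of sse8_mmx(): one row per iteration, same
 * saturated-subtract/por absolute-difference trick. */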
static int sse16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm1\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm3\n"
        "movq 8(%1),%%mm4\n"

        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n"
        "punpcklbw %%mm0,%%mm3\n"

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "add %3,%0\n"
        "add %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}

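/* High-frequency noise measure used by the NSSE comparison below: the
 * sum of |(p[y][x] - p[y][x+1]) - (p[y+1][x] - p[y+1][x+1])|, i.e. the
 * vertical variation of the horizontal gradient. The psllq/psrlq pair
 * builds the two overlapping 7-pixel vectors of each row; absolute
 * values use the pcmpgtw/pxor/psubw sign trick. The first two rows are
 * peeled off before the loop, which then handles two rows per
 * iteration over the remaining h-2. */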
static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n"
        "pcmpgtw %%mm5, %%mm1\n"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h-2)
        : "%ecx");
    return tmp;
}

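/* 16-pixel-wide noise measure. The left half uses unaligned loads of
 * pix and pix+1 for the horizontal gradient; the right half is
 * delegated to hf_noise8_mmx() on pix+8 and added to the result. */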
static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h) {
    int tmp;
    uint8_t *pix = pix1;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n"
        "pcmpgtw %%mm5, %%mm1\n"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n"
        "pcmpgtw %%mm2, %%mm1\n"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}

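/* Noise-shaped SSE: plain SSE plus the weighted absolute difference of
 * the two blocks' high-frequency noise measures, so the encoder can
 * prefer errors that preserve texture. The weight is
 * avctx->nsse_weight, falling back to 8 when no context is given. */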
static int nsse16_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if (c)
        score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

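/* Intra vertical SAD: sum of |pix[y][x] - pix[y+1][x]| over a 16-wide
 * block. The SUM macro keeps the previous row in registers and swaps
 * register roles between the two halves of the unrolled loop; the
 * word-lane total is folded at the end and masked to 16 bits. */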
static int vsad_intra16_mmx(void *v, uint8_t *pix, uint8_t *dummy, int line_size, int h) {
    int tmp;

    av_assert2((((intptr_t)pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

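/* MMXEXT variant of vsad_intra16: psadbw sums the 8 absolute byte
 * differences in one instruction, so no unpack/fold sequence is
 * needed. */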
static int vsad_intra16_mmxext(void *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    av_assert2((((intptr_t)pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

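/* Vertical SAD of the residual pix1 - pix2. The byte difference is
 * signed, so it is biased by 0x80 (mm7 is filled with 0x80 bytes via
 * pcmpeqw/psllw/packsswb) to make the unsigned psubusb/por
 * absolute-difference trick valid; the result is masked to 15 bits. */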
static int vsad16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;

    av_assert2((((intptr_t)pix1) & 7) == 0);
    av_assert2((((intptr_t)pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

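/* MMXEXT variant of vsad16_mmx(): same 0x80 bias, with psadbw
 * replacing the unpack-and-add absolute-difference sequence. */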
static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    av_assert2((((intptr_t)pix1) & 7) == 0);
    av_assert2((((intptr_t)pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

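/* dst[i] = src1[i] - src2[i] with byte wraparound; 16 bytes per MMX
 * iteration, remainder handled by the scalar tail loop. */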
static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w){
    x86_reg i = 0;
    if (w >= 16)
        __asm__ volatile(
            "1: \n\t"
            "movq (%2, %0), %%mm0 \n\t"
            "movq (%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, (%3, %0) \n\t"
            "movq 8(%2, %0), %%mm0 \n\t"
            "movq 8(%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, 8(%3, %0) \n\t"
            "add $16, %0 \n\t"
            "cmp %4, %0 \n\t"
            "jb 1b \n\t"
            : "+r" (i)
            : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w-15)
        );
    for (; i < w; i++)
        dst[i] = src1[i] - src2[i];
}

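/* HuffYUV median-prediction subtraction: dst[i] = src2[i] -
 * median(left, top, left + top - topleft), with src1 as the row above
 * and the median computed branchlessly from pminub/pmaxub. Element 0
 * and the left/left_top state are handled in C around the asm loop. */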
static void sub_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *src1,
                                              const uint8_t *src2, int w,
                                              int *left, int *left_top)
{
    x86_reg i = 0;
    uint8_t l, lt;

    __asm__ volatile(
        "movq (%1, %0), %%mm0 \n\t"
        "psllq $8, %%mm0 \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm1 \n\t"
        "movq -1(%2, %0), %%mm2 \n\t"
        "movq (%2, %0), %%mm3 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "pminub %%mm5, %%mm1 \n\t"
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t"
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "movq -1(%1, %0), %%mm0 \n\t"
        "cmp %4, %0 \n\t"
        "jb 1b \n\t"
        : "+r" (i)
        : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w)
    );

    l  = *left;
    lt = *left_top;

    dst[0] = src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt) & 0xFF);

    *left_top = src1[w-1];
    *left     = src2[w-1];
}

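/* Absolute value of packed signed words in three instruction-set
 * flavours: plain MMX flips negative lanes with a pcmpgtw-derived sign
 * mask, MMXEXT takes max(x, -x) via pmaxsw, and SSSE3 has a native
 * pabsw. MMABS_SUM accumulates the results with unsigned saturation,
 * and the HSUM_* macros fold the word lanes into a scalar. */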
#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum " \n\t"

#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(DCTELEM *block){\
    int sum;\
    __asm__ volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD       DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z)    MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
#define MMABS(a,z)    MMABS_MMXEXT(a,z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD       DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif /* HAVE_SSSE3_INLINE */
#undef HSUM
#undef DCT_SAD

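/* Sum of squared differences between an int8_t and an int16_t array.
 * The bytes are sign-extended by interleaving them into the high byte
 * of each word and shifting with psraw $8 (the stale low bytes that
 * punpckhbw leaves in mm3 on the first pass are shifted out). MMX
 * instructions do not touch EFLAGS, so the closing jg still tests the
 * "sub $8" at the top of the loop. */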
static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size){
    int sum;
    x86_reg i = size;
    __asm__ volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        : "+r" (i), "=r" (sum)
        : "r" (pix1), "r" (pix2)
    );
    return sum;
}

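/* Helpers for the QNS (quantization noise shaping) template included
 * below: PHADDD folds the two dwords of a register, and PMULHRW is a
 * rounding fixed-point multiply, instantiated for MMX (pmulhw plus
 * explicit rounding), 3DNow! (pmulhrw) and SSSE3 (pmulhrsw). */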
#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"

#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t"
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu  (void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

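/* Bind the DSPContext function pointers to the best implementation the
 * host CPU supports, from plain MMX up through MMXEXT, SSE2, SSSE3 and
 * 3DNow!, honouring CODEC_FLAG_BITEXACT for the approximations that
 * are not bit exact. */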
void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
    int bit_depth = avctx->bits_per_raw_sample;

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) {
            if (mm_flags & AV_CPU_FLAG_SSE2) {
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmxext;
            } else {
                c->fdct = ff_fdct_mmx;
            }
        }

        if (bit_depth <= 8)
            c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
        c->pix_sum     = pix_sum16_mmx;

        c->diff_bytes      = diff_bytes_mmx;
        c->sum_abs_dctelem = sum_abs_dctelem_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0]    = sse16_mmx;
        c->sse[1]    = sse8_mmx;
        c->vsad[4]   = vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->try_8x8basis = try_8x8basis_mmx;
        }
        c->add_8x8basis = add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
            c->vsad[4]         = vsad_intra16_mmxext;

            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->vsad[0] = vsad16_mmxext;
            }

            c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;
        }

        if (mm_flags & AV_CPU_FLAG_SSE2) {
            if (bit_depth <= 8)
                c->get_pixels = get_pixels_sse2;
            c->sum_abs_dctelem = sum_abs_dctelem_sse2;
        }

#if HAVE_SSSE3_INLINE
        if (mm_flags & AV_CPU_FLAG_SSSE3) {
            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->try_8x8basis = try_8x8basis_ssse3;
            }
            c->add_8x8basis    = add_8x8basis_ssse3;
            c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
        }
#endif /* HAVE_SSSE3_INLINE */

        if (mm_flags & AV_CPU_FLAG_3DNOW) {
            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->try_8x8basis = try_8x8basis_3dnow;
            }
            c->add_8x8basis = add_8x8basis_3dnow;
        }
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

        if (EXTERNAL_MMXEXT(mm_flags)) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
            c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        }

        if (EXTERNAL_SSE2(mm_flags)) {
            c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
            c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
            c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif /* HAVE_ALIGNED_STACK */
        }

        if (EXTERNAL_SSSE3(mm_flags) && HAVE_ALIGNED_STACK) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
            c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
        }
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}